author    Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-05-23 20:09:52 +0000
committer Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-05-23 20:09:52 +0000
commit    7ea410e615fcda35ed222eece7626731d02324af (patch)
tree      ce23e15e88a8c51e74a637916f0404087ac5459b
parent    da198237e5946bb344aabcc1d5dbd5628b8d496d (diff)
parent    ec2e64f0bb54d942059d75ccc7c4ab6daa775f78 (diff)
download  gs201-android-gs-lynx-5.10-u-beta3.tar.gz
Change-Id: I741d8776f8634b51b5dc7d9717341f68518674a1
-rw-r--r--  .gitignore | 2
-rw-r--r--  Makefile | 83
-rw-r--r--  amalthea/config-pwr-state.h | 41
-rw-r--r--  amalthea/config.h | 25
-rw-r--r--  amalthea/context.h | 16
-rw-r--r--  amalthea/csrs.h | 75
-rw-r--r--  amalthea/iova.h (renamed from gxp-iova.h) | 19
-rw-r--r--  amalthea/lpm.h | 139
-rw-r--r--  amalthea/mailbox-regs.h | 30
-rw-r--r--  gcip-kernel-driver/drivers/gcip/Makefile | 32
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c | 97
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c | 196
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c | 147
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-firmware.c | 156
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-image-config.c | 198
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-kci.c | 525
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mailbox.c | 689
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c | 66
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-pm.c | 220
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-telemetry.c | 268
-rw-r--r--  gcip-kernel-driver/drivers/gcip/gcip-thermal.c | 517
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-alloc-helper.h | 50
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-common-image-header.h | 67
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-dma-fence.h | 154
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-domain-pool.h | 54
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-firmware.h | 134
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-image-config.h | 180
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-kci.h | 394
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-mailbox.h | 554
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-mem-pool.h | 70
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-pm.h | 155
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-telemetry.h | 124
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-thermal.h | 118
-rw-r--r--  gsx01-mailbox-driver.c | 70
-rw-r--r--  gxp-bpm.c | 20
-rw-r--r--  gxp-client.c | 291
-rw-r--r--  gxp-client.h | 93
-rw-r--r--  gxp-common-platform.c | 2247
-rw-r--r--  gxp-config.h | 35
-rw-r--r--  gxp-core-telemetry.c | 936
-rw-r--r--  gxp-core-telemetry.h | 184
-rw-r--r--  gxp-debug-dump.c | 383
-rw-r--r--  gxp-debug-dump.h | 66
-rw-r--r--  gxp-debugfs.c | 339
-rw-r--r--  gxp-debugfs.h | 7
-rw-r--r--  gxp-dma-fence.c | 82
-rw-r--r--  gxp-dma-fence.h | 38
-rw-r--r--  gxp-dma-iommu.c | 869
-rw-r--r--  gxp-dma.h | 420
-rw-r--r--  gxp-dmabuf.c | 22
-rw-r--r--  gxp-dmabuf.h | 10
-rw-r--r--  gxp-domain-pool.c | 109
-rw-r--r--  gxp-domain-pool.h | 27
-rw-r--r--  gxp-doorbell.c | 4
-rw-r--r--  gxp-firmware-data.c | 854
-rw-r--r--  gxp-firmware-data.h | 114
-rw-r--r--  gxp-firmware-loader.c | 301
-rw-r--r--  gxp-firmware-loader.h | 85
-rw-r--r--  gxp-firmware.c | 716
-rw-r--r--  gxp-firmware.h | 86
-rw-r--r--  gxp-host-device-structs.h | 442
-rw-r--r--  gxp-hw-mailbox-driver.c | 323
-rw-r--r--  gxp-internal.h | 203
-rw-r--r--  gxp-lpm.c | 80
-rw-r--r--  gxp-lpm.h | 91
-rw-r--r--  gxp-mailbox-driver.c | 511
-rw-r--r--  gxp-mailbox-driver.h | 124
-rw-r--r--  gxp-mailbox-impl.c | 790
-rw-r--r--  gxp-mailbox-impl.h | 142
-rw-r--r--  gxp-mailbox-manager.c | 32
-rw-r--r--  gxp-mailbox-manager.h | 137
-rw-r--r--  gxp-mailbox-regs.h | 31
-rw-r--r--  gxp-mailbox.c | 1017
-rw-r--r--  gxp-mailbox.h | 293
-rw-r--r--  gxp-mapping.c | 73
-rw-r--r--  gxp-mapping.h | 33
-rw-r--r--  gxp-mba-driver.c | 73
-rw-r--r--  gxp-notification.h | 2
-rw-r--r--  gxp-platform.c | 2323
-rw-r--r--  gxp-pm.c | 245
-rw-r--r--  gxp-pm.h | 115
-rw-r--r--  gxp-range-alloc.c | 118
-rw-r--r--  gxp-range-alloc.h | 94
-rw-r--r--  gxp-ssmt.c | 93
-rw-r--r--  gxp-ssmt.h | 47
-rw-r--r--  gxp-telemetry.c | 705
-rw-r--r--  gxp-telemetry.h | 135
-rw-r--r--  gxp-thermal.c | 335
-rw-r--r--  gxp-thermal.h | 33
-rw-r--r--  gxp-vd.c | 1580
-rw-r--r--  gxp-vd.h | 302
-rw-r--r--  gxp-wakelock.c | 154
-rw-r--r--  gxp-wakelock.h | 73
-rw-r--r--  gxp.h | 1161
-rw-r--r--  mm-backport.h | 33
95 files changed, 17089 insertions, 8857 deletions
diff --git a/.gitignore b/.gitignore
index 0c053d1..ba9b0a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,5 @@ modules.order
Module.symvers
.cache.mk
.thinlto-cache/
+.repo/
+/gcip-kernel-driver
diff --git a/Makefile b/Makefile
index 70342d2..f041243 100644
--- a/Makefile
+++ b/Makefile
@@ -3,31 +3,54 @@
# Makefile for GXP driver.
#
-obj-$(CONFIG_GXP) += gxp.o
+GXP_CHIP ?= AMALTHEA
+CONFIG_$(GXP_CHIP) ?= m
+GCIP_DIR := gcip-kernel-driver/drivers/gcip
-gxp-objs += \
+obj-$(CONFIG_$(GXP_CHIP)) += gxp.o
+
+gxp-objs += \
gxp-bpm.o \
gxp-client.o \
+ gxp-core-telemetry.o \
gxp-debug-dump.o \
gxp-debugfs.o \
+ gxp-dma-fence.o \
+ gxp-dma-iommu.o \
gxp-dmabuf.o \
gxp-domain-pool.o \
gxp-doorbell.o \
gxp-eventfd.o \
- gxp-firmware.o \
gxp-firmware-data.o \
- gxp-hw-mailbox-driver.o \
+ gxp-firmware-loader.o \
+ gxp-firmware.o \
gxp-lpm.o \
+ gxp-mailbox-manager.o \
gxp-mailbox.o \
gxp-mapping.o \
gxp-mb-notification.o \
- gxp-platform.o \
- gxp-range-alloc.o \
gxp-pm.o \
- gxp-telemetry.o \
+ gxp-ssmt.o \
gxp-thermal.o \
- gxp-vd.o \
- gxp-wakelock.o
+ gxp-vd.o
+
+ifeq ($(GXP_CHIP),AMALTHEA)
+
+gxp-objs += \
+ gsx01-mailbox-driver.o \
+ gxp-platform.o \
+ gxp-mailbox-impl.o
+
+GMODULE_PATH := $(OUT_DIR)/../google-modules
+EDGETPU_CHIP := janeiro
+
+endif
+
+ifeq ($(CONFIG_$(GXP_CHIP)),m)
+
+gxp-objs += $(GCIP_DIR)/gcip.o
+
+endif
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
M ?= $(shell pwd)
@@ -44,35 +67,29 @@ endif
# If building via make directly, specify target platform by adding
# "GXP_PLATFORM=<target>"
# With one of the following values:
-# - CLOUDRIPPER
+# - SILICON
# - ZEBU
# - IP_ZEBU
-# Defaults to building for CLOUDRIPPER if not otherwise specified.
-GXP_PLATFORM ?= CLOUDRIPPER
-GXP_CHIP ?= AMALTHEA
+# - GEM5
+# Defaults to building for SILICON if not otherwise specified.
+GXP_PLATFORM ?= SILICON
-# Setup which version of the gxp-dma interface is used.
-# For gem5, need to adopt dma interface without aux domain.
-ifeq ($(GXP_PLATFORM), GEM5)
- gxp-objs += gxp-dma-iommu-gem5.o
-else
- gxp-objs += gxp-dma-iommu.o
-endif
-
-ccflags-y += -DCONFIG_GXP_$(GXP_PLATFORM) -DCONFIG_$(GXP_CHIP)=1 \
- -I$(M)/include -I$(srctree)/drivers/gxp/include
+gxp-flags := -DCONFIG_GXP_$(GXP_PLATFORM) -DCONFIG_$(GXP_CHIP)=1 \
+ -I$(M)/include -I$(M)/gcip-kernel-driver/include \
+ -I$(srctree)/$(M)/include \
+ -I$(srctree)/$(M)/gcip-kernel-driver/include \
+ -I$(srctree)/drivers/gxp/include
+ccflags-y += $(EXTRA_CFLAGS) $(gxp-flags)
-KBUILD_OPTIONS += CONFIG_GXP=m GXP_CHIP=AMALTHEA
-
-ifdef CONFIG_GXP_TEST
-subdir-ccflags-y += -Wall -Werror -I$(srctree)/drivers/gxp/include
-obj-y += unittests/
-include $(srctree)/drivers/gxp/unittests/Makefile.include
-$(call include_test_path, $(gxp-objs))
-endif
+KBUILD_OPTIONS += GXP_CHIP=$(GXP_CHIP) GXP_PLATFORM=$(GXP_PLATFORM)
# Access TPU driver's exported symbols.
-KBUILD_EXTRA_SYMBOLS += ../google-modules/edgetpu/janeiro/drivers/edgetpu/Module.symvers
+EXTRA_SYMBOLS += $(GMODULE_PATH)/edgetpu/$(EDGETPU_CHIP)/drivers/edgetpu/Module.symvers
-modules modules_install clean:
+modules modules_install:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M)/$(GCIP_DIR) gcip.o
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) \
+ EXTRA_CFLAGS="$(EXTRA_CFLAGS)" KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" $(@)
+clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M)/$(GCIP_DIR) $(@)
$(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) $(@)
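(Illustrative usage, not part of the change: with the options above, an out-of-tree build for a specific target can be invoked directly as "make KERNEL_SRC=/path/to/kernel GXP_CHIP=AMALTHEA GXP_PLATFORM=ZEBU modules"; GXP_CHIP defaults to AMALTHEA and GXP_PLATFORM to SILICON when not given.)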
diff --git a/amalthea/config-pwr-state.h b/amalthea/config-pwr-state.h
new file mode 100644
index 0000000..2712797
--- /dev/null
+++ b/amalthea/config-pwr-state.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Chip-dependent power configuration and states.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_CONFIG_PWR_STATE_H__
+#define __AMALTHEA_CONFIG_PWR_STATE_H__
+
+enum aur_power_rate {
+ AUR_OFF_RATE = 0,
+ AUR_UUD_RATE = 178000,
+ AUR_SUD_RATE = 373000,
+ AUR_UD_RATE = 750000,
+ AUR_NOM_RATE = 1155000,
+ AUR_READY_RATE = 178000,
+ AUR_UUD_PLUS_RATE = 268000,
+ AUR_SUD_PLUS_RATE = 560000,
+ AUR_UD_PLUS_RATE = 975000,
+};
+
+enum aur_mem_int_rate {
+ AUR_MEM_INT_MIN = 0,
+ AUR_MEM_INT_VERY_LOW = 0,
+ AUR_MEM_INT_LOW = 200000,
+ AUR_MEM_INT_HIGH = 332000,
+ AUR_MEM_INT_VERY_HIGH = 465000,
+ AUR_MEM_INT_MAX = 533000,
+};
+
+enum aur_mem_mif_rate {
+ AUR_MEM_MIF_MIN = 0,
+ AUR_MEM_MIF_VERY_LOW = 0,
+ AUR_MEM_MIF_LOW = 1014000,
+ AUR_MEM_MIF_HIGH = 1352000,
+ AUR_MEM_MIF_VERY_HIGH = 2028000,
+ AUR_MEM_MIF_MAX = 3172000,
+};
+
+#endif /* __AMALTHEA_CONFIG_PWR_STATE_H__ */
diff --git a/amalthea/config.h b/amalthea/config.h
index 19afff6..34f658b 100644
--- a/amalthea/config.h
+++ b/amalthea/config.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Include all configuration files for Amalthea.
*
@@ -8,10 +8,33 @@
#ifndef __AMALTHEA_CONFIG_H__
#define __AMALTHEA_CONFIG_H__
+#include <linux/sizes.h>
+
#define GXP_DRIVER_NAME "gxp_platform"
+#define DSP_FIRMWARE_DEFAULT_PREFIX "gxp_fw_core"
+
+#define AUR_DVFS_DOMAIN 17
#define GXP_NUM_CORES 4
+#define GXP_NUM_MAILBOXES GXP_NUM_CORES
+#define GXP_NUM_WAKEUP_DOORBELLS GXP_NUM_CORES
+
+/* The total size of the configuration region. */
+#define GXP_SHARED_BUFFER_SIZE SZ_256K
+/* Size of slice per VD. */
+#define GXP_SHARED_SLICE_SIZE 0x9000 /* 36K */
+/* At most GXP_NUM_CORES VDs can be supported on Amalthea. */
+#define GXP_NUM_SHARED_SLICES GXP_NUM_CORES
+
+#define GXP_USE_LEGACY_MAILBOX 1
+
+#define GXP_HAS_MCU 0
+#include "config-pwr-state.h"
+#include "context.h"
#include "csrs.h"
+#include "iova.h"
+#include "lpm.h"
+#include "mailbox-regs.h"
#endif /* __AMALTHEA_CONFIG_H__ */
diff --git a/amalthea/context.h b/amalthea/context.h
new file mode 100644
index 0000000..8afc99c
--- /dev/null
+++ b/amalthea/context.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Amalthea context related macros.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_CONTEXT_H__
+#define __AMALTHEA_CONTEXT_H__
+
+/* The stream IDs used for each core. */
+#define INST_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (0 << 3))
+#define DATA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (1 << 3))
+#define IDMA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4))
+
+#endif /* __AMALTHEA_CONTEXT_H__ */
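As a worked example (illustrative only), the macros above set bit 6, place the core index in bits 5:4, and use bit 3 as an instruction/data selector; for core 2:

    INST_SID_FOR_CORE(2) == (1 << 6) | (2 << 4) | (0 << 3) == 0x60
    DATA_SID_FOR_CORE(2) == (1 << 6) | (2 << 4) | (1 << 3) == 0x68
    IDMA_SID_FOR_CORE(2) == (1 << 6) | (2 << 4)            == 0x60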
diff --git a/amalthea/csrs.h b/amalthea/csrs.h
index a8b8d07..8fee289 100644
--- a/amalthea/csrs.h
+++ b/amalthea/csrs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Amalthea CSR definitions.
*
@@ -16,11 +16,6 @@
enum gxp_csrs {
GXP_REG_LPM_VERSION = 0x40000,
- GXP_REG_LPM_PSM_0 = 0x41000,
- GXP_REG_LPM_PSM_1 = 0x42000,
- GXP_REG_LPM_PSM_2 = 0x43000,
- GXP_REG_LPM_PSM_3 = 0x44000,
- GXP_REG_LPM_PSM_4 = 0x45000,
GXP_REG_AURORA_REVISION = 0x80000,
GXP_REG_COMMON_INT_POL_0 = 0x81000,
GXP_REG_COMMON_INT_POL_1 = 0x81004,
@@ -49,14 +44,20 @@ enum gxp_csrs {
#define GXP_REG_COMMON_INT_MASK_0_DOORBELLS_MASK 0xFFFFFFFF
#define GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT 16
-enum gxp_core_csrs {
- GXP_REG_INST_BPM = 0x0000,
- GXP_REG_PROFILING_CONDITION = 0x4000,
- GXP_REG_PROCESSOR_ID = 0x4004,
- GXP_REG_ALT_RESET_VECTOR = 0x4008,
- GXP_REG_COMMON_INT_MASK_0 = 0x4010,
- GXP_REG_ETM_PWRCTL = 0xB020,
-};
+/* Helpers for calculating per-core CSR offsets. */
+#define GXP_CORE_0_BASE GXP_REG_CORE_0_INST_BPM
+#define GXP_CORE_SIZE (GXP_REG_CORE_1_INST_BPM - GXP_REG_CORE_0_INST_BPM)
+#define GXP_CORE_REG(core, off) (GXP_CORE_0_BASE + GXP_CORE_SIZE * core + off)
+
+/* Per-core CSRs. @core should be in the range 0 ~ GXP_NUM_CORES-1. */
+#define GXP_CORE_REG_INST_BPM(core) GXP_CORE_REG(core, 0x0)
+#define GXP_CORE_REG_PROFILING_CONDITION(core) GXP_CORE_REG(core, 0x4000)
+#define GXP_CORE_REG_PROCESSOR_ID(core) GXP_CORE_REG(core, 0x4004)
+#define GXP_CORE_REG_ALT_RESET_VECTOR(core) GXP_CORE_REG(core, 0x4008)
+#define GXP_CORE_REG_COMMON_INT_MASK_0(core) GXP_CORE_REG(core, 0x4010)
+#define GXP_CORE_REG_COMMON_INT_MASK_1(core) GXP_CORE_REG(core, 0x4014)
+#define GXP_CORE_REG_DEDICATED_INT_MASK(core) GXP_CORE_REG(core, 0x401C)
+#define GXP_CORE_REG_ETM_PWRCTL(core) GXP_CORE_REG(core, 0xB020)
#define SYNC_BARRIER_SHADOW_OFFSET 0x800
@@ -73,50 +74,4 @@ enum gxp_core_csrs {
#define PLL_CON0_PLL_AUR 0x100
#define PLL_CON0_NOC_USER 0x610
-/* LPM Registers */
-#define LPM_VERSION_OFFSET 0x0
-#define TRIGGER_CSR_START_OFFSET 0x4
-#define IMEM_START_OFFSET 0x8
-#define LPM_CONFIG_OFFSET 0xC
-#define PSM_DESCRIPTOR_OFFSET 0x10
-#define EVENTS_EN_OFFSET 0x100
-#define EVENTS_INV_OFFSET 0x140
-#define FUNCTION_SELECT_OFFSET 0x180
-#define TRIGGER_STATUS_OFFSET 0x184
-#define EVENT_STATUS_OFFSET 0x188
-#define OPS_OFFSET 0x800
-#define PSM_DESCRIPTOR_BASE(_x_) ((_x_) << 2)
-#define PSM_DESCRIPTOR_COUNT 5
-#define EVENTS_EN_BASE(_x_) ((_x_) << 2)
-#define EVENTS_EN_COUNT 16
-#define EVENTS_INV_BASE(_x_) ((_x_) << 2)
-#define EVENTS_INV_COUNT 16
-#define OPS_BASE(_x_) ((_x_) << 2)
-#define OPS_COUNT 128
-#define PSM_COUNT 5
-#define PSM_STATE_TABLE_BASE(_x_) ((_x_) << 8)
-#define PSM_STATE_TABLE_COUNT 6
-#define PSM_TRANS_BASE(_x_) ((_x_) << 5)
-#define PSM_TRANS_COUNT 4
-#define PSM_DMEM_BASE(_x_) ((_x_) << 2)
-#define PSM_DATA_COUNT 32
-#define PSM_NEXT_STATE_OFFSET 0x0
-#define PSM_SEQ_ADDR_OFFSET 0x4
-#define PSM_TIMER_VAL_OFFSET 0x8
-#define PSM_TIMER_EN_OFFSET 0xC
-#define PSM_TRIGGER_NUM_OFFSET 0x10
-#define PSM_TRIGGER_EN_OFFSET 0x14
-#define PSM_ENABLE_STATE_OFFSET 0x80
-#define PSM_DATA_OFFSET 0x600
-#define PSM_CFG_OFFSET 0x680
-#define PSM_START_OFFSET 0x684
-#define PSM_STATUS_OFFSET 0x688
-#define PSM_DEBUG_CFG_OFFSET 0x68C
-#define PSM_BREAK_ADDR_OFFSET 0x694
-#define PSM_GPIN_LO_RD_OFFSET 0x6A0
-#define PSM_GPIN_HI_RD_OFFSET 0x6A4
-#define PSM_GPOUT_LO_RD_OFFSET 0x6B0
-#define PSM_GPOUT_HI_RD_OFFSET 0x6B4
-#define PSM_DEBUG_STATUS_OFFSET 0x6B8
-
#endif /* __AMALTHEA_CSRS_H__ */
diff --git a/gxp-iova.h b/amalthea/iova.h
index 8b7de59..505e895 100644
--- a/gxp-iova.h
+++ b/amalthea/iova.h
@@ -1,11 +1,19 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* GXP IOVAs. The list of addresses for fixed device-side IOVAs
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2022 Google LLC
*/
-#ifndef __GXP_IOVAS_H__
-#define __GXP_IOVAS_H__
+
+#ifndef __AMALTHEA_IOVA_H__
+#define __AMALTHEA_IOVA_H__
+
+/*
+ * No local access path.
+ * Need to define GXP_IOVA_SYNC_BARRIERS and GXP_IOVA_AURORA_TOP in this
+ * case.
+ */
+#define GXP_HAS_LAP 0
#define GXP_IOVA_SYNC_BARRIERS (0x100000)
#define GXP_IOVA_MAILBOX(_x_) (0x18390000 + (_x_) * 0x00020000)
@@ -13,6 +21,7 @@
#define GXP_IOVA_AURORA_TOP (0x25C00000)
#define GXP_IOVA_FIRMWARE(_x_) (0xFA000000 + (_x_) * 0x0100000)
#define GXP_IOVA_FW_DATA (0xFA400000)
+#define GXP_IOVA_PRIV_FW_DATA (0xFA500000)
#define GXP_IOVA_TPU_MBX_BUFFER(_x_) (0xFE100000 + (_x_) * 0x00040000)
-#endif /* __GXP_IOVAS_H__ */
+#endif /* __AMALTHEA_IOVA_H__ */
diff --git a/amalthea/lpm.h b/amalthea/lpm.h
new file mode 100644
index 0000000..1d86d69
--- /dev/null
+++ b/amalthea/lpm.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Amalthea LPM chip-dependent settings.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_LPM_H__
+#define __AMALTHEA_LPM_H__
+
+#include <linux/types.h>
+
+enum gxp_lpm_psm {
+ LPM_PSM_CORE0,
+ LPM_PSM_CORE1,
+ LPM_PSM_CORE2,
+ LPM_PSM_CORE3,
+ LPM_PSM_TOP,
+ LPM_NUM_PSMS,
+};
+
+#define CORE_TO_PSM(core) (LPM_PSM_CORE0 + (core))
+
+enum lpm_psm_csrs {
+ LPM_REG_ENABLE_STATE_0 = 0x080,
+ LPM_REG_ENABLE_STATE_1 = 0x180,
+ LPM_REG_ENABLE_STATE_2 = 0x280,
+ LPM_REG_ENABLE_STATE_3 = 0x380,
+};
+
+/* offset from GXP_LPM_BASE */
+enum lpm_psm_base {
+ GXP_REG_LPM_PSM_0 = 0x1000,
+ GXP_REG_LPM_PSM_1 = 0x2000,
+ GXP_REG_LPM_PSM_2 = 0x3000,
+ GXP_REG_LPM_PSM_3 = 0x4000,
+ GXP_REG_LPM_PSM_4 = 0x5000,
+};
+
+#define LPM_STATE_TABLE_SIZE (LPM_REG_ENABLE_STATE_1 - LPM_REG_ENABLE_STATE_0)
+
+/* LPM address space starts at lpm_version register */
+#define GXP_LPM_BASE GXP_REG_LPM_VERSION
+#define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0
+#define GXP_LPM_PSM_SIZE (GXP_REG_LPM_PSM_1 - GXP_REG_LPM_PSM_0)
+
+/* LPM Registers */
+#define LPM_VERSION_OFFSET 0x0
+#define TRIGGER_CSR_START_OFFSET 0x4
+#define IMEM_START_OFFSET 0x8
+#define LPM_CONFIG_OFFSET 0xC
+#define PSM_DESCRIPTOR_OFFSET 0x10
+#define EVENTS_EN_OFFSET 0x100
+#define EVENTS_INV_OFFSET 0x140
+#define FUNCTION_SELECT_OFFSET 0x180
+#define TRIGGER_STATUS_OFFSET 0x184
+#define EVENT_STATUS_OFFSET 0x188
+#define OPS_OFFSET 0x800
+#define PSM_DESCRIPTOR_BASE(_x_) ((_x_) << 2)
+#define PSM_DESCRIPTOR_COUNT 5
+#define EVENTS_EN_BASE(_x_) ((_x_) << 2)
+#define EVENTS_EN_COUNT 16
+#define EVENTS_INV_BASE(_x_) ((_x_) << 2)
+#define EVENTS_INV_COUNT 16
+#define OPS_BASE(_x_) ((_x_) << 2)
+#define OPS_COUNT 128
+#define PSM_COUNT 5
+#define PSM_STATE_TABLE_BASE(_x_) ((_x_) << 8)
+#define PSM_STATE_TABLE_COUNT 6
+#define PSM_TRANS_BASE(_x_) ((_x_) << 5)
+#define PSM_TRANS_COUNT 4
+#define PSM_DMEM_BASE(_x_) ((_x_) << 2)
+#define PSM_DATA_COUNT 32
+#define PSM_NEXT_STATE_OFFSET 0x0
+#define PSM_SEQ_ADDR_OFFSET 0x4
+#define PSM_TIMER_VAL_OFFSET 0x8
+#define PSM_TIMER_EN_OFFSET 0xC
+#define PSM_TRIGGER_NUM_OFFSET 0x10
+#define PSM_TRIGGER_EN_OFFSET 0x14
+#define PSM_ENABLE_STATE_OFFSET 0x80
+#define PSM_DATA_OFFSET 0x600
+#define PSM_CFG_OFFSET 0x680
+#define PSM_START_OFFSET 0x684
+#define PSM_STATUS_OFFSET 0x688
+#define PSM_DEBUG_CFG_OFFSET 0x68C
+#define PSM_BREAK_ADDR_OFFSET 0x694
+#define PSM_GPIN_LO_RD_OFFSET 0x6A0
+#define PSM_GPIN_HI_RD_OFFSET 0x6A4
+#define PSM_GPOUT_LO_RD_OFFSET 0x6B0
+#define PSM_GPOUT_HI_RD_OFFSET 0x6B4
+#define PSM_DEBUG_STATUS_OFFSET 0x6B8
+
+static inline u32 gxp_lpm_psm_get_status_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) +
+ PSM_STATUS_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_start_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + PSM_START_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_cfg_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + PSM_CFG_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_state_offset(enum gxp_lpm_psm psm, uint state)
+{
+ uint reg_offset;
+
+ if (psm >= LPM_NUM_PSMS || state > 3)
+ return 0;
+
+ switch (state) {
+ case 0:
+ reg_offset = LPM_REG_ENABLE_STATE_0;
+ break;
+ case 1:
+ reg_offset = LPM_REG_ENABLE_STATE_1;
+ break;
+ case 2:
+ reg_offset = LPM_REG_ENABLE_STATE_2;
+ break;
+ case 3:
+ reg_offset = LPM_REG_ENABLE_STATE_3;
+ break;
+ }
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+}
+
+#endif /* __AMALTHEA_LPM_H__ */
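As a worked example (illustrative only), the inline helpers above resolve to fixed offsets relative to GXP_LPM_BASE, e.g. for LPM_PSM_CORE1 (psm == 1) and LPM_PSM_TOP (psm == 4):

    gxp_lpm_psm_get_status_offset(LPM_PSM_CORE1)
        == GXP_REG_LPM_PSM_0 + GXP_LPM_PSM_SIZE * 1 + PSM_STATUS_OFFSET
        == 0x1000 + 0x1000 + 0x688 == 0x2688

    gxp_lpm_psm_get_state_offset(LPM_PSM_TOP, 2)
        == GXP_REG_LPM_PSM_0 + GXP_LPM_PSM_SIZE * 4 + LPM_REG_ENABLE_STATE_2
        == 0x1000 + 0x4000 + 0x280 == 0x5280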
diff --git a/amalthea/mailbox-regs.h b/amalthea/mailbox-regs.h
new file mode 100644
index 0000000..0a2fb27
--- /dev/null
+++ b/amalthea/mailbox-regs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GXP mailbox registers.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __AMALTHEA_MAILBOX_REGS_H__
+#define __AMALTHEA_MAILBOX_REGS_H__
+
+/* Mailbox CSRs */
+#define MBOX_MCUCTLR_OFFSET 0x0000
+
+#define MBOX_INTGR0_OFFSET 0x0020
+#define MBOX_INTMSR0_OFFSET 0x0030
+
+#define MBOX_INTCR1_OFFSET 0x0044
+#define MBOX_INTMR1_OFFSET 0x0048
+#define MBOX_INTSR1_OFFSET 0x004C
+#define MBOX_INTMSR1_OFFSET 0x0050
+
+/* Mailbox Shared Data Registers */
+#define MBOX_DATA_REG_BASE 0x0080
+
+#define MBOX_DATA_STATUS_OFFSET 0x00
+#define MBOX_DATA_DESCRIPTOR_ADDR_OFFSET 0x04
+#define MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET 0x08
+#define MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET 0x0C
+
+#endif /* __AMALTHEA_MAILBOX_REGS_H__ */
diff --git a/gcip-kernel-driver/drivers/gcip/Makefile b/gcip-kernel-driver/drivers/gcip/Makefile
new file mode 100644
index 0000000..7de0874
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for GCIP framework.
+#
+
+CONFIG_GCIP ?= m
+obj-$(CONFIG_GCIP) += gcip.o
+
+gcip-objs := gcip-alloc-helper.o \
+ gcip-dma-fence.o \
+ gcip-domain-pool.o \
+ gcip-firmware.o \
+ gcip-image-config.o \
+ gcip-kci.o \
+ gcip-mailbox.o \
+ gcip-mem-pool.o \
+ gcip-pm.o \
+ gcip-telemetry.o \
+ gcip-thermal.o
+
+CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+ccflags-y += -I$(CURRENT_DIR)/../../include
+
+ifdef CONFIG_GCIP_TEST
+obj-y += unittests/
+include $(srctree)/drivers/gcip/unittests/Makefile.include
+$(call include_test_path, $(gcip-objs))
+endif
+
+modules modules_install clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) $(@)
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
new file mode 100644
index 0000000..4008dff
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-alloc-helper.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GCIP helpers for allocating memory.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/page.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-alloc-helper.h>
+
+/*
+ * Set @pages to the pages @mem represents.
+ * @mem must be a pointer returned by vmalloc.
+ *
+ * Returns 0 on success, -ENOMEM when any page is NULL.
+ */
+static int gcip_vmalloc_to_pages(void *mem, size_t count, struct page **pages)
+{
+ size_t i = 0;
+
+ while (count--) {
+ pages[i] = vmalloc_to_page(mem);
+ if (!pages[i])
+ return -ENOMEM;
+ i++;
+ mem += PAGE_SIZE;
+ }
+ return 0;
+}
+
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct gcip_sgt_handle *sh = kmalloc(sizeof(*sh), gfp);
+ void *mem;
+ struct page **pages;
+ size_t count;
+ int ret;
+
+ if (!sh)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = size >> PAGE_SHIFT;
+ if (gfp & __GFP_ZERO)
+ mem = vzalloc(size);
+ else
+ mem = vmalloc(size);
+ if (!mem) {
+ dev_err(dev, "GCIP noncontiguous alloc size=%#zx failed", size);
+ goto err_free_sh;
+ }
+
+ pages = kmalloc_array(count, sizeof(*pages), gfp);
+ if (!pages) {
+ dev_err(dev, "GCIP alloc pages array count=%zu failed", count);
+ goto err_free_mem;
+ }
+
+ if (gcip_vmalloc_to_pages(mem, count, pages)) {
+ dev_err(dev, "convert memory to pages failed");
+ goto err_free_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(&sh->sgt, pages, count, 0, size, gfp);
+ if (ret) {
+ dev_err(dev, "alloc SG table with size=%#zx failed: %d", size, ret);
+ goto err_free_pages;
+ }
+
+ kfree(pages);
+ sh->mem = mem;
+ return &sh->sgt;
+
+err_free_pages:
+ kfree(pages);
+err_free_mem:
+ vfree(mem);
+err_free_sh:
+ kfree(sh);
+ return NULL;
+}
+
+void gcip_free_noncontiguous(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ sg_free_table(&sh->sgt);
+ vfree(sh->mem);
+ kfree(sh);
+}
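A minimal caller sketch of the two helpers above (hypothetical code, not part of this change): allocate a zeroed, vmalloc-backed non-contiguous buffer, use its sg_table, then free it.

static int example_use_noncontiguous(struct device *dev)
{
	struct sg_table *sgt;

	/* Four zero-filled pages; gcip_alloc_noncontiguous() returns NULL on failure. */
	sgt = gcip_alloc_noncontiguous(dev, 4 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
	if (!sgt)
		return -ENOMEM;

	/* ... DMA-map @sgt or map it into an IOMMU domain for the device ... */

	gcip_free_noncontiguous(sgt);
	return 0;
}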
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c b/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c
new file mode 100644
index 0000000..ca49526
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-dma-fence.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GCIP support of DMA fences.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dma-fence.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sync_file.h>
+#include <linux/time.h>
+
+#include <gcip/gcip-dma-fence.h>
+
+#define to_gfence(fence) container_of(fence, struct gcip_dma_fence, fence)
+
+static int _gcip_dma_fence_signal(struct dma_fence *fence, int error, bool ignore_signaled)
+{
+ int ret;
+
+ if (error > 0)
+ error = -error;
+ if (unlikely(error < -MAX_ERRNO))
+ return -EINVAL;
+
+ spin_lock_irq(fence->lock);
+ /* don't signal fence twice */
+ if (unlikely(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) {
+ ret = ignore_signaled ? 0 : -EBUSY;
+ goto out_unlock;
+ }
+ if (error)
+ dma_fence_set_error(fence, error);
+ ret = dma_fence_signal_locked(fence);
+
+out_unlock:
+ spin_unlock_irq(fence->lock);
+ return ret;
+}
+
+static const char *sync_status_str(int status)
+{
+ if (status < 0)
+ return "error";
+ if (status > 0)
+ return "signaled";
+ return "active";
+}
+
+struct gcip_dma_fence_manager *gcip_dma_fence_manager_create(struct device *dev)
+{
+ struct gcip_dma_fence_manager *mgr = devm_kzalloc(dev, sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&mgr->fence_list_head);
+ spin_lock_init(&mgr->fence_list_lock);
+ mgr->dev = dev;
+
+ return mgr;
+}
+
+const char *gcip_dma_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct gcip_dma_fence *gfence = to_gfence(fence);
+
+ return gfence->timeline_name;
+}
+
+bool gcip_dma_fence_always_true(struct dma_fence *fence)
+{
+ return true;
+}
+
+int gcip_dma_fence_init(struct gcip_dma_fence_manager *mgr, struct gcip_dma_fence *gfence,
+ struct gcip_dma_fence_data *data)
+{
+ unsigned long flags;
+ int fd;
+ struct sync_file *sync_file;
+ int ret;
+
+ strscpy(gfence->timeline_name, data->timeline_name, GCIP_FENCE_TIMELINE_NAME_LEN);
+
+ spin_lock_init(&gfence->lock);
+ INIT_LIST_HEAD(&gfence->fence_list);
+ gfence->mgr = mgr;
+
+ dma_fence_init(&gfence->fence, data->ops, &gfence->lock, dma_fence_context_alloc(1),
+ data->seqno);
+ GCIP_DMA_FENCE_LIST_LOCK(mgr, flags);
+ list_add_tail(&gfence->fence_list, &mgr->fence_list_head);
+ GCIP_DMA_FENCE_LIST_UNLOCK(mgr, flags);
+
+ if (data->after_init) {
+ ret = data->after_init(gfence);
+ if (ret) {
+ dev_err(mgr->dev, "DMA fence init failed on after_init: %d", ret);
+ goto err_put_fence;
+ }
+ }
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ dev_err(mgr->dev, "Failed to get FD: %d", ret);
+ goto err_put_fence;
+ }
+ sync_file = sync_file_create(&gfence->fence);
+ if (!sync_file) {
+ dev_err(mgr->dev, "Failed to create sync file");
+ ret = -ENOMEM;
+ goto err_put_fd;
+ }
+ /* sync_file holds the reference to fence, so we can drop our reference. */
+ dma_fence_put(&gfence->fence);
+
+ fd_install(fd, sync_file->file);
+ data->fence = fd;
+ return 0;
+
+err_put_fd:
+ put_unused_fd(fd);
+err_put_fence:
+ dma_fence_put(&gfence->fence);
+ return ret;
+}
+
+void gcip_dma_fence_exit(struct gcip_dma_fence *gfence)
+{
+ unsigned long flags;
+
+ GCIP_DMA_FENCE_LIST_LOCK(gfence->mgr, flags);
+ list_del(&gfence->fence_list);
+ GCIP_DMA_FENCE_LIST_UNLOCK(gfence->mgr, flags);
+}
+
+int gcip_dma_fence_status(int fence, int *status)
+{
+ struct dma_fence *fencep;
+
+ fencep = sync_file_get_fence(fence);
+ if (!fencep)
+ return -EBADF;
+ *status = dma_fence_get_status(fencep);
+ dma_fence_put(fencep);
+ return 0;
+}
+
+int gcip_dma_fence_signal(int fence, int error, bool ignore_signaled)
+{
+ struct dma_fence *fencep;
+ int ret;
+
+ fencep = sync_file_get_fence(fence);
+ if (!fencep)
+ return -EBADF;
+ ret = _gcip_dma_fence_signal(fencep, error, ignore_signaled);
+ dma_fence_put(fencep);
+ return ret;
+}
+
+int gcip_dma_fenceptr_signal(struct gcip_dma_fence *gfence, int error, bool ignore_signaled)
+{
+ return _gcip_dma_fence_signal(&gfence->fence, error, ignore_signaled);
+}
+
+void gcip_dma_fence_show(struct gcip_dma_fence *gfence, struct seq_file *s)
+{
+ struct dma_fence *fence = &gfence->fence;
+
+ spin_lock_irq(&gfence->lock);
+
+ seq_printf(s, "%s-%s %llu-%llu %s", fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence), fence->context, fence->seqno,
+ sync_status_str(dma_fence_get_status_locked(fence)));
+
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
+ struct timespec64 ts = ktime_to_timespec64(fence->timestamp);
+
+ seq_printf(s, " @%lld.%09ld", (s64)ts.tv_sec, ts.tv_nsec);
+ }
+
+ if (fence->error)
+ seq_printf(s, " err=%d", fence->error);
+
+ spin_unlock_irq(&gfence->lock);
+}
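A hypothetical caller sketch of the sync-file FD helpers above (the function name and error policy are illustrative assumptions): query a fence FD received from user space and signal it if it is still active.

static int example_signal_fence_fd(int fd)
{
	int status, ret;

	ret = gcip_dma_fence_status(fd, &status);
	if (ret)
		return ret;	/* -EBADF: @fd is not a sync file */

	if (status == 0)	/* fence still active */
		return gcip_dma_fence_signal(fd, /*error=*/0, /*ignore_signaled=*/true);

	return 0;
}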
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
new file mode 100644
index 0000000..882aa80
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-domain-pool.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
+
+#include <gcip/gcip-domain-pool.h>
+
+struct dynamic_domain {
+ struct list_head list_entry;
+ struct iommu_domain *domain;
+};
+
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size)
+{
+ unsigned int i;
+ struct iommu_domain *domain;
+
+ pool->size = size;
+ pool->dev = dev;
+ INIT_LIST_HEAD(&pool->dynamic_domains);
+ mutex_init(&pool->lock);
+
+ if (!size)
+ return 0;
+
+ dev_dbg(pool->dev, "Initializing domain pool with %u domains\n", size);
+
+ ida_init(&pool->idp);
+ pool->array = vzalloc(sizeof(*pool->array) * size);
+ if (!pool->array) {
+ ida_destroy(&pool->idp);
+ return -ENOMEM;
+ }
+ for (i = 0; i < size; i++) {
+ domain = iommu_domain_alloc(dev->bus);
+ if (!domain) {
+ dev_err(pool->dev, "Failed to allocate iommu domain %d of %u\n", i + 1,
+ size);
+ gcip_domain_pool_destroy(pool);
+ return -ENOMEM;
+ }
+
+ pool->array[i] = domain;
+ }
+ return 0;
+}
+
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool)
+{
+ int id;
+ struct dynamic_domain *ddomain;
+
+ if (!pool->size) {
+ ddomain = vzalloc(sizeof(*ddomain));
+ if (!ddomain)
+ return NULL;
+
+ ddomain->domain = iommu_domain_alloc(pool->dev->bus);
+ if (!ddomain->domain) {
+ vfree(ddomain);
+ return NULL;
+ }
+ mutex_lock(&pool->lock);
+ list_add_tail(&ddomain->list_entry, &pool->dynamic_domains);
+ mutex_unlock(&pool->lock);
+ return ddomain->domain;
+ }
+
+ id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+
+ if (id < 0) {
+ dev_err(pool->dev, "No more domains available from pool of size %u\n", pool->size);
+ return NULL;
+ }
+
+ dev_dbg(pool->dev, "Allocated domain from pool with id = %d\n", id);
+
+ return pool->array[id];
+}
+
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain)
+{
+ int id;
+ struct dynamic_domain *ddomain;
+ struct list_head *cur, *nxt;
+
+ if (!pool->size) {
+ mutex_lock(&pool->lock);
+ list_for_each_safe(cur, nxt, &pool->dynamic_domains) {
+ ddomain = container_of(cur, struct dynamic_domain, list_entry);
+ if (ddomain->domain == domain) {
+ list_del(&ddomain->list_entry);
+ mutex_unlock(&pool->lock);
+ iommu_domain_free(domain);
+ vfree(ddomain);
+ return;
+ }
+ }
+ mutex_unlock(&pool->lock);
+ return;
+ }
+
+ for (id = 0; id < pool->size; id++) {
+ if (pool->array[id] == domain) {
+ dev_dbg(pool->dev, "Released domain from pool with id = %d\n", id);
+ ida_free(&pool->idp, id);
+ return;
+ }
+ }
+ dev_err(pool->dev, "Domain not found in pool\n");
+}
+
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool)
+{
+ int i;
+ struct dynamic_domain *ddomain;
+ struct list_head *cur, *nxt;
+
+ if (!pool->size) {
+ mutex_lock(&pool->lock);
+ list_for_each_safe(cur, nxt, &pool->dynamic_domains) {
+ ddomain = container_of(cur, struct dynamic_domain, list_entry);
+ list_del(&ddomain->list_entry);
+ iommu_domain_free(ddomain->domain);
+ vfree(ddomain);
+ }
+ mutex_unlock(&pool->lock);
+ return;
+ }
+
+ dev_dbg(pool->dev, "Destroying domain pool with %u domains\n", pool->size);
+
+ for (i = 0; i < pool->size; i++) {
+ if (pool->array[i])
+ iommu_domain_free(pool->array[i]);
+ }
+
+ ida_destroy(&pool->idp);
+ vfree(pool->array);
+}
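An illustrative-only sketch of the pool lifecycle above; a size of 0 would instead make gcip_domain_pool_alloc() fall back to allocating domains on demand.

static int example_domain_pool(struct device *dev)
{
	struct gcip_domain_pool pool;
	struct iommu_domain *domain;
	int ret;

	ret = gcip_domain_pool_init(dev, &pool, 4);	/* pre-allocate 4 domains */
	if (ret)
		return ret;

	domain = gcip_domain_pool_alloc(&pool);
	if (domain) {
		/* ... attach @domain and map buffers ... */
		gcip_domain_pool_free(&pool, domain);
	}

	gcip_domain_pool_destroy(&pool);
	return 0;
}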
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-firmware.c b/gcip-kernel-driver/drivers/gcip/gcip-firmware.c
new file mode 100644
index 0000000..52c3940
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-firmware.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GCIP firmware interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <gcip/gcip-firmware.h>
+#include <gcip/gcip-pm.h>
+
+char *gcip_fw_flavor_str(enum gcip_fw_flavor fw_flavor)
+{
+ switch (fw_flavor) {
+ case GCIP_FW_FLAVOR_BL1:
+ return "stage 2 bootloader";
+ case GCIP_FW_FLAVOR_SYSTEST:
+ return "test";
+ case GCIP_FW_FLAVOR_PROD_DEFAULT:
+ return "prod";
+ case GCIP_FW_FLAVOR_CUSTOM:
+ return "custom";
+ case GCIP_FW_FLAVOR_UNKNOWN:
+ default:
+ return "unknown";
+ }
+}
+
+static int gcip_firmware_tracing_active_get(void *data, u64 *val)
+{
+ struct gcip_fw_tracing *fw_tracing = data;
+
+ mutex_lock(&fw_tracing->lock);
+ *val = fw_tracing->active_level;
+ mutex_unlock(&fw_tracing->lock);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_gcip_firmware_tracing_active, gcip_firmware_tracing_active_get, NULL,
+ "%llu\n");
+
+static int gcip_firmware_tracing_request_get(void *data, u64 *val)
+{
+ struct gcip_fw_tracing *fw_tracing = data;
+
+ mutex_lock(&fw_tracing->lock);
+ *val = fw_tracing->request_level;
+ mutex_unlock(&fw_tracing->lock);
+
+ return 0;
+}
+
+static int gcip_firmware_tracing_set_level_lock(struct gcip_fw_tracing *fw_tracing)
+{
+ unsigned long active_level;
+ int ret = fw_tracing->set_level(fw_tracing->data, fw_tracing->request_level, &active_level);
+
+ if (ret)
+ dev_warn(fw_tracing->dev, "Failed to set firmware tracing level to %lu: %d",
+ fw_tracing->request_level, ret);
+ else
+ fw_tracing->active_level =
+ (fw_tracing->request_level & GCIP_FW_TRACING_DEFAULT_VOTE) ?
+ GCIP_FW_TRACING_DEFAULT_VOTE :
+ active_level;
+
+ return ret;
+}
+
+static int gcip_firmware_tracing_request_set(void *data, u64 val)
+{
+ struct gcip_fw_tracing *fw_tracing = data;
+ int ret = 0;
+
+ mutex_lock(&fw_tracing->lock);
+
+ fw_tracing->request_level = val;
+ if (!gcip_pm_get_if_powered(fw_tracing->pm, false)) {
+ ret = gcip_firmware_tracing_set_level_lock(fw_tracing);
+ gcip_pm_put(fw_tracing->pm);
+ }
+
+ mutex_unlock(&fw_tracing->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_gcip_firmware_tracing_request, gcip_firmware_tracing_request_get,
+ gcip_firmware_tracing_request_set, "%llu\n");
+
+struct gcip_fw_tracing *gcip_firmware_tracing_create(const struct gcip_fw_tracing_args *args)
+{
+ struct gcip_fw_tracing *fw_tracing;
+
+ if (!args->dev || !args->set_level)
+ return ERR_PTR(-EINVAL);
+
+ fw_tracing = kzalloc(sizeof(*fw_tracing), GFP_KERNEL);
+ if (!fw_tracing)
+ return ERR_PTR(-ENOMEM);
+
+ fw_tracing->dev = args->dev;
+ fw_tracing->pm = args->pm;
+ fw_tracing->set_level = args->set_level;
+ fw_tracing->data = args->data;
+ fw_tracing->active_level = GCIP_FW_TRACING_DEFAULT_VOTE;
+ fw_tracing->request_level = GCIP_FW_TRACING_DEFAULT_VOTE;
+ mutex_init(&fw_tracing->lock);
+
+ fw_tracing->dentry = debugfs_create_dir("fw_tracing", args->dentry);
+	if (IS_ERR(fw_tracing->dentry)) {
+		struct dentry *dentry = fw_tracing->dentry;
+
+		dev_warn(args->dev, "Failed to create debug FS tracing");
+		kfree(fw_tracing);
+
+		/* Don't dereference @fw_tracing after kfree(); return the saved error pointer. */
+		return ERR_CAST(dentry);
+	}
+
+ debugfs_create_file("active", 0440, fw_tracing->dentry, fw_tracing,
+ &fops_gcip_firmware_tracing_active);
+ debugfs_create_file("request", 0660, fw_tracing->dentry, fw_tracing,
+ &fops_gcip_firmware_tracing_request);
+
+ return fw_tracing;
+}
+
+void gcip_firmware_tracing_destroy(struct gcip_fw_tracing *fw_tracing)
+{
+ if (!fw_tracing)
+ return;
+
+ debugfs_remove_recursive(fw_tracing->dentry);
+ kfree(fw_tracing);
+}
+
+int gcip_firmware_tracing_restore_on_powering(struct gcip_fw_tracing *fw_tracing)
+{
+ int ret = 0;
+
+ if (!fw_tracing)
+ return 0;
+
+ gcip_pm_lockdep_assert_held(fw_tracing->pm);
+ mutex_lock(&fw_tracing->lock);
+
+ fw_tracing->active_level = GCIP_FW_TRACING_DEFAULT_VOTE;
+ if (!(fw_tracing->request_level & GCIP_FW_TRACING_DEFAULT_VOTE))
+ ret = gcip_firmware_tracing_set_level_lock(fw_tracing);
+
+ mutex_unlock(&fw_tracing->lock);
+
+ return ret;
+}
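A hypothetical wiring sketch for the firmware-tracing debugfs interface above. The set_level callback prototype is inferred from the call site in gcip_firmware_tracing_set_level_lock(); the authoritative definition of struct gcip_fw_tracing_args lives in gcip-firmware.h, which is not shown in this diff.

/* Inferred prototype: report back the level the firmware actually applied. */
static int example_set_level(void *data, unsigned long level, unsigned long *active_level)
{
	/* ... forward @level to the firmware ... */
	*active_level = level;
	return 0;
}

static struct gcip_fw_tracing *example_tracing_create(struct device *dev, struct gcip_pm *pm,
						      struct dentry *dbg_root)
{
	const struct gcip_fw_tracing_args args = {
		.dev = dev,
		.pm = pm,
		.dentry = dbg_root,
		.set_level = example_set_level,
		.data = NULL,
	};

	return gcip_firmware_tracing_create(&args);	/* ERR_PTR() on failure */
}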
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-image-config.c b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
new file mode 100644
index 0000000..98a3546
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-image-config.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Framework for parsing the firmware image configuration.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <gcip/gcip-image-config.h>
+
+static int setup_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int i, ret;
+ dma_addr_t daddr;
+ size_t size;
+ phys_addr_t paddr;
+
+ for (i = 0; i < config->num_iommu_mappings; i++) {
+ daddr = config->iommu_mappings[i].virt_address;
+ if (unlikely(!daddr)) {
+ dev_warn(parser->dev, "Invalid config, device address is zero");
+ ret = -EIO;
+ goto err;
+ }
+ size = gcip_config_to_size(config->iommu_mappings[i].image_config_value);
+ paddr = config->iommu_mappings[i].image_config_value & GCIP_IMG_CFG_ADDR_MASK;
+
+ dev_dbg(parser->dev, "Image config adding IOMMU mapping: %pad -> %pap", &daddr,
+ &paddr);
+
+ if (unlikely(daddr + size <= daddr || paddr + size <= paddr)) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ ret = parser->ops->map(parser->data, daddr, paddr, size,
+ GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ if (ret) {
+ dev_err(parser->dev,
+ "Unable to Map: %d dma_addr: %pad phys_addr: %pap size: %#lx\n",
+ ret, &daddr, &paddr, size);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ daddr = config->iommu_mappings[i].virt_address;
+ size = gcip_config_to_size(config->iommu_mappings[i].image_config_value);
+ parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ }
+ return ret;
+}
+
+static void clear_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int i;
+
+ for (i = config->num_iommu_mappings - 1; i >= 0; i--) {
+ daddr = config->iommu_mappings[i].virt_address;
+ size = gcip_config_to_size(config->iommu_mappings[i].image_config_value);
+ dev_dbg(parser->dev, "Image config removing IOMMU mapping: %pad size=%#lx", &daddr,
+ size);
+ parser->ops->unmap(parser->data, daddr, size, GCIP_IMAGE_CONFIG_FLAGS_SECURE);
+ }
+}
+
+static int setup_ns_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int ret, i;
+ phys_addr_t paddr = 0;
+
+ for (i = 0; i < config->num_ns_iommu_mappings; i++) {
+ daddr = config->ns_iommu_mappings[i] & GCIP_IMG_CFG_ADDR_MASK;
+ if (unlikely(!daddr)) {
+ dev_warn(parser->dev, "Invalid config, device address is zero");
+ ret = -EIO;
+ goto err;
+ }
+ size = gcip_ns_config_to_size(config->ns_iommu_mappings[i]);
+ dev_dbg(parser->dev, "Image config adding NS IOMMU mapping: %pad -> %pap", &daddr,
+ &paddr);
+ if (unlikely(daddr + size <= daddr || paddr + size <= paddr)) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ ret = parser->ops->map(parser->data, daddr, paddr, size, 0);
+ if (ret)
+ goto err;
+ paddr += size;
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ size = gcip_ns_config_to_size(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & GCIP_IMG_CFG_ADDR_MASK;
+ parser->ops->unmap(parser->data, daddr, size, 0);
+ }
+ return ret;
+}
+
+static void clear_ns_iommu_mappings(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ dma_addr_t daddr;
+ size_t size;
+ int i;
+
+ for (i = config->num_ns_iommu_mappings - 1; i >= 0; i--) {
+ size = gcip_ns_config_to_size(config->ns_iommu_mappings[i]);
+ daddr = config->ns_iommu_mappings[i] & GCIP_IMG_CFG_ADDR_MASK;
+ dev_dbg(parser->dev, "Image config removing NS IOMMU mapping: %pad size=%#lx",
+ &daddr, size);
+ parser->ops->unmap(parser->data, daddr, size, 0);
+ }
+}
+
+static int map_image_config(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int ret = setup_ns_iommu_mappings(parser, config);
+
+ if (ret)
+ return ret;
+ if (gcip_image_config_is_ns(config)) {
+ ret = setup_iommu_mappings(parser, config);
+ if (ret)
+ clear_ns_iommu_mappings(parser, config);
+ }
+ return ret;
+}
+
+static void unmap_image_config(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ if (gcip_image_config_is_ns(config))
+ clear_iommu_mappings(parser, config);
+ clear_ns_iommu_mappings(parser, config);
+}
+
+int gcip_image_config_parser_init(struct gcip_image_config_parser *parser,
+ const struct gcip_image_config_ops *ops, struct device *dev,
+ void *data)
+{
+ if (!ops->map || !ops->unmap) {
+ dev_err(dev, "Missing mandatory operations for image config parser");
+ return -EINVAL;
+ }
+ parser->dev = dev;
+ parser->data = data;
+ parser->ops = ops;
+ memset(&parser->last_config, 0, sizeof(parser->last_config));
+ return 0;
+}
+
+int gcip_image_config_parse(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config)
+{
+ int ret;
+
+ if (!memcmp(config, &parser->last_config, sizeof(*config)))
+ return 0;
+ unmap_image_config(parser, &parser->last_config);
+ ret = map_image_config(parser, config);
+ if (ret) {
+ dev_err(parser->dev, "Map image config failed: %d", ret);
+ /*
+		 * Weird case, as the mappings in the last config were just removed - this might
+		 * happen if the IOMMU driver state is corrupted. We can't do much to rescue it,
+		 * so simply log a message.
+ */
+ if (unlikely(map_image_config(parser, &parser->last_config)))
+ dev_err(parser->dev, "Failed to roll back the last image config");
+ return ret;
+ }
+ memcpy(&parser->last_config, config, sizeof(parser->last_config));
+ return 0;
+}
+
+void gcip_image_config_clear(struct gcip_image_config_parser *parser)
+{
+ unmap_image_config(parser, &parser->last_config);
+ memset(&parser->last_config, 0, sizeof(parser->last_config));
+}
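An illustrative-only flow for the parser above; @my_ops must supply the .map/.unmap callbacks declared in gcip-image-config.h (not shown in this diff).

static int example_image_config_flow(struct gcip_image_config_parser *parser, struct device *dev,
				     const struct gcip_image_config_ops *my_ops,
				     struct gcip_image_config *cfg_from_image)
{
	int ret;

	ret = gcip_image_config_parser_init(parser, my_ops, dev, /*data=*/NULL);
	if (ret)
		return ret;

	/* Call on every firmware (re)load; a config identical to the last parsed one is a no-op. */
	ret = gcip_image_config_parse(parser, cfg_from_image);
	if (ret)
		return ret;

	/* On shutdown, drop whatever the last parsed config mapped. */
	gcip_image_config_clear(parser);
	return 0;
}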
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-kci.c b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
new file mode 100644
index 0000000..c3da416
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-kci.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel Control Interface, implements the protocol between AP kernel and GCIP firmware.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* memcpy */
+
+#include <gcip/gcip-kci.h>
+#include <gcip/gcip-mailbox.h>
+
+static u32 gcip_kci_get_cmd_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_cmd_queue_head(kci);
+}
+
+static u32 gcip_kci_get_cmd_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_cmd_queue_tail(kci);
+}
+
+static void gcip_kci_inc_cmd_queue_tail(struct gcip_mailbox *mailbox, u32 inc)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ lockdep_assert_held(&kci->cmd_queue_lock);
+ kci->ops->inc_cmd_queue_tail(kci, inc);
+}
+
+static int gcip_kci_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox, bool try)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ mutex_lock(&kci->cmd_queue_lock);
+ return 1;
+}
+
+static void gcip_kci_release_cmd_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ mutex_unlock(&kci->cmd_queue_lock);
+}
+
+static u64 gcip_kci_get_cmd_elem_seq(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ return elem->seq;
+}
+
+static u32 gcip_kci_get_cmd_elem_code(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ return elem->code;
+}
+
+static void gcip_kci_set_cmd_elem_seq(struct gcip_mailbox *mailbox, void *cmd, u64 seq)
+{
+ struct gcip_kci_command_element *elem = cmd;
+
+ if (!(elem->seq & GCIP_KCI_REVERSE_FLAG))
+ elem->seq = seq;
+}
+
+static u32 gcip_kci_get_resp_queue_size(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_size(kci);
+}
+
+static u32 gcip_kci_get_resp_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_head(kci);
+}
+
+static u32 gcip_kci_get_resp_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ return kci->ops->get_resp_queue_tail(kci);
+}
+
+static void gcip_kci_inc_resp_queue_head(struct gcip_mailbox *mailbox, u32 inc)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ lockdep_assert_held(&kci->resp_queue_lock);
+ kci->ops->inc_resp_queue_head(kci, inc);
+}
+
+static int gcip_kci_acquire_resp_queue_lock(struct gcip_mailbox *mailbox, bool try)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (try)
+ return spin_trylock(&kci->resp_queue_lock);
+
+ spin_lock(&kci->resp_queue_lock);
+ return 1;
+}
+
+static void gcip_kci_release_resp_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ spin_unlock(&kci->resp_queue_lock);
+}
+
+static u64 gcip_kci_get_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ return elem->seq;
+}
+
+static void gcip_kci_set_resp_elem_seq(struct gcip_mailbox *mailbox, void *resp, u64 seq)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ elem->seq = seq;
+}
+
+static u16 gcip_kci_get_resp_elem_status(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ return elem->status;
+}
+
+static void gcip_kci_set_resp_elem_status(struct gcip_mailbox *mailbox, void *resp, u16 status)
+{
+ struct gcip_kci_response_element *elem = resp;
+
+ elem->status = status;
+}
+
+static void gcip_kci_acquire_wait_list_lock(struct gcip_mailbox *mailbox, bool irqsave,
+ unsigned long *flags)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (irqsave)
+ spin_lock_irqsave(&kci->wait_list_lock, *flags);
+ else
+ spin_lock(&kci->wait_list_lock);
+}
+
+static void gcip_kci_release_wait_list_lock(struct gcip_mailbox *mailbox, bool irqrestore,
+ unsigned long flags)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+
+ if (irqrestore)
+ spin_unlock_irqrestore(&kci->wait_list_lock, flags);
+ else
+ spin_unlock(&kci->wait_list_lock);
+}
+
+static int gcip_kci_wait_for_cmd_queue_not_full(struct gcip_mailbox *mailbox)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ u32 tail = kci->ops->get_cmd_queue_tail(kci);
+ int ret;
+
+ ret = wait_event_timeout(kci->resp_doorbell_waitq,
+ kci->ops->get_cmd_queue_head(kci) !=
+ (tail ^ mailbox->queue_wrap_bit),
+ msecs_to_jiffies(mailbox->timeout));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int gcip_kci_after_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ struct gcip_kci_command_element *elem = cmd;
+
+ kci->ops->trigger_doorbell(kci, GCIP_KCI_PUSH_CMD);
+ if (!(elem->seq & GCIP_KCI_REVERSE_FLAG))
+ return 1;
+ return 0;
+}
+
+static void gcip_kci_after_fetch_resps(struct gcip_mailbox *mailbox, u32 num_resps)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ u32 size = kci->ops->get_resp_queue_size(kci);
+
+ /*
+	 * We consumed a lot of responses - ring the doorbell of the *cmd* queue to notify the
+	 * firmware, which might be waiting for us to consume the response queue.
+ */
+ if (num_resps >= size / 2)
+ kci->ops->trigger_doorbell(kci, GCIP_KCI_CONSUME_RESP);
+}
+
+/*
+ * Adds an incoming request from firmware to the circular buffer and schedules the work queue for
+ * processing.
+ */
+static int gcip_reverse_kci_add_resp(struct gcip_kci *kci,
+ const struct gcip_kci_response_element *resp)
+{
+ struct gcip_reverse_kci *rkci = &kci->rkci;
+ unsigned long head, tail, flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&rkci->producer_lock, flags);
+ head = rkci->head;
+ tail = READ_ONCE(rkci->tail);
+ if (CIRC_SPACE(head, tail, rkci->buffer_size) >= 1) {
+ rkci->buffer[head] = *resp;
+ smp_store_release(&rkci->head, (head + 1) & (rkci->buffer_size - 1));
+ schedule_work(&rkci->work);
+ } else {
+ ret = -ENOSPC;
+ }
+ spin_unlock_irqrestore(&rkci->producer_lock, flags);
+
+ return ret;
+}
+
+static bool gcip_kci_before_handle_resp(struct gcip_mailbox *mailbox, const void *resp)
+{
+ struct gcip_kci *kci = gcip_mailbox_get_data(mailbox);
+ const struct gcip_kci_response_element *elem = resp;
+
+ if (elem->seq & GCIP_KCI_REVERSE_FLAG) {
+ int ret = gcip_reverse_kci_add_resp(kci, elem);
+
+ if (ret)
+ dev_warn_ratelimited(kci->dev,
+ "Failed to handle reverse KCI code %u (%d)\n",
+ elem->code, ret);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct gcip_mailbox_ops gcip_mailbox_ops = {
+ .get_cmd_queue_head = gcip_kci_get_cmd_queue_head,
+ .get_cmd_queue_tail = gcip_kci_get_cmd_queue_tail,
+ .inc_cmd_queue_tail = gcip_kci_inc_cmd_queue_tail,
+ .acquire_cmd_queue_lock = gcip_kci_acquire_cmd_queue_lock,
+ .release_cmd_queue_lock = gcip_kci_release_cmd_queue_lock,
+ .get_cmd_elem_seq = gcip_kci_get_cmd_elem_seq,
+ .set_cmd_elem_seq = gcip_kci_set_cmd_elem_seq,
+ .get_cmd_elem_code = gcip_kci_get_cmd_elem_code,
+ .get_resp_queue_size = gcip_kci_get_resp_queue_size,
+ .get_resp_queue_head = gcip_kci_get_resp_queue_head,
+ .get_resp_queue_tail = gcip_kci_get_resp_queue_tail,
+ .inc_resp_queue_head = gcip_kci_inc_resp_queue_head,
+ .acquire_resp_queue_lock = gcip_kci_acquire_resp_queue_lock,
+ .release_resp_queue_lock = gcip_kci_release_resp_queue_lock,
+ .get_resp_elem_seq = gcip_kci_get_resp_elem_seq,
+ .set_resp_elem_seq = gcip_kci_set_resp_elem_seq,
+ .get_resp_elem_status = gcip_kci_get_resp_elem_status,
+ .set_resp_elem_status = gcip_kci_set_resp_elem_status,
+ .acquire_wait_list_lock = gcip_kci_acquire_wait_list_lock,
+ .release_wait_list_lock = gcip_kci_release_wait_list_lock,
+ .wait_for_cmd_queue_not_full = gcip_kci_wait_for_cmd_queue_not_full,
+ .after_enqueue_cmd = gcip_kci_after_enqueue_cmd,
+ .after_fetch_resps = gcip_kci_after_fetch_resps,
+ .before_handle_resp = gcip_kci_before_handle_resp,
+};
+
+/*
+ * Pushes an element to cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of response, or a negative errno on error.
+ * @resp is updated with the response, as to retrieve returned retval field.
+ */
+int gcip_kci_send_cmd_return_resp(struct gcip_kci *kci, struct gcip_kci_command_element *cmd,
+ struct gcip_kci_response_element *resp)
+{
+ int ret;
+
+ ret = gcip_mailbox_send_cmd(&kci->mailbox, cmd, resp);
+ if (ret || !resp)
+ return ret;
+
+ return resp->code;
+}
+
+int gcip_kci_send_cmd(struct gcip_kci *kci, struct gcip_kci_command_element *cmd)
+{
+ struct gcip_kci_response_element resp;
+
+ /* Don't wait on a response for reverse KCI response. */
+ if (cmd->seq & GCIP_KCI_REVERSE_FLAG)
+ return gcip_kci_send_cmd_return_resp(kci, cmd, NULL);
+ else
+ return gcip_kci_send_cmd_return_resp(kci, cmd, &resp);
+}
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ *
+ * Note: this worker is scheduled in the IRQ handler. To prevent use-after-free or race-condition
+ * bugs, gcip_kci_cancel_work_queues() must be called before freeing the mailbox.
+ */
+static void gcip_kci_consume_responses_work(struct work_struct *work)
+{
+ struct gcip_kci *kci = container_of(work, struct gcip_kci, work);
+
+ gcip_mailbox_consume_responses_work(&kci->mailbox);
+}
+
+/*
+ * IRQ handler of KCI mailbox.
+ *
+ * Consumes one response (if any) and puts gcip_kci_consume_responses_work() into the system work
+ * queue.
+ */
+void gcip_kci_handle_irq(struct gcip_kci *kci)
+{
+ struct gcip_kci_response_element resp;
+
+ /* Wakes up threads that are waiting for response doorbell to be rung. */
+ wake_up(&kci->resp_doorbell_waitq);
+
+ /*
+	 * Quickly consume one response here, which should be enough for the usual case, in case
+	 * the host is too busy to execute the scheduled work promptly.
+ */
+ gcip_mailbox_consume_one_response(&kci->mailbox, &resp);
+
+ schedule_work(&kci->work);
+}
+
+static void gcip_kci_update_usage_work(struct work_struct *work)
+{
+ struct gcip_kci *kci = container_of(work, struct gcip_kci, usage_work);
+
+ kci->ops->update_usage(kci);
+}
+
+void gcip_kci_update_usage_async(struct gcip_kci *kci)
+{
+ schedule_work(&kci->usage_work);
+}
+
+/* Removes one element from the circular buffer. */
+static int gcip_reverse_kci_remove_resp(struct gcip_reverse_kci *rkci,
+ struct gcip_kci_response_element *resp)
+{
+ unsigned long head, tail;
+ int ret = 0;
+
+ spin_lock(&rkci->consumer_lock);
+
+ /*
+	 * Prevents the compiler from discarding and reloading its cached value, and additionally
+	 * forces the CPU to order against subsequent memory references.
+ * Shamelessly stolen from:
+ * https://www.kernel.org/doc/html/latest/core-api/circular-buffers.html
+ */
+ head = smp_load_acquire(&rkci->head);
+ tail = rkci->tail;
+ if (CIRC_CNT(head, tail, rkci->buffer_size) >= 1) {
+ *resp = rkci->buffer[tail];
+ tail = (tail + 1) & (rkci->buffer_size - 1);
+ ret = 1;
+ smp_store_release(&rkci->tail, tail);
+ }
+ spin_unlock(&rkci->consumer_lock);
+ return ret;
+}
+
+/* Worker for incoming requests from firmware. */
+static void gcip_reverse_kci_work(struct work_struct *work)
+{
+ struct gcip_kci_response_element resp;
+ struct gcip_reverse_kci *rkci = container_of(work, struct gcip_reverse_kci, work);
+ struct gcip_kci *kci = container_of(rkci, struct gcip_kci, rkci);
+
+ while (gcip_reverse_kci_remove_resp(rkci, &resp))
+ kci->ops->reverse_kci_handle_response(kci, &resp);
+}
+
+/* Initializes the Reverse KCI handler. */
+static int gcip_reverse_kci_init(struct gcip_reverse_kci *rkci, struct device *dev, u32 buffer_size)
+{
+ if (rkci->buffer)
+ return 0;
+
+ rkci->buffer_size = buffer_size;
+ rkci->buffer = devm_kcalloc(dev, buffer_size, sizeof(*rkci->buffer), GFP_KERNEL);
+ if (!rkci->buffer)
+ return -ENOMEM;
+
+ spin_lock_init(&rkci->producer_lock);
+ spin_lock_init(&rkci->consumer_lock);
+ INIT_WORK(&rkci->work, gcip_reverse_kci_work);
+
+ return 0;
+}
+
+/* Verifies and sets the KCI operators. */
+static int gcip_kci_set_ops(struct gcip_kci *kci, const struct gcip_kci_ops *ops)
+{
+ if (!ops) {
+ kci->ops = NULL;
+ return 0;
+ }
+
+ if (!ops->get_cmd_queue_head || !ops->get_cmd_queue_tail || !ops->inc_cmd_queue_tail) {
+ dev_err(kci->dev, "Incomplete KCI CMD queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->get_resp_queue_size || !ops->get_resp_queue_head || !ops->get_resp_queue_tail ||
+ !ops->inc_resp_queue_head) {
+ dev_err(kci->dev, "Incomplete KCI RESP queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->trigger_doorbell) {
+ dev_err(kci->dev, "Incomplete KCI ops. Missing trigger_doorbell.\n");
+ return -EINVAL;
+ }
+
+ kci->ops = ops;
+
+ return 0;
+}
+
+/* Sets the KCI private data. */
+static inline void gcip_kci_set_data(struct gcip_kci *kci, void *data)
+{
+ kci->data = data;
+}
+
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args)
+{
+ int ret;
+ struct gcip_mailbox_args mailbox_args;
+
+ if (kci->ops)
+ return 0;
+
+ kci->dev = args->dev;
+ gcip_kci_set_data(kci, args->data);
+
+ ret = gcip_kci_set_ops(kci, args->ops);
+ if (ret)
+ goto err_unset_data;
+
+ ret = gcip_reverse_kci_init(&kci->rkci, kci->dev, args->rkci_buffer_size);
+ if (ret)
+ goto err_unset_ops;
+
+ mailbox_args.dev = args->dev;
+ mailbox_args.queue_wrap_bit = args->queue_wrap_bit;
+ mailbox_args.cmd_queue = args->cmd_queue;
+ mailbox_args.cmd_elem_size = sizeof(struct gcip_kci_command_element);
+ mailbox_args.resp_queue = args->resp_queue;
+ mailbox_args.resp_elem_size = sizeof(struct gcip_kci_response_element);
+ mailbox_args.timeout = args->timeout;
+ mailbox_args.ops = &gcip_mailbox_ops;
+ mailbox_args.data = kci;
+ mailbox_args.ignore_seq_order = false;
+
+ ret = gcip_mailbox_init(&kci->mailbox, &mailbox_args);
+ if (ret)
+ goto err_unset_ops;
+
+ mutex_init(&kci->cmd_queue_lock);
+ spin_lock_init(&kci->resp_queue_lock);
+ spin_lock_init(&kci->wait_list_lock);
+ init_waitqueue_head(&kci->resp_doorbell_waitq);
+ INIT_WORK(&kci->work, gcip_kci_consume_responses_work);
+ INIT_WORK(&kci->usage_work, gcip_kci_update_usage_work);
+
+ return 0;
+err_unset_ops:
+ gcip_kci_set_ops(kci, NULL);
+err_unset_data:
+ gcip_kci_set_data(kci, NULL);
+
+ return ret;
+}
+
+void gcip_kci_cancel_work_queues(struct gcip_kci *kci)
+{
+ cancel_work_sync(&kci->usage_work);
+ cancel_work_sync(&kci->work);
+ cancel_work_sync(&kci->rkci.work);
+}
+
+void gcip_kci_release(struct gcip_kci *kci)
+{
+ kci->rkci.buffer = NULL;
+ gcip_kci_set_ops(kci, NULL);
+ gcip_kci_set_data(kci, NULL);
+
+ /*
+ * Non-empty @kci->wait_list means someone (gcip_kci_send_cmd) is waiting for a response.
+ *
+	 * Since this function should only be called when removing a device, it should be
+	 * impossible to reach here while gcip_kci_send_cmd() is still waiting (rmmod should
+	 * fail). Add a simple check so the problem is easier to diagnose if it does happen.
+ */
+ if (!list_empty(gcip_kci_get_wait_list(kci)))
+ dev_warn(kci->dev, "KCI commands still pending.\n");
+ gcip_mailbox_release(&kci->mailbox);
+}
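+
+/*
+ * Usage sketch (illustrative only): how a chip driver is expected to wire up a KCI instance.
+ * The names my_kci_ops, my_cmd_queue, my_resp_queue, MY_QUEUE_WRAP_BIT and MY_KCI_TIMEOUT_MS are
+ * hypothetical placeholders for chip-specific pieces; only the gcip_kci_* calls are real. Note
+ * that rkci_buffer_size should be a power of two, since the reverse KCI ring index math masks
+ * with (buffer_size - 1).
+ *
+ *	static int my_kci_setup(struct device *dev, struct gcip_kci *kci, void *priv)
+ *	{
+ *		const struct gcip_kci_args args = {
+ *			.dev = dev,
+ *			.cmd_queue = my_cmd_queue,
+ *			.resp_queue = my_resp_queue,
+ *			.queue_wrap_bit = MY_QUEUE_WRAP_BIT,
+ *			.rkci_buffer_size = 32,
+ *			.timeout = MY_KCI_TIMEOUT_MS,
+ *			.ops = &my_kci_ops,
+ *			.data = priv,
+ *		};
+ *
+ *		return gcip_kci_init(kci, &args);
+ *	}
+ *
+ * On removal, the caller should stop the workers before tearing the KCI down:
+ * gcip_kci_cancel_work_queues(kci) followed by gcip_kci_release(kci).
+ */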
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
new file mode 100644
index 0000000..6d20771
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GCIP Mailbox Interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* memcpy */
+#include <linux/wait.h>
+
+#include <gcip/gcip-mailbox.h>
+
+#if IS_ENABLED(CONFIG_GCIP_TEST)
+#include "unittests/helper/gcip-mailbox-controller.h"
+
+#define TEST_TRIGGER_TIMEOUT_RACE(awaiter) gcip_mailbox_controller_trigger_timeout_race(awaiter)
+#else
+#define TEST_TRIGGER_TIMEOUT_RACE(...)
+#endif
+
+#define GET_CMD_QUEUE_HEAD() mailbox->ops->get_cmd_queue_head(mailbox)
+#define GET_CMD_QUEUE_TAIL() mailbox->ops->get_cmd_queue_tail(mailbox)
+#define INC_CMD_QUEUE_TAIL(inc) mailbox->ops->inc_cmd_queue_tail(mailbox, inc)
+#define ACQUIRE_CMD_QUEUE_LOCK(try) mailbox->ops->acquire_cmd_queue_lock(mailbox, try)
+#define RELEASE_CMD_QUEUE_LOCK() mailbox->ops->release_cmd_queue_lock(mailbox)
+
+#define GET_CMD_ELEM_SEQ(cmd) mailbox->ops->get_cmd_elem_seq(mailbox, cmd)
+#define SET_CMD_ELEM_SEQ(cmd, seq) mailbox->ops->set_cmd_elem_seq(mailbox, cmd, seq)
+#define GET_CMD_ELEM_CODE(cmd) mailbox->ops->get_cmd_elem_code(mailbox, cmd)
+
+#define GET_RESP_QUEUE_SIZE() mailbox->ops->get_resp_queue_size(mailbox)
+#define GET_RESP_QUEUE_HEAD() mailbox->ops->get_resp_queue_head(mailbox)
+#define INC_RESP_QUEUE_HEAD(inc) mailbox->ops->inc_resp_queue_head(mailbox, inc)
+#define GET_RESP_QUEUE_TAIL() mailbox->ops->get_resp_queue_tail(mailbox)
+#define ACQUIRE_RESP_QUEUE_LOCK(try) mailbox->ops->acquire_resp_queue_lock(mailbox, try)
+#define RELEASE_RESP_QUEUE_LOCK() mailbox->ops->release_resp_queue_lock(mailbox)
+
+#define GET_RESP_ELEM_SEQ(resp) mailbox->ops->get_resp_elem_seq(mailbox, resp)
+#define SET_RESP_ELEM_SEQ(resp, seq) mailbox->ops->set_resp_elem_seq(mailbox, resp, seq)
+#define GET_RESP_ELEM_STATUS(resp) mailbox->ops->get_resp_elem_status(mailbox, resp)
+#define SET_RESP_ELEM_STATUS(resp, status) mailbox->ops->set_resp_elem_status(mailbox, resp, status)
+
+#define ACQUIRE_WAIT_LIST_LOCK(irqsave, flags) \
+ mailbox->ops->acquire_wait_list_lock(mailbox, irqsave, flags)
+#define RELEASE_WAIT_LIST_LOCK(irqrestore, flags) \
+ mailbox->ops->release_wait_list_lock(mailbox, irqrestore, flags)
+
+struct gcip_mailbox_wait_list_elem {
+ struct list_head list;
+ void *resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+};
+
+static void gcip_mailbox_awaiter_release(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (awaiter->release_data)
+ awaiter->release_data(awaiter->data);
+ kfree(awaiter);
+}
+
+static void gcip_mailbox_awaiter_dec_refs(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (refcount_dec_and_test(&awaiter->refs))
+ gcip_mailbox_awaiter_release(awaiter);
+}
+
+/*
+ * Removes the response previously pushed with gcip_mailbox_push_wait_resp().
+ *
+ * This is used when the kernel gives up waiting for the response.
+ */
+static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_mailbox_wait_list_elem *cur;
+ unsigned long flags;
+ u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
+
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+
+ list_for_each_entry (cur, &mailbox->wait_list, list) {
+ cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ if (cur_seq > seq)
+ break;
+ if (cur_seq == seq) {
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
+ kfree(cur);
+ break;
+ }
+ }
+
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+}
+
+/*
+ * Adds @resp to @mailbox->wait_list. If @awaiter is not NULL, the @resp is asynchronous.
+ * Otherwise, the @resp is synchronous.
+ *
+ * wait_list is a FIFO queue, with sequence number in increasing order.
+ *
+ * Returns 0 on success, or -ENOMEM if failed on allocation.
+ */
+static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ struct gcip_mailbox_wait_list_elem *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ unsigned long flags;
+ int ret;
+
+ if (!entry)
+ return -ENOMEM;
+
+ if (mailbox->ops->before_enqueue_wait_list) {
+ ret = mailbox->ops->before_enqueue_wait_list(mailbox, resp, awaiter);
+ if (ret) {
+ kfree(entry);
+ return ret;
+ }
+ }
+
+ /* Increase a reference of arrived handler. */
+ if (awaiter)
+ refcount_inc(&awaiter->refs);
+
+ entry->resp = resp;
+ entry->awaiter = awaiter;
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+ list_add_tail(&entry->list, &mailbox->wait_list);
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+
+ return 0;
+}
+
+/*
+ * Pushes @cmd to the command queue of the mailbox and returns. @resp should be passed if the
+ * request is synchronous and the caller wants to get the response. If @resp is NULL even though
+ * the request is synchronous, @cmd will still be put into the queue, but the response is not
+ * tracked and the caller must ignore it. If the request is asynchronous, @awaiter must be passed
+ * as well.
+ */
+static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ int ret = 0;
+ u32 tail;
+
+ ACQUIRE_CMD_QUEUE_LOCK(false);
+
+ SET_CMD_ELEM_SEQ(cmd, mailbox->cur_seq);
+ /*
+ * The lock ensures mailbox cmd_queue_tail cannot be changed by other processes (this
+ * method should be the only one to modify the value of tail), therefore we can remember
+ * its value here and use it in the condition of wait_event() call.
+ */
+ tail = GET_CMD_QUEUE_TAIL();
+
+ if (mailbox->ops->wait_for_cmd_queue_not_full) {
+ /* Wait until the cmd queue has a space for putting cmd. */
+ ret = mailbox->ops->wait_for_cmd_queue_not_full(mailbox);
+ if (ret)
+ goto out;
+ } else if (GET_CMD_QUEUE_HEAD() == (tail ^ mailbox->queue_wrap_bit)) {
+ /*
+ * Default logic of checking the fullness of cmd_queue. If the cmd_queue is full,
+ * it's up to the caller to retry.
+ */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (resp) {
+ /* Adds @resp to the wait_list only if the cmd can be pushed successfully. */
+ SET_RESP_ELEM_SEQ(resp, GET_CMD_ELEM_SEQ(cmd));
+ SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_WAITING_RESPONSE);
+ ret = gcip_mailbox_push_wait_resp(mailbox, resp, awaiter);
+ if (ret)
+ goto out;
+ }
+ /* Size of cmd_queue is a multiple of mailbox->cmd_elem_size. */
+ memcpy(mailbox->cmd_queue + mailbox->cmd_elem_size *
+ CIRC_QUEUE_REAL_INDEX(tail, mailbox->queue_wrap_bit),
+ cmd, mailbox->cmd_elem_size);
+ INC_CMD_QUEUE_TAIL(1);
+ if (mailbox->ops->after_enqueue_cmd) {
+ ret = mailbox->ops->after_enqueue_cmd(mailbox, cmd);
+ if (ret < 0) {
+ /*
+				 * Currently neither DSP nor EdgeTPU returns errors here, so do
+				 * nothing. How to roll back state such as `cmd_queue_tail` can be
+				 * decided later, if returning an error ever becomes possible.
+ */
+ dev_warn(mailbox->dev,
+ "after_enqueue_cmd returned an error, but not handled: ret=%d\n",
+ ret);
+ goto out;
+ }
+ mailbox->cur_seq += ret;
+ ret = 0;
+ } else
+ mailbox->cur_seq += 1;
+
+out:
+ RELEASE_CMD_QUEUE_LOCK();
+ if (ret)
+ dev_dbg(mailbox->dev, "%s: ret=%d", __func__, ret);
+
+ return ret;
+}
+
+/*
+ * Handler of a response.
+ * Pops the wait_list until the sequence number of @resp is found, and copies @resp to the found
+ * entry.
+ *
+ * Both the entries in wait_list and the handled responses should have sequence numbers in
+ * increasing order. Comparing the #seq of the head of wait_list with @resp->seq, there are
+ * three cases:
+ * 1. #seq > @resp->seq:
+ * - Nothing to do, @resp is not needed and we're done.
+ * 2. #seq == @resp->seq:
+ * - Copy @resp, pop the head and we're done.
+ * 3. #seq < @resp->seq:
+ * - If @mailbox->ignore_seq_order is specified, this is a normal case and the entry is skipped.
+ *   - Otherwise, it *should* not happen: it implies that the sequence numbers of either the
+ *     wait_list entries or the responses are out of order, or that the remote did not respond
+ *     to a command. In this case, the status of the entry's response is set to
+ *     GCIP_MAILBOX_STATUS_NO_RESPONSE, and entries are popped until case 1 or 2 is reached.
+ */
+static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ struct gcip_mailbox_wait_list_elem *cur, *nxt;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ unsigned long flags;
+ u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
+
+ /* If before_handle_resp is defined and it returns false, don't handle the response */
+ if (mailbox->ops->before_handle_resp && !mailbox->ops->before_handle_resp(mailbox, resp))
+ return;
+
+ SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_OK);
+ ACQUIRE_WAIT_LIST_LOCK(true, &flags);
+
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ cur_seq = GET_RESP_ELEM_SEQ(cur->resp);
+ if (cur_seq > seq) {
+ /*
+ * This response has already timed out and been removed
+ * from the wait list (or this is an invalid response).
+ * Drop it.
+ */
+ break;
+ }
+ if (cur_seq == seq) {
+ memcpy(cur->resp, resp, mailbox->resp_elem_size);
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ awaiter = cur->awaiter;
+
+ /*
+				 * The timeout handler will be fired, but it will be blocked waiting
+				 * to acquire the wait_list_lock.
+ */
+ TEST_TRIGGER_TIMEOUT_RACE(awaiter);
+
+ /*
+ * If canceling timeout_work succeeded, we have to decrease the
+ * reference count here because the timeout handler will not be
+ * called. Otherwise, the timeout handler is already canceled or
+ * pending by race. If it is canceled, the count must be decreased
+ * already, and if it is pending, the timeout handler will decrease
+ * the awaiter reference.
+ */
+ if (cancel_delayed_work(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+ /*
+ * If `handle_awaiter_arrived` callback is defined, @awaiter
+ * will be released from the implementation side. Otherwise, it
+ * should be freed from here.
+ */
+ if (mailbox->ops->handle_awaiter_arrived)
+ mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+ }
+ kfree(cur);
+ break;
+ }
+ if (!mailbox->ignore_seq_order && cur_seq < seq) {
+ SET_RESP_ELEM_STATUS(cur->resp, GCIP_MAILBOX_STATUS_NO_RESPONSE);
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ }
+ kfree(cur);
+ }
+ }
+
+ RELEASE_WAIT_LIST_LOCK(true, flags);
+}
+
+/*
+ * Fetches elements in the response queue.
+ *
+ * Returns a pointer to the fetched response elements.
+ * @total_ptr will be set to the number of elements fetched.
+ *
+ * Returns ERR_PTR(-ENOMEM) if memory allocation fails.
+ * Returns NULL if the response queue is empty or another worker is already fetching responses.
+ */
+static void *gcip_mailbox_fetch_responses(struct gcip_mailbox *mailbox, u32 *total_ptr)
+{
+ u32 head;
+ u32 tail;
+ u32 count;
+ u32 i;
+ u32 j;
+ u32 total = 0;
+ const u32 wrap_bit = mailbox->queue_wrap_bit;
+ const u32 size = GET_RESP_QUEUE_SIZE();
+ const u32 elem_size = mailbox->resp_elem_size;
+ void *ret = NULL; /* Array of responses. */
+ void *prev_ptr = NULL; /* Temporary pointer to realloc ret. */
+
+ /* Someone is working on consuming - we can leave early. */
+ if (!ACQUIRE_RESP_QUEUE_LOCK(true))
+ goto out;
+
+ head = GET_RESP_QUEUE_HEAD();
+	/* Loops until our head equals the CSR tail. */
+ while (1) {
+ tail = GET_RESP_QUEUE_TAIL();
+ /*
+ * Make sure the CSR is read and reported properly by checking if any bit higher
+ * than wrap_bit is set and if the tail exceeds resp_queue size.
+ */
+ if (unlikely(tail & ~CIRC_QUEUE_VALID_MASK(wrap_bit) ||
+ CIRC_QUEUE_REAL_INDEX(tail, wrap_bit) >= size)) {
+ dev_err_ratelimited(mailbox->dev, "Invalid response queue tail: %#x\n",
+ tail);
+ break;
+ }
+
+ count = gcip_circ_queue_cnt(head, tail, size, wrap_bit);
+ if (count == 0)
+ break;
+
+ prev_ptr = ret;
+ ret = krealloc(prev_ptr, (total + count) * elem_size, GFP_KERNEL);
+ /*
+		 * Out of memory: return the previously fetched responses if there are any,
+		 * or ERR_PTR(-ENOMEM) otherwise.
+ */
+ if (!ret) {
+ if (!prev_ptr)
+ ret = ERR_PTR(-ENOMEM);
+ else
+ ret = prev_ptr;
+ break;
+ }
+ /* Copies responses. */
+ j = CIRC_QUEUE_REAL_INDEX(head, wrap_bit);
+ for (i = 0; i < count; i++) {
+ memcpy(ret + elem_size * total, mailbox->resp_queue + elem_size * j,
+ elem_size);
+ j = (j + 1) % size;
+ total++;
+ }
+ head = gcip_circ_queue_inc(head, count, size, wrap_bit);
+ }
+ INC_RESP_QUEUE_HEAD(total);
+
+ RELEASE_RESP_QUEUE_LOCK();
+
+ if (mailbox->ops->after_fetch_resps)
+ mailbox->ops->after_fetch_resps(mailbox, total);
+out:
+ *total_ptr = total;
+ return ret;
+}
+
+/* Fetches one response from the response queue. */
+static int gcip_mailbox_fetch_one_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ u32 head;
+ u32 tail;
+
+ if (!ACQUIRE_RESP_QUEUE_LOCK(true))
+ return 0;
+
+ head = GET_RESP_QUEUE_HEAD();
+ tail = GET_RESP_QUEUE_TAIL();
+ /* Queue empty. */
+ if (head == tail) {
+ RELEASE_RESP_QUEUE_LOCK();
+ return 0;
+ }
+
+ memcpy(resp,
+ mailbox->resp_queue + CIRC_QUEUE_REAL_INDEX(head, mailbox->queue_wrap_bit) *
+ mailbox->resp_elem_size,
+ mailbox->resp_elem_size);
+ INC_RESP_QUEUE_HEAD(1);
+
+ RELEASE_RESP_QUEUE_LOCK();
+
+ if (mailbox->ops->after_fetch_resps)
+ mailbox->ops->after_fetch_resps(mailbox, 1);
+
+ return 1;
+}
+
+/* Handles the timed out asynchronous commands. */
+static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
+{
+ struct gcip_mailbox_resp_awaiter *awaiter =
+ container_of(work, struct gcip_mailbox_resp_awaiter, timeout_work.work);
+ struct gcip_mailbox *mailbox = awaiter->mailbox;
+
+ /*
+ * This function will acquire the mailbox wait_list_lock. This means if
+ * response processing is in progress, it will complete before this
+ * response can be removed from the wait list.
+ *
+ * Once this function has the wait_list_lock, no future response
+ * processing will begin until this response has been removed.
+ */
+ gcip_mailbox_del_wait_resp(mailbox, awaiter->resp);
+
+ /*
+ * Handle timed out awaiter. If `handle_awaiter_timedout` is defined, @awaiter
+ * will be released from the implementation side. Otherwise, it should be freed from here.
+ */
+ if (mailbox->ops->handle_awaiter_timedout)
+ mailbox->ops->handle_awaiter_timedout(mailbox, awaiter);
+
+ /* Remove the reference of the timedout handler. */
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+/* Cleans up all the asynchronous responses that have not been responded to yet. */
+static void gcip_mailbox_flush_awaiter(struct gcip_mailbox *mailbox)
+{
+ struct gcip_mailbox_wait_list_elem *cur, *nxt;
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ struct list_head resps_to_flush;
+
+ /* If mailbox->ops is NULL, the mailbox is already released. */
+ if (!mailbox->ops)
+ return;
+
+ /*
+ * At this point only async responses should be pending. Flush them all
+ * from the `wait_list` at once so any remaining timeout workers
+ * waiting on `wait_list_lock` will know their responses have been
+ * handled already.
+ */
+ INIT_LIST_HEAD(&resps_to_flush);
+ ACQUIRE_WAIT_LIST_LOCK(false, NULL);
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ list_del(&cur->list);
+ if (cur->awaiter) {
+ list_add_tail(&cur->list, &resps_to_flush);
+ /*
+ * Clear the response's destination queue so that if the
+ * timeout worker is running, it won't try to process
+ * this response after `wait_list_lock` is released.
+ */
+ awaiter = cur->awaiter;
+ if (mailbox->ops->flush_awaiter)
+ mailbox->ops->flush_awaiter(mailbox, awaiter);
+ /* Remove the reference of the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ } else {
+ dev_warn(mailbox->dev,
+ "Unexpected synchronous command pending on mailbox release\n");
+ kfree(cur);
+ }
+ }
+ RELEASE_WAIT_LIST_LOCK(false, 0);
+
+ /*
+	 * Cancel the timeout work of, and free, any responses that were still in
+	 * the `wait_list` above.
+ */
+ list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
+ list_del(&cur->list);
+ awaiter = cur->awaiter;
+ /* Cancel the timeout work and remove the reference of the timedout handler. */
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ /* Remove the reference of the caller. */
+ gcip_mailbox_awaiter_dec_refs(cur->awaiter);
+ kfree(cur);
+ }
+}
+
+/* Verifies and sets the mailbox operators. */
+static int gcip_mailbox_set_ops(struct gcip_mailbox *mailbox, const struct gcip_mailbox_ops *ops)
+{
+ if (!ops) {
+ mailbox->ops = NULL;
+ return 0;
+ }
+
+ if (!ops->get_cmd_queue_head || !ops->get_cmd_queue_tail || !ops->inc_cmd_queue_tail ||
+ !ops->acquire_cmd_queue_lock || !ops->release_cmd_queue_lock ||
+ !ops->get_cmd_elem_seq || !ops->set_cmd_elem_seq || !ops->get_cmd_elem_code) {
+ dev_err(mailbox->dev, "Incomplete mailbox CMD queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->get_resp_queue_size || !ops->get_resp_queue_head || !ops->get_resp_queue_tail ||
+ !ops->inc_resp_queue_head || !ops->acquire_resp_queue_lock ||
+ !ops->release_resp_queue_lock || !ops->get_resp_elem_seq || !ops->set_resp_elem_seq ||
+ !ops->get_resp_elem_status || !ops->set_resp_elem_status) {
+ dev_err(mailbox->dev, "Incomplete mailbox RESP queue ops.\n");
+ return -EINVAL;
+ }
+
+ if (!ops->acquire_wait_list_lock || !ops->release_wait_list_lock) {
+ dev_err(mailbox->dev, "Incomplete mailbox wait_list ops.\n");
+ return -EINVAL;
+ }
+
+ mailbox->ops = ops;
+
+ return 0;
+}
+
+/* Sets the mailbox private data. */
+static inline void gcip_mailbox_set_data(struct gcip_mailbox *mailbox, void *data)
+{
+ mailbox->data = data;
+}
+
+int gcip_mailbox_init(struct gcip_mailbox *mailbox, const struct gcip_mailbox_args *args)
+{
+ int ret;
+
+ mailbox->dev = args->dev;
+ mailbox->queue_wrap_bit = args->queue_wrap_bit;
+ mailbox->cmd_queue = args->cmd_queue;
+ mailbox->cmd_elem_size = args->cmd_elem_size;
+ mailbox->resp_queue = args->resp_queue;
+ mailbox->resp_elem_size = args->resp_elem_size;
+ mailbox->timeout = args->timeout;
+ mailbox->cur_seq = 0;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
+ gcip_mailbox_set_data(mailbox, args->data);
+
+ ret = gcip_mailbox_set_ops(mailbox, args->ops);
+ if (ret)
+ goto err_unset_data;
+
+ INIT_LIST_HEAD(&mailbox->wait_list);
+ init_waitqueue_head(&mailbox->wait_list_waitq);
+
+ return 0;
+
+err_unset_data:
+ gcip_mailbox_set_data(mailbox, NULL);
+
+ return ret;
+}
+
+void gcip_mailbox_release(struct gcip_mailbox *mailbox)
+{
+ gcip_mailbox_flush_awaiter(mailbox);
+ gcip_mailbox_set_ops(mailbox, NULL);
+ gcip_mailbox_set_data(mailbox, NULL);
+}
+
+void gcip_mailbox_consume_responses_work(struct gcip_mailbox *mailbox)
+{
+ void *responses;
+ u32 i;
+ u32 count = 0;
+
+ /* Fetches responses and bumps resp_queue head. */
+ responses = gcip_mailbox_fetch_responses(mailbox, &count);
+ if (count == 0)
+ return;
+ if (IS_ERR(responses)) {
+ dev_err(mailbox->dev, "GCIP mailbox failed on fetching responses: %ld",
+ PTR_ERR(responses));
+ return;
+ }
+
+ for (i = 0; i < count; i++)
+ gcip_mailbox_handle_response(mailbox, responses + mailbox->resp_elem_size * i);
+ /* Responses handled, wake up threads that are waiting for a response. */
+ wake_up(&mailbox->wait_list_waitq);
+ kfree(responses);
+}
+
+int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp)
+{
+ int ret;
+
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, resp, NULL);
+ if (ret)
+ return ret;
+
+ if (!resp)
+ return 0;
+
+ ret = wait_event_timeout(mailbox->wait_list_waitq,
+ GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_WAITING_RESPONSE,
+ msecs_to_jiffies(mailbox->timeout));
+ if (!ret) {
+ dev_dbg(mailbox->dev, "event wait timeout");
+ gcip_mailbox_del_wait_resp(mailbox, resp);
+ return -ETIMEDOUT;
+ }
+ if (GET_RESP_ELEM_STATUS(resp) != GCIP_MAILBOX_STATUS_OK) {
+ dev_err(mailbox->dev, "Mailbox cmd %u response status %u", GET_CMD_ELEM_CODE(cmd),
+ GET_RESP_ELEM_STATUS(resp));
+ return -ENOMSG;
+ }
+
+ return 0;
+}
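+
+/*
+ * Usage sketch (illustrative only): a synchronous command/response exchange. struct my_cmd,
+ * struct my_resp and MY_CMD_PING are hypothetical and must match the cmd_elem_size and
+ * resp_elem_size the mailbox was initialized with; the sequence number is filled in by the
+ * mailbox itself.
+ *
+ *	struct my_cmd cmd = { .code = MY_CMD_PING };
+ *	struct my_resp resp;
+ *	int ret;
+ *
+ *	ret = gcip_mailbox_send_cmd(mailbox, &cmd, &resp);
+ *	if (ret)
+ *		return ret;
+ *
+ * A non-zero return is an enqueue error, -ETIMEDOUT, or -ENOMSG; on success @resp holds the
+ * remote's reply with status GCIP_MAILBOX_STATUS_OK.
+ */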
+
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data)
+{
+ struct gcip_mailbox_resp_awaiter *awaiter;
+ int ret;
+
+ awaiter = kzalloc(sizeof(*awaiter), GFP_KERNEL);
+ if (!awaiter)
+ return ERR_PTR(-ENOMEM);
+
+ awaiter->resp = resp;
+ awaiter->mailbox = mailbox;
+ awaiter->data = data;
+ awaiter->release_data = mailbox->ops->release_awaiter_data;
+ /* 2 refs: caller (vd) and timedout handler. */
+ refcount_set(&awaiter->refs, 2);
+
+ INIT_DELAYED_WORK(&awaiter->timeout_work, gcip_mailbox_async_cmd_timeout_work);
+ schedule_delayed_work(&awaiter->timeout_work, msecs_to_jiffies(mailbox->timeout));
+
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, awaiter->resp, awaiter);
+ if (ret)
+ goto err_free_resp;
+
+ return awaiter;
+
+err_free_resp:
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ kfree(awaiter);
+ return ERR_PTR(ret);
+}
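+
+/*
+ * Usage sketch (illustrative only): an asynchronous command. @ctx is a hypothetical
+ * caller-owned context that embeds the response element.
+ *
+ *	awaiter = gcip_mailbox_put_cmd(mailbox, &cmd, &ctx->resp, ctx);
+ *	if (IS_ERR(awaiter))
+ *		return PTR_ERR(awaiter);
+ *
+ * The result is delivered later through the handle_awaiter_arrived or handle_awaiter_timedout
+ * ops, and whoever holds the awaiter last drops it with gcip_mailbox_release_awaiter().
+ */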
+
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ gcip_mailbox_del_wait_resp(awaiter->mailbox, awaiter->resp);
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+}
+
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ if (cancel_delayed_work_sync(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
+{
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+}
+
+void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp)
+{
+ int ret;
+
+ /* Fetches (at most) one response. */
+ ret = gcip_mailbox_fetch_one_response(mailbox, resp);
+ if (!ret)
+ return;
+
+ gcip_mailbox_handle_response(mailbox, resp);
+
+ /* Responses handled, wakes up threads that are waiting for a response. */
+ wake_up(&mailbox->wait_list_waitq);
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c b/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c
new file mode 100644
index 0000000..564991b
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mem-pool.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A simple memory allocator to help allocate from reserved memory pools.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+
+#include <gcip/gcip-mem-pool.h>
+
+int gcip_mem_pool_init(struct gcip_mem_pool *pool, struct device *dev, unsigned long base_addr,
+ size_t size, size_t granule)
+{
+ int ret;
+
+ if (!base_addr || granule == 0)
+ return -EINVAL;
+ if (base_addr % granule || size % granule)
+ return -EINVAL;
+ pool->gen_pool = gen_pool_create(ilog2(granule), -1);
+ if (!pool->gen_pool) {
+ dev_err(dev, "gcip memory pool allocate gen_pool failed");
+ return -ENOMEM;
+ }
+ ret = gen_pool_add(pool->gen_pool, base_addr, size, -1);
+ if (ret) {
+ gen_pool_destroy(pool->gen_pool);
+ pool->gen_pool = NULL;
+ dev_err(dev, "gcip failed to add memory to mem pool: %d", ret);
+ return ret;
+ }
+ pool->dev = dev;
+ pool->granule = granule;
+ pool->base_addr = base_addr;
+ return 0;
+}
+
+void gcip_mem_pool_exit(struct gcip_mem_pool *pool)
+{
+ if (!pool->gen_pool)
+ return;
+ gen_pool_destroy(pool->gen_pool);
+ pool->gen_pool = NULL;
+}
+
+unsigned long gcip_mem_pool_alloc(struct gcip_mem_pool *pool, size_t size)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(pool->gen_pool, size);
+ if (!addr)
+ return 0;
+ dev_dbg(pool->dev, "%s @ size = %#zx addr=%#lx", __func__, size, addr);
+ return addr;
+}
+
+void gcip_mem_pool_free(struct gcip_mem_pool *pool, unsigned long addr, size_t size)
+{
+ dev_dbg(pool->dev, "%s @ size = %#zx addr=%#lx", __func__, size, addr);
+ size = ALIGN(size, pool->granule);
+ gen_pool_free(pool->gen_pool, addr, size);
+}
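+
+/*
+ * Usage sketch (illustrative only): the base address, size and granule below are made-up
+ * values that merely satisfy the alignment checks in gcip_mem_pool_init().
+ *
+ *	struct gcip_mem_pool pool;
+ *	unsigned long addr;
+ *	int ret;
+ *
+ *	ret = gcip_mem_pool_init(&pool, dev, 0x80000000, SZ_1M, SZ_4K);
+ *	if (ret)
+ *		return ret;
+ *	addr = gcip_mem_pool_alloc(&pool, SZ_8K);
+ *	if (addr)
+ *		gcip_mem_pool_free(&pool, addr, SZ_8K);
+ *	gcip_mem_pool_exit(&pool);
+ */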
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-pm.c b/gcip-kernel-driver/drivers/gcip/gcip-pm.c
new file mode 100644
index 0000000..b9907a1
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-pm.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Power management interface for GCIP devices.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <gcip/gcip-pm.h>
+
+#define GCIP_ASYNC_POWER_DOWN_RETRY_DELAY 200 /* ms */
+
+/* Caller must hold @pm->lock. */
+static void gcip_pm_try_power_down(struct gcip_pm *pm)
+{
+ int ret;
+
+ gcip_pm_lockdep_assert_held(pm);
+
+ ret = pm->power_down(pm->data);
+
+ if (ret == -EAGAIN) {
+ dev_warn(pm->dev, "Power down request denied, retrying in %d ms\n",
+ GCIP_ASYNC_POWER_DOWN_RETRY_DELAY);
+ pm->power_down_pending = true;
+ schedule_delayed_work(&pm->power_down_work,
+ msecs_to_jiffies(GCIP_ASYNC_POWER_DOWN_RETRY_DELAY));
+ } else {
+ if (ret)
+ dev_err(pm->dev, "Power down request failed (%d)\n", ret);
+ pm->power_down_pending = false;
+ }
+}
+
+/* Worker for async power down. */
+static void gcip_pm_async_power_down_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct gcip_pm *pm = container_of(dwork, struct gcip_pm, power_down_work);
+
+ mutex_lock(&pm->lock);
+
+ if (pm->power_down_pending)
+ gcip_pm_try_power_down(pm);
+ else
+ dev_info(pm->dev, "Delayed power down cancelled\n");
+
+ mutex_unlock(&pm->lock);
+}
+
+struct gcip_pm *gcip_pm_create(const struct gcip_pm_args *args)
+{
+ struct gcip_pm *pm;
+ int ret;
+
+ if (!args->dev || !args->power_up || !args->power_down)
+ return ERR_PTR(-EINVAL);
+
+ pm = devm_kzalloc(args->dev, sizeof(*pm), GFP_KERNEL);
+ if (!pm)
+ return ERR_PTR(-ENOMEM);
+
+ pm->dev = args->dev;
+ pm->data = args->data;
+ pm->after_create = args->after_create;
+ pm->before_destroy = args->before_destroy;
+ pm->power_up = args->power_up;
+ pm->power_down = args->power_down;
+
+ mutex_init(&pm->lock);
+ INIT_DELAYED_WORK(&pm->power_down_work, gcip_pm_async_power_down_work);
+
+ if (pm->after_create) {
+ ret = pm->after_create(pm->data);
+ if (ret) {
+ devm_kfree(args->dev, pm);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return pm;
+}
+
+void gcip_pm_destroy(struct gcip_pm *pm)
+{
+ if (!pm)
+ return;
+
+ pm->power_down_pending = false;
+ cancel_delayed_work_sync(&pm->power_down_work);
+
+ if (pm->before_destroy)
+ pm->before_destroy(pm->data);
+
+ devm_kfree(pm->dev, pm);
+}
+
+/*
+ * Increases the counter and calls the power_up callback.
+ *
+ * Returns zero on success.
+ *
+ * Caller must hold @pm->lock.
+ */
+static int gcip_pm_get_locked(struct gcip_pm *pm)
+{
+ int ret = 0;
+
+ gcip_pm_lockdep_assert_held(pm);
+
+ if (!pm->count) {
+ if (pm->power_down_pending)
+ pm->power_down_pending = false;
+ else
+ ret = pm->power_up(pm->data);
+ }
+
+ if (!ret)
+ pm->count++;
+
+ dev_dbg(pm->dev, "%s: %d\n", __func__, pm->count);
+
+ return ret;
+}
+
+int gcip_pm_get_if_powered(struct gcip_pm *pm, bool blocking)
+{
+ int ret = -EAGAIN;
+
+ if (!pm)
+ return 0;
+
+ /* Fast fails without holding the lock. */
+ if (!pm->count)
+ return ret;
+
+ if (blocking)
+ mutex_lock(&pm->lock);
+ else if (!mutex_trylock(&pm->lock))
+ return ret;
+
+ if (pm->count)
+ ret = gcip_pm_get_locked(pm);
+
+ mutex_unlock(&pm->lock);
+
+ return ret;
+}
+
+int gcip_pm_get(struct gcip_pm *pm)
+{
+ int ret;
+
+ if (!pm)
+ return 0;
+
+ mutex_lock(&pm->lock);
+ ret = gcip_pm_get_locked(pm);
+ mutex_unlock(&pm->lock);
+
+ return ret;
+}
+
+void gcip_pm_put(struct gcip_pm *pm)
+{
+ if (!pm)
+ return;
+
+ mutex_lock(&pm->lock);
+
+ if (WARN_ON(!pm->count))
+ goto unlock;
+
+ if (!--pm->count) {
+ pm->power_down_pending = true;
+ gcip_pm_try_power_down(pm);
+ }
+
+ dev_dbg(pm->dev, "%s: %d\n", __func__, pm->count);
+
+unlock:
+ mutex_unlock(&pm->lock);
+}
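+
+/*
+ * Usage sketch (illustrative only): bracketing hardware access with a get/put pair.
+ * my_hw_access() is a hypothetical placeholder.
+ *
+ *	ret = gcip_pm_get(pm);
+ *	if (ret)
+ *		return ret;
+ *	ret = my_hw_access(dev);
+ *	gcip_pm_put(pm);
+ *
+ * gcip_pm_get_if_powered() is the non-powering variant for paths, such as thermal or
+ * telemetry, that should only touch the hardware when it is already on.
+ */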
+
+int gcip_pm_get_count(struct gcip_pm *pm)
+{
+ if (!pm)
+ return 0;
+
+ return pm->count;
+}
+
+bool gcip_pm_is_powered(struct gcip_pm *pm)
+{
+ /* Assumes powered-on in case of no power interface. */
+ return pm ? gcip_pm_get_count(pm) > 0 : true;
+}
+
+void gcip_pm_shutdown(struct gcip_pm *pm, bool force)
+{
+ if (!pm)
+ return;
+
+ mutex_lock(&pm->lock);
+
+ if (pm->count) {
+ if (!force)
+ goto unlock;
+ dev_warn(pm->dev, "Force shutdown with power up count: %d", pm->count);
+ }
+
+ gcip_pm_try_power_down(pm);
+
+unlock:
+ mutex_unlock(&pm->lock);
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
new file mode 100644
index 0000000..1599889
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-telemetry.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/eventfd.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <gcip/gcip-telemetry.h>
+
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args)
+{
+ int err;
+
+ dev_dbg(tel->dev, "Sending KCI %s", tel->name);
+ err = send_kci(args);
+
+ if (err < 0) {
+ dev_err(tel->dev, "KCI %s failed - %d", tel->name, err);
+ return err;
+ }
+
+ if (err > 0) {
+ dev_err(tel->dev, "KCI %s returned %d", tel->name, err);
+ return -EBADMSG;
+ }
+
+ dev_dbg(tel->dev, "KCI %s Succeeded", tel->name);
+
+ return 0;
+}
+
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd)
+{
+ struct eventfd_ctx *ctx;
+ ulong flags;
+
+ ctx = eventfd_ctx_fdget(eventfd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = ctx;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+
+ return 0;
+}
+
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ write_lock_irqsave(&tel->ctx_lock, flags);
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+ write_unlock_irqrestore(&tel->ctx_lock, flags);
+}
+
+/* Copy data out of the log buffer with wrapping. */
+static void copy_with_wrap(struct gcip_telemetry_header *header, void *dest, u32 length, u32 size,
+ void *start)
+{
+ const u32 wrap_bit = size + sizeof(*header);
+ u32 remaining = 0;
+ u32 head = header->head & (wrap_bit - 1);
+
+ if (head + length < size) {
+ memcpy(dest, start + head, length);
+ header->head += length;
+ } else {
+ remaining = size - head;
+ memcpy(dest, start + head, remaining);
+ memcpy(dest + remaining, start, length - remaining);
+ header->head = (header->head & wrap_bit) ^ wrap_bit;
+ header->head |= length - remaining;
+ }
+}
+
+void gcip_telemetry_fw_log(struct gcip_telemetry *log)
+{
+ struct device *dev = log->dev;
+ struct gcip_telemetry_header *header = log->header;
+ struct gcip_log_entry_header entry;
+ u8 *start;
+ const size_t queue_size = header->size - sizeof(*header);
+ const size_t max_length = queue_size - sizeof(entry);
+ char *buffer = kmalloc(max_length + 1, GFP_ATOMIC);
+
+ if (!buffer) {
+ header->head = header->tail;
+ return;
+ }
+ start = (u8 *)header + sizeof(*header);
+
+ while (header->head != header->tail) {
+ copy_with_wrap(header, &entry, sizeof(entry), queue_size, start);
+ if (entry.length == 0 || entry.length > max_length) {
+ header->head = header->tail;
+ dev_err(dev, "log queue is corrupted");
+ break;
+ }
+ copy_with_wrap(header, buffer, entry.length, queue_size, start);
+ buffer[entry.length] = 0;
+
+ if (entry.code > GCIP_FW_DMESG_LOG_LEVEL)
+ continue;
+
+ switch (entry.code) {
+ case GCIP_FW_LOG_LEVEL_VERBOSE:
+ case GCIP_FW_LOG_LEVEL_DEBUG:
+ dev_dbg(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_WARN:
+ dev_warn(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_FATAL:
+ case GCIP_FW_LOG_LEVEL_ERROR:
+ dev_err(dev, "%s", buffer);
+ break;
+ case GCIP_FW_LOG_LEVEL_INFO:
+ default:
+ dev_info(dev, "%s", buffer);
+ break;
+ }
+ }
+ kfree(buffer);
+}
+
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace)
+{
+ struct gcip_telemetry_header *header = trace->header;
+
+ header->head = header->tail;
+}
+
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel)
+{
+ spin_lock(&tel->state_lock);
+
+ if (tel->state == GCIP_TELEMETRY_ENABLED && tel->header->head != tel->header->tail)
+ schedule_work(&tel->work);
+
+ spin_unlock(&tel->state_lock);
+}
+
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif)
+{
+ mutex_lock(&tel->mmap_lock);
+ tel->mmapped_count += dif;
+ mutex_unlock(&tel->mmap_lock);
+}
+
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args)
+{
+ int ret;
+
+ mutex_lock(&tel->mmap_lock);
+
+ if (!tel->mmapped_count) {
+ ret = mmap(args);
+
+ if (!ret)
+ tel->mmapped_count = 1;
+ } else {
+ ret = -EBUSY;
+ dev_warn(tel->dev, "%s is already mmapped %ld times", tel->name,
+ tel->mmapped_count);
+ }
+
+ mutex_unlock(&tel->mmap_lock);
+
+ return ret;
+}
+
+/* Worker for processing log/trace buffers. */
+static void gcip_telemetry_worker(struct work_struct *work)
+{
+ struct gcip_telemetry *tel = container_of(work, struct gcip_telemetry, work);
+ u32 prev_head;
+ ulong flags;
+
+ /*
+	 * Loops while telemetry is enabled, there is data to be consumed, and the previous
+	 * iteration made progress. If another IRQ arrives just after the last head != tail
+	 * check, another work item should be scheduled.
+ */
+ do {
+ spin_lock_irqsave(&tel->state_lock, flags);
+ if (tel->state != GCIP_TELEMETRY_ENABLED) {
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ return;
+ }
+
+ prev_head = tel->header->head;
+ if (tel->header->head != tel->header->tail) {
+ read_lock(&tel->ctx_lock);
+ if (tel->ctx)
+ eventfd_signal(tel->ctx, 1);
+ else
+ tel->fallback_fn(tel);
+ read_unlock(&tel->ctx_lock);
+ }
+
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ msleep(GCIP_TELEMETRY_LOG_RECHECK_DELAY);
+ } while (tel->header->head != tel->header->tail && tel->header->head != prev_head);
+}
+
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *))
+{
+ if (!is_power_of_2(size) || size <= sizeof(struct gcip_telemetry_header)) {
+ dev_err(dev,
+ "Size of GCIP telemetry buffer must be a power of 2 and greater than %zu.",
+ sizeof(struct gcip_telemetry_header));
+ return -EINVAL;
+ }
+
+ rwlock_init(&tel->ctx_lock);
+ tel->name = name;
+ tel->dev = dev;
+
+ tel->header = vaddr;
+ tel->header->head = 0;
+ tel->header->tail = 0;
+ tel->header->size = size;
+ tel->header->entries_dropped = 0;
+
+ tel->ctx = NULL;
+
+ spin_lock_init(&tel->state_lock);
+ INIT_WORK(&tel->work, gcip_telemetry_worker);
+ tel->fallback_fn = fallback_fn;
+ tel->state = GCIP_TELEMETRY_ENABLED;
+ mutex_init(&tel->mmap_lock);
+ tel->mmapped_count = 0;
+
+ return 0;
+}
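+
+/*
+ * Usage sketch (illustrative only): log_vaddr is assumed to map a buffer shared with the
+ * firmware, and MY_LOG_BUF_SIZE is a hypothetical power-of-two size that includes the
+ * gcip_telemetry_header.
+ *
+ *	ret = gcip_telemetry_init(dev, &log_tel, "telemetry_log", log_vaddr,
+ *				  MY_LOG_BUF_SIZE, gcip_telemetry_fw_log);
+ *	if (ret)
+ *		return ret;
+ *
+ * gcip_telemetry_irq_handler() is then called from the telemetry IRQ to kick the worker,
+ * and gcip_telemetry_exit() tears the instance down.
+ */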
+
+void gcip_telemetry_exit(struct gcip_telemetry *tel)
+{
+ ulong flags;
+
+ spin_lock_irqsave(&tel->state_lock, flags);
+ /* Prevents racing with the IRQ handler or worker. */
+ tel->state = GCIP_TELEMETRY_INVALID;
+ spin_unlock_irqrestore(&tel->state_lock, flags);
+ cancel_work_sync(&tel->work);
+
+ if (tel->ctx)
+ eventfd_ctx_put(tel->ctx);
+ tel->ctx = NULL;
+}
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-thermal.c b/gcip-kernel-driver/drivers/gcip/gcip-thermal.c
new file mode 100644
index 0000000..bc06cd5
--- /dev/null
+++ b/gcip-kernel-driver/drivers/gcip/gcip-thermal.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Thermal management support for GCIP devices.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/thermal.h>
+#include <linux/version.h>
+
+#include <gcip/gcip-pm.h>
+#include <gcip/gcip-thermal.h>
+
+#define OF_DATA_NUM_MAX (GCIP_THERMAL_MAX_NUM_STATES * 2)
+
+#define to_cdev(dev) container_of(dev, struct thermal_cooling_device, device)
+#define to_gcip_thermal(dev) ((struct gcip_thermal *)to_cdev(dev)->devdata)
+
+/* Struct for state to rate and state to power mappings. */
+struct gcip_rate_pwr {
+ unsigned long rate;
+ u32 power;
+};
+
+static struct gcip_rate_pwr state_map[GCIP_THERMAL_MAX_NUM_STATES] = { 0 };
+
+static int gcip_thermal_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ if (!thermal->num_states)
+ return -ENODEV;
+
+ *state = thermal->num_states - 1;
+
+ return 0;
+}
+
+static int gcip_thermal_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ mutex_lock(&thermal->lock);
+ *state = thermal->state;
+ mutex_unlock(&thermal->lock);
+
+ return 0;
+}
+
+static int gcip_thermal_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+ int i, ret = 0;
+
+ if (state >= thermal->num_states) {
+ dev_err(thermal->dev, "Invalid thermal cooling state %lu\n", state);
+ return -EINVAL;
+ }
+
+ mutex_lock(&thermal->lock);
+
+ thermal->vote[GCIP_THERMAL_COOLING_DEVICE] = state;
+ for (i = 0; i < GCIP_THERMAL_MAX_NUM_VOTERS; i++)
+ state = max(state, thermal->vote[i]);
+
+ if (state == thermal->state)
+ goto out;
+
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->set_rate(thermal->data, state_map[state].rate);
+ gcip_pm_put(thermal->pm);
+ }
+
+ if (ret)
+ dev_err(thermal->dev, "Failed to set thermal cooling state: %d\n", ret);
+ else
+ thermal->state = state;
+out:
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+static int gcip_thermal_rate2power_internal(struct gcip_thermal *thermal, unsigned long rate,
+ u32 *power)
+{
+ int i;
+
+ for (i = 0; i < thermal->num_states; i++) {
+ if (rate == state_map[i].rate) {
+ *power = state_map[i].power;
+ return 0;
+ }
+ }
+
+ dev_err(thermal->dev, "Unknown rate for: %lu\n", rate);
+ *power = 0;
+
+ return -EINVAL;
+}
+
+static int gcip_thermal_get_requested_power(struct thermal_cooling_device *cdev, u32 *power)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+ unsigned long rate;
+ int ret;
+
+ if (gcip_pm_get_if_powered(thermal->pm, false)) {
+ *power = 0;
+ return 0;
+ }
+
+ mutex_lock(&thermal->lock);
+
+ ret = thermal->get_rate(thermal->data, &rate);
+
+ mutex_unlock(&thermal->lock);
+ gcip_pm_put(thermal->pm);
+
+ if (ret)
+ return ret;
+
+ return gcip_thermal_rate2power_internal(thermal, rate, power);
+}
+
+static int gcip_thermal_state2power(struct thermal_cooling_device *cdev, unsigned long state,
+ u32 *power)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ if (state >= thermal->num_states) {
+ dev_err(thermal->dev, "Invalid state: %lu\n", state);
+ return -EINVAL;
+ }
+
+ return gcip_thermal_rate2power_internal(thermal, state_map[state].rate, power);
+}
+
+static int gcip_thermal_power2state(struct thermal_cooling_device *cdev, u32 power,
+ unsigned long *state)
+{
+ struct gcip_thermal *thermal = cdev->devdata;
+
+ if (!thermal->num_states)
+ return -ENODEV;
+
+ /*
+ * Argument "power" is the maximum allowed power consumption in mW as defined by the PID
+ * control loop. Checks for the first state that is less than or equal to the current
+ * allowed power. state_map is descending, so lowest power consumption is last value in the
+ * array. Returns lowest state even if it consumes more power than allowed as not all
+ * platforms can handle throttling below an active state.
+ */
+ for (*state = 0; *state < thermal->num_states; (*state)++)
+ if (power >= state_map[*state].power)
+ return 0;
+
+ *state = thermal->num_states - 1;
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops gcip_thermal_ops = {
+ .get_max_state = gcip_thermal_get_max_state,
+ .get_cur_state = gcip_thermal_get_cur_state,
+ .set_cur_state = gcip_thermal_set_cur_state,
+ .get_requested_power = gcip_thermal_get_requested_power,
+ .state2power = gcip_thermal_state2power,
+ .power2state = gcip_thermal_power2state,
+};
+
+/* This API was removed in newer kernels, but Android still uses it to update the thermal request. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) && IS_ENABLED(CONFIG_ANDROID)
+void thermal_cdev_update(struct thermal_cooling_device *cdev);
+#endif
+
+static void gcip_thermal_update(struct gcip_thermal *thermal)
+{
+ struct thermal_cooling_device *cdev = thermal->cdev;
+
+ cdev->updated = false;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0) || IS_ENABLED(CONFIG_ANDROID)
+ thermal_cdev_update(cdev);
+#elif IS_ENABLED(CONFIG_THERMAL)
+ dev_err_once(thermal->dev, "Thermal update not implemented");
+#endif
+}
+
+static ssize_t user_vote_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct gcip_thermal *thermal = to_gcip_thermal(dev);
+ ssize_t ret;
+
+ if (!thermal)
+ return -ENODEV;
+
+ mutex_lock(&thermal->lock);
+ ret = sysfs_emit(buf, "%lu\n", thermal->vote[GCIP_THERMAL_SYSFS]);
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+static ssize_t user_vote_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct gcip_thermal *thermal = to_gcip_thermal(dev);
+ unsigned long state;
+ int ret;
+
+ if (!thermal)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ if (state >= thermal->num_states)
+ return -EINVAL;
+
+ mutex_lock(&thermal->lock);
+ thermal->vote[GCIP_THERMAL_SYSFS] = state;
+ mutex_unlock(&thermal->lock);
+
+ gcip_thermal_update(thermal);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(user_vote);
+
+static int gcip_thermal_rate2state(struct gcip_thermal *thermal, unsigned long rate)
+{
+ int i;
+
+ for (i = 0; i < thermal->num_states; i++) {
+ if (state_map[i].rate <= rate)
+ return i;
+ }
+
+ /* Returns lowest state on an invalid input. */
+ return thermal->num_states - 1;
+}
+
+static int gcip_thermal_notifier(struct notifier_block *nb, unsigned long rate, void *nb_data)
+{
+ struct gcip_thermal *thermal = container_of(nb, struct gcip_thermal, nb);
+ unsigned long state = gcip_thermal_rate2state(thermal, rate);
+
+ dev_dbg(thermal->dev, "Thermal notifier req original: %lu, state: %lu\n", rate, state);
+
+ mutex_lock(&thermal->lock);
+ thermal->vote[GCIP_THERMAL_NOTIFIER_BLOCK] = state;
+ mutex_unlock(&thermal->lock);
+
+ gcip_thermal_update(thermal);
+
+ return NOTIFY_OK;
+}
+
+struct notifier_block *gcip_thermal_get_notifier_block(struct gcip_thermal *thermal)
+{
+ if (IS_ERR_OR_NULL(thermal))
+ return NULL;
+
+ return &thermal->nb;
+}
+
+void gcip_thermal_destroy(struct gcip_thermal *thermal)
+{
+ if (IS_ERR_OR_NULL(thermal))
+ return;
+
+ debugfs_remove_recursive(thermal->dentry);
+ thermal_cooling_device_unregister(thermal->cdev);
+ devm_kfree(thermal->dev, thermal);
+}
+
+static int gcip_thermal_enable_get(void *data, u64 *val)
+{
+ struct gcip_thermal *thermal = (struct gcip_thermal *)data;
+
+ mutex_lock(&thermal->lock);
+ *val = thermal->enabled;
+ mutex_unlock(&thermal->lock);
+
+ return 0;
+}
+
+static int gcip_thermal_enable_set(void *data, u64 val)
+{
+ struct gcip_thermal *thermal = (struct gcip_thermal *)data;
+ int ret = 0;
+
+ mutex_lock(&thermal->lock);
+
+ if (thermal->enabled != (bool)val) {
+ /*
+		 * If the device is not powered, the value will be restored by
+		 * gcip_thermal_restore_on_powering() on the next firmware boot.
+ */
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->control(thermal->data, val);
+ gcip_pm_put(thermal->pm);
+ }
+
+ if (!ret) {
+ thermal->enabled = val;
+ dev_info_ratelimited(thermal->dev, "%s thermal control",
+ thermal->enabled ? "Enable" : "Disable");
+ } else {
+			dev_err(thermal->dev, "Failed to %s thermal control: %d\n",
+ val ? "enable" : "disable", ret);
+ }
+ }
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_gcip_thermal_enable, gcip_thermal_enable_get, gcip_thermal_enable_set,
+ "%llu\n");
+
+static int gcip_thermal_parse_dvfs_table(struct gcip_thermal *thermal)
+{
+ int row_size, col_size, tbl_size, i;
+ int of_data_int_array[OF_DATA_NUM_MAX];
+
+ if (of_property_read_u32_array(thermal->dev->of_node, GCIP_THERMAL_TABLE_SIZE_NAME,
+ of_data_int_array, 2))
+ goto error;
+
+ row_size = of_data_int_array[0];
+ col_size = of_data_int_array[1];
+ tbl_size = row_size * col_size;
+ if (row_size > GCIP_THERMAL_MAX_NUM_STATES) {
+ dev_err(thermal->dev, "Too many states\n");
+ goto error;
+ }
+
+ if (tbl_size > OF_DATA_NUM_MAX)
+ goto error;
+
+ if (of_property_read_u32_array(thermal->dev->of_node, GCIP_THERMAL_TABLE_NAME,
+ of_data_int_array, tbl_size))
+ goto error;
+
+ thermal->num_states = row_size;
+ for (i = 0; i < row_size; ++i) {
+ int idx = col_size * i;
+
+ state_map[i].rate = of_data_int_array[idx];
+ state_map[i].power = of_data_int_array[idx + 1];
+ }
+
+ return 0;
+
+error:
+ dev_err(thermal->dev, "Failed to parse DVFS table\n");
+
+ return -EINVAL;
+}
+
+static int gcip_thermal_cooling_register(struct gcip_thermal *thermal, const char *type,
+ const char *node_name)
+{
+ struct device_node *node = NULL;
+ int ret;
+
+ ret = gcip_thermal_parse_dvfs_table(thermal);
+ if (ret)
+ return ret;
+
+ if (node_name)
+ node = of_find_node_by_name(NULL, node_name);
+ if (!node)
+ dev_warn(thermal->dev, "Failed to find thermal cooling node\n");
+
+ thermal->cdev = thermal_of_cooling_device_register(node, type, thermal, &gcip_thermal_ops);
+ if (IS_ERR(thermal->cdev))
+ return PTR_ERR(thermal->cdev);
+
+ ret = device_create_file(&thermal->cdev->device, &dev_attr_user_vote);
+ if (ret)
+ thermal_cooling_device_unregister(thermal->cdev);
+
+ return ret;
+}
+
+struct gcip_thermal *gcip_thermal_create(const struct gcip_thermal_args *args)
+{
+ struct gcip_thermal *thermal;
+ int ret;
+
+ if (!args->dev || !args->get_rate || !args->set_rate || !args->control)
+ return ERR_PTR(-EINVAL);
+
+ thermal = devm_kzalloc(args->dev, sizeof(*thermal), GFP_KERNEL);
+ if (!thermal)
+ return ERR_PTR(-ENOMEM);
+
+ thermal->dev = args->dev;
+ thermal->nb.notifier_call = gcip_thermal_notifier;
+ thermal->pm = args->pm;
+ thermal->enabled = true;
+ thermal->data = args->data;
+ thermal->get_rate = args->get_rate;
+ thermal->set_rate = args->set_rate;
+ thermal->control = args->control;
+
+ mutex_init(&thermal->lock);
+
+ ret = gcip_thermal_cooling_register(thermal, args->type, args->node_name);
+ if (ret) {
+ dev_err(args->dev, "Failed to initialize external thermal cooling\n");
+ devm_kfree(args->dev, thermal);
+ return ERR_PTR(ret);
+ }
+
+ thermal->dentry = debugfs_create_dir("cooling", args->dentry);
+ /* Don't let debugfs creation failure abort the init procedure. */
+ if (IS_ERR_OR_NULL(thermal->dentry))
+ dev_warn(args->dev, "Failed to create debugfs for thermal cooling");
+ else
+ debugfs_create_file("enable", 0660, thermal->dentry, thermal,
+ &fops_gcip_thermal_enable);
+
+ return thermal;
+}
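+
+/*
+ * Usage sketch (illustrative only): my_get_rate, my_set_rate and my_control are hypothetical
+ * chip callbacks, and the type and node_name strings are made up.
+ *
+ *	const struct gcip_thermal_args args = {
+ *		.dev = dev,
+ *		.pm = pm,
+ *		.dentry = debugfs_root,
+ *		.node_name = "my-cooling",
+ *		.type = "my-device-cooling",
+ *		.data = priv,
+ *		.get_rate = my_get_rate,
+ *		.set_rate = my_set_rate,
+ *		.control = my_control,
+ *	};
+ *	struct gcip_thermal *thermal = gcip_thermal_create(&args);
+ *
+ *	if (IS_ERR(thermal))
+ *		return PTR_ERR(thermal);
+ */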
+
+int gcip_thermal_suspend_device(struct gcip_thermal *thermal)
+{
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(thermal))
+ return 0;
+
+ mutex_lock(&thermal->lock);
+
+ /*
+	 * Always marks the device as suspended, even when the request cannot be handled for
+	 * unknown reasons, because we still want to prevent the client from using the device.
+ */
+ thermal->device_suspended = true;
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->set_rate(thermal->data, 0);
+ gcip_pm_put(thermal->pm);
+ }
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+int gcip_thermal_resume_device(struct gcip_thermal *thermal)
+{
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(thermal))
+ return 0;
+
+ mutex_lock(&thermal->lock);
+
+ if (!gcip_pm_get_if_powered(thermal->pm, false)) {
+ ret = thermal->set_rate(thermal->data, state_map[thermal->state].rate);
+ gcip_pm_put(thermal->pm);
+ }
+
+ /*
+ * Unlike gcip_thermal_suspend_device(), only sets the device as resumed if the request is
+ * fulfilled.
+ */
+ if (!ret)
+ thermal->device_suspended = false;
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
+
+bool gcip_thermal_is_device_suspended(struct gcip_thermal *thermal)
+{
+ if (IS_ERR_OR_NULL(thermal))
+ return false;
+
+ return thermal->device_suspended;
+}
+
+int gcip_thermal_restore_on_powering(struct gcip_thermal *thermal)
+{
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(thermal))
+ return 0;
+
+ gcip_pm_lockdep_assert_held(thermal->pm);
+ mutex_lock(&thermal->lock);
+
+ if (!thermal->enabled)
+ ret = thermal->control(thermal->data, thermal->enabled);
+ else if (thermal->device_suspended)
+ ret = thermal->set_rate(thermal->data, 0);
+ else if (thermal->state)
+ /* Skips state 0 since it's the default thermal state. */
+ ret = thermal->set_rate(thermal->data, state_map[thermal->state].rate);
+
+ mutex_unlock(&thermal->lock);
+
+ return ret;
+}
diff --git a/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
new file mode 100644
index 0000000..17208bf
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-alloc-helper.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GCIP helpers for allocating memory.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_ALLOC_HELPER_H__
+#define __GCIP_ALLOC_HELPER_H__
+
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+/*
+ * The actual return value from the alloc_noncontiguous function.
+ * The user should only care about @sgt. @mem is used internally for freeing memory.
+ */
+struct gcip_sgt_handle {
+ struct sg_table sgt;
+ void *mem;
+};
+
+/*
+ * Allocates non-contiguous memory with size @size bytes.
+ *
+ * @dev: Pointer to the device structure. Used for logging and as the NUMA node hint for page
+ *       allocation.
+ * @size: Total size in bytes. Will be rounded up to page alignment.
+ * @gfp: The GFP flags used to allocate internal structures.
+ *
+ * Returns the SG table that represents the non-contiguous region.
+ * Returns NULL on any error.
+ */
+struct sg_table *gcip_alloc_noncontiguous(struct device *dev, size_t size, gfp_t gfp);
+/* Frees the memory allocated by gcip_alloc_noncontiguous. */
+void gcip_free_noncontiguous(struct sg_table *sgt);
+
+/*
+ * Returns the virtual memory that was used to allocate @sgt.
+ *
+ * @sgt must be the return pointer of gcip_alloc_noncontiguous.
+ */
+static inline void *gcip_noncontiguous_sgt_to_mem(struct sg_table *sgt)
+{
+ struct gcip_sgt_handle *sh = container_of(sgt, struct gcip_sgt_handle, sgt);
+
+ return sh->mem;
+}
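+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *	struct sg_table *sgt = gcip_alloc_noncontiguous(dev, size, GFP_KERNEL);
+ *
+ *	if (!sgt)
+ *		return -ENOMEM;
+ *	memset(gcip_noncontiguous_sgt_to_mem(sgt), 0, size);
+ *	...
+ *	gcip_free_noncontiguous(sgt);
+ */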
+
+#endif /* __GCIP_ALLOC_HELPER_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-common-image-header.h b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
new file mode 100644
index 0000000..b86b430
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-common-image-header.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common authenticated image format for Google SoCs
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_COMMON_IMAGE_HEADER_H__
+#define __GCIP_COMMON_IMAGE_HEADER_H__
+
+#include <linux/types.h>
+
+#include "gcip-image-config.h"
+
+#define GCIP_FW_HEADER_SIZE (0x1000)
+
+struct gcip_common_image_sub_header_common {
+ uint32_t magic;
+ uint32_t generation;
+ uint32_t rollback_info;
+ uint32_t length;
+ uint8_t flags[16];
+};
+
+struct gcip_common_image_sub_header_gen1 {
+ uint8_t body_hash[32];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
+ struct gcip_image_config image_config;
+};
+
+struct gcip_common_image_sub_header_gen2 {
+ uint8_t body_hash[64];
+ uint8_t chip_id[32];
+ uint8_t auth_config[256];
+ struct gcip_image_config image_config;
+};
+
+struct gcip_common_image_header {
+ uint8_t sig[512];
+ uint8_t pub[512];
+ struct {
+ struct gcip_common_image_sub_header_common common;
+ union {
+ struct gcip_common_image_sub_header_gen1 gen1;
+ struct gcip_common_image_sub_header_gen2 gen2;
+ };
+ };
+};
+
+/*
+ * Returns the image config field from a common image header,
+ * or NULL if the header has an invalid generation identifier.
+ */
+static inline struct gcip_image_config *
+get_image_config_from_hdr(struct gcip_common_image_header *hdr)
+{
+ switch (hdr->common.generation) {
+ case 1:
+ return &hdr->gen1.image_config;
+ case 2:
+ return &hdr->gen2.image_config;
+ }
+ return NULL;
+}
+
+#endif /* __GCIP_COMMON_IMAGE_HEADER_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-dma-fence.h b/gcip-kernel-driver/include/gcip/gcip-dma-fence.h
new file mode 100644
index 0000000..ad765d2
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-dma-fence.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GCIP support of DMA fences.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GCIP_DMA_FENCE_H__
+#define __GCIP_DMA_FENCE_H__
+
+#include <linux/device.h>
+#include <linux/dma-fence.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define GCIP_FENCE_TIMELINE_NAME_LEN 128
+
+/* Used before accessing the list headed by mgr->fence_list_head. */
+#define GCIP_DMA_FENCE_LIST_LOCK(mgr, flags) spin_lock_irqsave(&(mgr)->fence_list_lock, flags)
+#define GCIP_DMA_FENCE_LIST_UNLOCK(mgr, flags) \
+ spin_unlock_irqrestore(&(mgr)->fence_list_lock, flags)
+
+/*
+ * A macro to loop through all fences under a gcip_dma_fence_manager.
+ * @mgr: struct gcip_dma_fence_manager
+ * @gfence: struct gcip_dma_fence
+ *
+ * This macro must be wrapped by GCIP_DMA_FENCE_LIST_(UN)LOCK.
+ */
+#define gcip_for_each_fence(mgr, gfence) \
+ list_for_each_entry(gfence, &(mgr)->fence_list_head, fence_list)
+
+#define to_gcip_fence(fence) container_of(fence, struct gcip_dma_fence, fence)
+
+struct gcip_dma_fence_manager {
+ /* The list of all fence objects for debugging. */
+ struct list_head fence_list_head;
+ /* Protects the list headed by @fence_list_head. */
+ spinlock_t fence_list_lock;
+ /* For logging. */
+ struct device *dev;
+};
+
+struct gcip_dma_fence {
+ struct dma_fence fence;
+ /* The manager used to init this object. */
+ struct gcip_dma_fence_manager *mgr;
+ char timeline_name[GCIP_FENCE_TIMELINE_NAME_LEN];
+ /* Protects @fence. */
+ spinlock_t lock;
+ /* Is protected by manager->fence_list_lock. */
+ struct list_head fence_list;
+};
+
+struct gcip_dma_fence_data {
+ /*
+ * A null-terminated string with length less than GCIP_FENCE_TIMELINE_NAME_LEN.
+ * The content of this buffer will be copied so it's fine to release this pointer after
+ * the gcip_dma_fence_init() call.
+ */
+ char *timeline_name;
+ /*
+ * The DMA fence operators to initialize the fence with.
+ */
+ const struct dma_fence_ops *ops;
+ /* The sequence number to initialize the fence with. */
+ u32 seqno;
+ /* Output: The fd of the new sync_file with the new fence. */
+ int fence;
+ /*
+ * The callback to be called after @gfence is initialized, before an FD has been installed.
+ * Returns 0 on success. A non-zero return value will revert the initialization of
+ * @gfence and the returned error is returned by gcip_dma_fence_init().
+ *
+ * There is no 'before_exit' callback because the user is supposed to set a custom
+	 * dma_fence_ops.release callback which reverts after_init() and then calls
+	 * gcip_dma_fence_exit().
+ *
+ * This callback is optional.
+ */
+ int (*after_init)(struct gcip_dma_fence *gfence);
+};
+
+/*
+ * Allocates and returns a GCIP DMA fence manager. Memory is allocated as @dev-managed, so there
+ * is no release function for the manager.
+ *
+ * Returns a negative errno on error.
+ */
+struct gcip_dma_fence_manager *gcip_dma_fence_manager_create(struct device *dev);
+
+/* Helpers for setting dma_fence_ops. */
+
+/* Returns the timeline name. @fence must be contained within a gcip_dma_fence. */
+const char *gcip_dma_fence_get_timeline_name(struct dma_fence *fence);
+
+/* Always returns true. Can be used as the enable_signaling callback. */
+bool gcip_dma_fence_always_true(struct dma_fence *fence);
+
+/* End of helpers for setting dma_fence_ops. */
+
+/*
+ * This function does
+ * 1. Initialize the DMA fence object
+ * 2. Call after_init() if present
+ * 3. Install an FD associates to the created DMA fence
+ *
+ * Step 1 never fails, so this function returns an error only if after_init() fails (step 2) or
+ * FD allocation fails (step 3).
+ * In either failure case, @ops->release is always called. Therefore @ops->release may need to
+ * distinguish whether after_init() succeeded.
+ *
+ * It's always safe to call gcip_dma_fence_exit() in @ops->release because that function reverts
+ * step 1.
+ */
+int gcip_dma_fence_init(struct gcip_dma_fence_manager *mgr, struct gcip_dma_fence *gfence,
+ struct gcip_dma_fence_data *data);
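+
+/*
+ * Usage sketch (illustrative only): creating a fence and returning the FD of its sync_file.
+ * "my_fence_ops" is a hypothetical caller-defined dma_fence_ops whose .release callback reverts
+ * any after_init() work and calls gcip_dma_fence_exit(); @mgr and @gfence are assumed to have
+ * been allocated by the caller.
+ *
+ *	struct gcip_dma_fence_data data = {
+ *		.timeline_name = "my-timeline",
+ *		.ops = &my_fence_ops,
+ *		.seqno = 0,
+ *	};
+ *	int ret = gcip_dma_fence_init(mgr, gfence, &data);
+ *
+ *	if (ret)
+ *		return ret;
+ *	return data.fence;	// FD of the new sync_file
+ */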
+
+/*
+ * Reverts gcip_dma_fence_init(). Removes @gfence from the manager's list.
+ * This function will not free @gfence.
+ */
+void gcip_dma_fence_exit(struct gcip_dma_fence *gfence);
+
+/*
+ * Sets @status to the DMA fence status of DMA fence FD @fence.
+ * @status is only set when this function returns 0.
+ *
+ * It is OK if @fence does not refer to a gcip_dma_fence.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ */
+int gcip_dma_fence_status(int fence, int *status);
+
+/*
+ * Signals the fence error of DMA fence FD @fence.
+ *
+ * If the fence has been signaled,
+ * - if @ignore_signaled is true, this function does nothing.
+ * - otherwise, returns -EALREADY.
+ *
+ * It is OK if @fence does not refer to a gcip_dma_fence.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ */
+int gcip_dma_fence_signal(int fence, int error, bool ignore_signaled);
+/* Identical to gcip_dma_fence_signal() except this function accepts a gcip_dma_fence pointer as input. */
+int gcip_dma_fenceptr_signal(struct gcip_dma_fence *gfence, int error, bool ignore_signaled);
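+
+/*
+ * Usage sketch (illustrative only): propagating a hardware timeout to the fence backing the
+ * hypothetical FD @fd obtained from user space, ignoring fences that were already signaled:
+ *
+ *	ret = gcip_dma_fence_signal(fd, -ETIMEDOUT, true);
+ */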
+
+/* Prints data of @gfence to the sequence file @s. For debugging purposes only. */
+void gcip_dma_fence_show(struct gcip_dma_fence *gfence, struct seq_file *s);
+
+#endif /* __GCIP_DMA_FENCE_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-domain-pool.h b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
new file mode 100644
index 0000000..3a6ae4b
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-domain-pool.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GCIP IOMMU domain allocator.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_DOMAIN_POOL_H__
+#define __GCIP_DOMAIN_POOL_H__
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct gcip_domain_pool {
+ struct ida idp; /* ID allocator to keep track of used domains. */
+ /*
+ * Size of the pool. Can be set to 0, in which case the implementation will fall back to
+ * dynamic domain allocation using the IOMMU API directly.
+ */
+ unsigned int size;
+ struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
+ struct device *dev; /* The device used for logging warnings/errors. */
+ struct list_head dynamic_domains; /* Tracks dynamically allocated domains for cleanup. */
+ struct mutex lock; /* Protects dynamic_domains. */
+};
+
+/*
+ * Initializes a domain pool.
+ *
+ * @dev: pointer to device structure.
+ * @pool: caller-allocated pool structure.
+ * @size: size of the pre-allocated domains pool.
+ * Set to zero to fall back to dynamically allocated domains.
+ *
+ * Returns 0 on success or a negative error value.
+ */
+int gcip_domain_pool_init(struct device *dev, struct gcip_domain_pool *pool, unsigned int size);
+
+/*
+ * Allocates a domain from the pool.
+ * Returns NULL on error.
+ */
+struct iommu_domain *gcip_domain_pool_alloc(struct gcip_domain_pool *pool);
+
+/* Returns a domain previously allocated by gcip_domain_pool_alloc() back to the pool. */
+void gcip_domain_pool_free(struct gcip_domain_pool *pool, struct iommu_domain *domain);
+
+/* Cleans up all resources used by the domain pool. */
+void gcip_domain_pool_destroy(struct gcip_domain_pool *pool);
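+
+/*
+ * Usage sketch (illustrative only; the pool size of 4 is an arbitrary example value):
+ *
+ *	struct gcip_domain_pool pool;
+ *	struct iommu_domain *domain;
+ *	int ret = gcip_domain_pool_init(dev, &pool, 4);
+ *
+ *	if (ret)
+ *		return ret;
+ *	domain = gcip_domain_pool_alloc(&pool);
+ *	if (domain)
+ *		gcip_domain_pool_free(&pool, domain);
+ *	gcip_domain_pool_destroy(&pool);
+ */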
+
+#endif /* __GCIP_DOMAIN_POOL_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-firmware.h b/gcip-kernel-driver/include/gcip/gcip-firmware.h
new file mode 100644
index 0000000..52f5d11
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-firmware.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GCIP firmware interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_FIRMWARE_H__
+#define __GCIP_FIRMWARE_H__
+
+#include <linux/dcache.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/*
+ * Any tracing level vote with the following bit set will be considered a default vote.
+ * See go/gcip-firmware-dynamic-tracing for details.
+ */
+#define GCIP_FW_TRACING_DEFAULT_VOTE BIT(8)
+
+enum gcip_fw_status {
+ /* No firmware loaded yet, or last firmware failed to run. */
+ GCIP_FW_INVALID = 0,
+ /* Load in progress. */
+ GCIP_FW_LOADING = 1,
+ /* Current firmware is valid and can be restarted. */
+ GCIP_FW_VALID = 2,
+};
+
+/* Firmware flavors returned via KCI FIRMWARE_INFO command. */
+enum gcip_fw_flavor {
+ /* Unused value for extending enum storage type. */
+ GCIP_FW_FLAVOR_ERROR = -1,
+	/* Used by the host when the flavor cannot be determined. */
+ GCIP_FW_FLAVOR_UNKNOWN = 0,
+ /* Second-stage bootloader (no longer used). */
+ GCIP_FW_FLAVOR_BL1 = 1,
+ /* Systest app image. */
+ GCIP_FW_FLAVOR_SYSTEST = 2,
+ /* Default production app image. */
+ GCIP_FW_FLAVOR_PROD_DEFAULT = 3,
+ /* Custom image produced by other teams. */
+ GCIP_FW_FLAVOR_CUSTOM = 4,
+};
+
+/* Type of firmware crash. */
+enum gcip_fw_crash_type {
+	/* Types sent by the GCIP_RKCI_FIRMWARE_CRASH reverse KCI: */
+	/* Assertion failed. */
+ GCIP_FW_CRASH_ASSERT_FAIL = 0,
+ /* Data abort exception. */
+ GCIP_FW_CRASH_DATA_ABORT = 1,
+ /* Prefetch abort exception. */
+ GCIP_FW_CRASH_PREFETCH_ABORT = 2,
+ /* Undefined exception. */
+ GCIP_FW_CRASH_UNDEFINED_EXCEPTION = 3,
+ /* Exception which cannot be recovered by the firmware itself. */
+ GCIP_FW_CRASH_UNRECOVERABLE_FAULT = 4,
+ /* Used in debug dump. */
+ GCIP_FW_CRASH_DUMMY_CRASH_TYPE = 0xFF,
+
+ /* HW watchdog timeout. */
+ GCIP_FW_CRASH_HW_WDG_TIMEOUT = 0x100,
+};
+
+/* Firmware info filled out via KCI FIRMWARE_INFO command. */
+struct gcip_fw_info {
+ uint64_t fw_build_time; /* BuildData::Timestamp() */
+ uint32_t fw_flavor; /* enum gcip_fw_flavor */
+ uint32_t fw_changelist; /* BuildData::Changelist() */
+ uint32_t spare[10];
+};
+
+/* Returns the name of @fw_flavor as a string. */
+char *gcip_fw_flavor_str(enum gcip_fw_flavor fw_flavor);
+
+struct gcip_fw_tracing {
+ struct device *dev;
+ struct dentry *dentry;
+ struct gcip_pm *pm;
+
+ /*
+ * Lock to protect the struct members listed below.
+ *
+	 * Note that since a request to adjust the tracing level might happen during a power state
+	 * transition (i.e., another thread calling gcip_firmware_tracing_restore_on_powering()
+	 * with the pm lock held), one must either use the non-blocking gcip_pm_get_if_powered() or
+	 * make sure no new power transition can start while holding this lock, to prevent deadlock.
+ */
+ struct mutex lock;
+ /* Actual firmware tracing level. */
+ unsigned long active_level;
+ /* Requested firmware tracing level. */
+ unsigned long request_level;
+
+	/* Private data. See struct gcip_fw_tracing_args. */
+ void *data;
+
+ /* Callbacks. See struct gcip_fw_tracing_args. */
+ int (*set_level)(void *data, unsigned long level, unsigned long *active_level);
+};
+
+struct gcip_fw_tracing_args {
+ /* Device struct of GCIP device. */
+ struct device *dev;
+ /* GCIP power management. */
+ struct gcip_pm *pm;
+ /* Top-level debugfs directory for the device. */
+ struct dentry *dentry;
+ /* Private data for callbacks listed below. */
+ void *data;
+ /*
+ * Callback to set the tracing level.
+ * The actual tracing level clamped by the firmware should be returned by @active_level.
+ */
+ int (*set_level)(void *data, unsigned long level, unsigned long *active_level);
+};
+
+/* Allocate and initialize the firmware tracing struct. */
+struct gcip_fw_tracing *gcip_firmware_tracing_create(const struct gcip_fw_tracing_args *args);
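+
+/*
+ * Usage sketch (illustrative only): "my_set_level" and "my_ctx" are hypothetical; a real callback
+ * would forward the level to the firmware (e.g. via a FIRMWARE_TRACING_LEVEL KCI) and report the
+ * level the firmware actually applied through @active_level. Error handling of the returned
+ * pointer is omitted.
+ *
+ *	static int my_set_level(void *data, unsigned long level, unsigned long *active_level)
+ *	{
+ *		*active_level = level;	// a real callback would clamp to what firmware accepts
+ *		return 0;
+ *	}
+ *
+ *	struct gcip_fw_tracing_args args = {
+ *		.dev = dev,
+ *		.pm = pm,
+ *		.dentry = debugfs_dir,
+ *		.data = my_ctx,
+ *		.set_level = my_set_level,
+ *	};
+ *	struct gcip_fw_tracing *fw_tracing = gcip_firmware_tracing_create(&args);
+ */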
+
+/* Destroy and free the firmware tracing struct. */
+void gcip_firmware_tracing_destroy(struct gcip_fw_tracing *fw_tracing);
+
+/*
+ * Restore the previous firmware tracing level.
+ *
+ * This function is designed to restore the firmware tracing level during power management calls and
+ * thus it assumes the caller holds the pm lock.
+ */
+int gcip_firmware_tracing_restore_on_powering(struct gcip_fw_tracing *fw_tracing);
+
+#endif /* __GCIP_FIRMWARE_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-image-config.h b/gcip-kernel-driver/include/gcip/gcip-image-config.h
new file mode 100644
index 0000000..df09d39
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-image-config.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Framework for parsing the firmware image configuration.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_IMAGE_CONFIG_H__
+#define __GCIP_IMAGE_CONFIG_H__
+
+#include <asm/page.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define GCIP_FW_NUM_VERSIONS 4
+#define GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS 22
+#define GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS 5
+
+#define GCIP_FW_PRIV_LEVEL_GSA 0
+#define GCIP_FW_PRIV_LEVEL_TZ 1
+#define GCIP_FW_PRIV_LEVEL_NS 2
+
+/*
+ * The image configuration attached to the signed firmware.
+ */
+struct gcip_image_config {
+ __u32 carveout_base;
+ __u32 firmware_base;
+ __u32 firmware_size;
+ __u32 firmware_versions[GCIP_FW_NUM_VERSIONS];
+ __u32 config_version;
+ __u32 privilege_level;
+ __u32 remapped_region_start;
+ __u32 remapped_region_size;
+ __u32 num_iommu_mappings;
+ struct {
+ /* Device virtual address */
+ __u32 virt_address;
+ /*
+		 * Encodes a 4KB-aligned address and the corresponding size
+ * into a 32-bit value.
+ * Detailed encoding method is defined in gcip-image-config.c.
+ */
+ __u32 image_config_value;
+ } iommu_mappings[GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS];
+ __u32 remapped_data_start;
+ __u32 remapped_data_size;
+ __u32 num_ns_iommu_mappings;
+ __u32 ns_iommu_mappings[GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS];
+} __packed;
+
+#define GCIP_IMAGE_CONFIG_FLAGS_SECURE (1u << 0)
+
+struct gcip_image_config_ops {
+ /*
+ * Adds an IOMMU mapping from @daddr to @paddr with size @size.
+ *
+	 * It is ensured that @paddr + @size does not overflow before this function is called.
+ *
+ * @flags is a bit-field with the following attributes:
+ * [0:0] - Security. 1 for secure and 0 for non-secure.
+ * [31:1] - Reserved.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ * Mandatory.
+ */
+ int (*map)(void *data, dma_addr_t daddr, phys_addr_t paddr, size_t size,
+ unsigned int flags);
+ /*
+ * Removes the IOMMU mapping previously added by @map.
+ *
+ * Mandatory.
+ */
+ void (*unmap)(void *data, dma_addr_t daddr, size_t size, unsigned int flags);
+};
+
+struct gcip_image_config_parser {
+ struct device *dev;
+	void *data; /* User-specified data, passed to @ops. */
+ const struct gcip_image_config_ops *ops;
+	/* The last image config that was successfully parsed. */
+ struct gcip_image_config last_config;
+};
+
+#define GCIP_IMG_CFG_ADDR_SHIFT 12
+#define GCIP_IMG_CFG_MB_SHIFT 20
+#define GCIP_IMG_CFG_SIZE_MODE_BIT BIT(GCIP_IMG_CFG_ADDR_SHIFT - 1)
+#define GCIP_IMG_CFG_SECURE_SIZE_MASK (GCIP_IMG_CFG_SIZE_MODE_BIT - 1u)
+#define GCIP_IMG_CFG_NS_SIZE_MASK (GCIP_IMG_CFG_SIZE_MODE_BIT - 1u)
+#define GCIP_IMG_CFG_ADDR_MASK ~(BIT(GCIP_IMG_CFG_ADDR_SHIFT) - 1u)
+
+/* For decoding the size of ns_iommu_mappings. */
+static inline u32 gcip_ns_config_to_size(u32 cfg)
+{
+ if (cfg & GCIP_IMG_CFG_SIZE_MODE_BIT)
+ return (cfg & GCIP_IMG_CFG_NS_SIZE_MASK) << PAGE_SHIFT;
+
+ return (cfg & GCIP_IMG_CFG_NS_SIZE_MASK) << GCIP_IMG_CFG_MB_SHIFT;
+}
+
+/* For decoding the size of iommu_mappings. */
+static inline u32 gcip_config_to_size(u32 cfg)
+{
+ if (cfg & GCIP_IMG_CFG_SIZE_MODE_BIT)
+ return (cfg & GCIP_IMG_CFG_SECURE_SIZE_MASK) << PAGE_SHIFT;
+
+ return BIT(cfg & GCIP_IMG_CFG_SECURE_SIZE_MASK) << PAGE_SHIFT;
+}
+
+/*
+ * Initializes the image configuration parser.
+ *
+ * @dev is only used for logging.
+ * @data will be passed to operations.
+ *
+ * Returns 0 on success. Returns -EINVAL when any mandatory operation is NULL.
+ */
+int gcip_image_config_parser_init(struct gcip_image_config_parser *parser,
+ const struct gcip_image_config_ops *ops, struct device *dev,
+ void *data);
+
+/*
+ * Parses the image configuration and adds specified IOMMU mappings by calling pre-registered
+ * operations.
+ *
+ * Number of mappings to be added might be different according to the value of
+ * @config->privilege_level:
+ * - GCIP_FW_PRIV_LEVEL_NS:
+ *   Both @iommu_mappings and @ns_iommu_mappings will be added, because GCIP_FW_PRIV_LEVEL_NS means
+ *   the firmware will run in non-secure mode and all transactions will go through the non-secure
+ *   IOMMU.
+ * - Otherwise:
+ *   Only @ns_iommu_mappings are considered. TZ/GSA is the one that programs the secure IOMMU for
+ *   the secure IOMMU mappings.
+ *
+ * Before parsing the newly passed @config, the mappings of the last parsed config (stored by
+ * @parser internally) will be reverted. If any mapping in the new config fails to be mapped, the
+ * reverted last config will be mapped again, i.e. on any error this function keeps the mapping
+ * state the same as before calling it. But if the IOMMU state is somehow corrupted and hence the
+ * reverted last image config cannot be rolled back, only an error is logged. See the pseudo code
+ * below:
+ *
+ * gcip_image_config_parse(config):
+ * unmap(last_image_config)
+ * if ret = map(config) fails:
+ * LOG("Failed to map image config, rolling back to the last image config.")
+ * if map(last_image_config) fails:
+ * LOG("Failed to roll back the last image config.")
+ * return ret
+ * else:
+ * last_image_config = config
+ * return SUCCESS
+ *
+ * As a special case, if the content of @config is identical to the last successfully parsed image
+ * config, this function will return 0 immediately without removing or adding any mapping.
+ *
+ * Returns 0 on success. Otherwise an errno, which usually would be the one returned by
+ * gcip_image_config_ops.map. On error no new mapping specified in @config is added.
+ */
+int gcip_image_config_parse(struct gcip_image_config_parser *parser,
+ struct gcip_image_config *config);
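+
+/*
+ * Usage sketch (illustrative only): "my_map"/"my_unmap" are hypothetical callbacks that would
+ * typically map/unmap the range on the device's IOMMU domain, and "my_ctx" is hypothetical
+ * private data.
+ *
+ *	static const struct gcip_image_config_ops my_image_config_ops = {
+ *		.map = my_map,
+ *		.unmap = my_unmap,
+ *	};
+ *	struct gcip_image_config_parser parser;
+ *	int ret = gcip_image_config_parser_init(&parser, &my_image_config_ops, dev, my_ctx);
+ *
+ *	if (!ret)
+ *		ret = gcip_image_config_parse(&parser, config);
+ */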
+
+/*
+ * Clears the mappings specified in the last image config.
+ *
+ * It's valid to call this function before any image config has been successfully parsed, or when
+ * the last image config has already been cleared; in those cases this function is a no-op.
+ */
+void gcip_image_config_clear(struct gcip_image_config_parser *parser);
+
+/*
+ * Returns whether the privilege level specified by @config is non-secure.
+ */
+static inline bool gcip_image_config_is_ns(struct gcip_image_config *config)
+{
+ return config->privilege_level == GCIP_FW_PRIV_LEVEL_NS;
+}
+
+#endif /* __GCIP_IMAGE_CONFIG_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-kci.h b/gcip-kernel-driver/include/gcip/gcip-kci.h
new file mode 100644
index 0000000..eb83550
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-kci.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Kernel Control Interface, implements the protocol between AP kernel and GCIP firmware.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_KCI_H__
+#define __GCIP_KCI_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <gcip/gcip-mailbox.h>
+
+/*
+ * The status field in a firmware response is set to this by us when the response is fetched from
+ * the queue.
+ */
+#define GCIP_KCI_STATUS_OK GCIP_MAILBOX_STATUS_OK
+/*
+ * gcip_kci#mailbox.wait_list uses this value to record the status of responses that haven't been
+ * received yet.
+ */
+#define GCIP_KCI_STATUS_WAITING_RESPONSE GCIP_MAILBOX_STATUS_WAITING_RESPONSE
+/*
+ * Used when an expected response is not received, see the documentation of
+ * gcip_mailbox_handle_response() for details.
+ */
+#define GCIP_KCI_STATUS_NO_RESPONSE GCIP_MAILBOX_STATUS_NO_RESPONSE
+
+/*
+ * Command/response sequence numbers are capped at half of the 64-bit value range. The second half
+ * is reserved for incoming requests from firmware; these are tagged with the MSB set.
+ */
+#define GCIP_KCI_REVERSE_FLAG (0x8000000000000000ull)
+
+/* Command/response queue elements for KCI. */
+
+struct gcip_kci_dma_descriptor {
+ u64 address;
+ u32 size;
+ u32 flags;
+};
+
+struct gcip_kci_command_element {
+ /*
+ * Set by gcip_kci_push_cmd() in case of KCI cmd and copied from the RKCI cmd in case of
+ * RKCI response.
+ */
+ u64 seq;
+ u16 code;
+ u16 reserved[3]; /* Explicit padding, does not affect alignment. */
+ struct gcip_kci_dma_descriptor dma;
+} __packed;
+
+struct gcip_kci_response_element {
+ u64 seq;
+ u16 code;
+ /*
+ * Reserved for host use - firmware can't touch this.
+ * If a value is written here it will be discarded and overwritten during response
+ * processing. However, when repurposed as an RKCI command, the FW can set this field.
+ */
+ u16 status;
+ /*
+ * Return value is not currently needed by KCI command responses.
+ * For reverse KCI commands this is set as value2.
+ */
+ u32 retval;
+} __packed;
+
+/*
+ * Definition of code in command elements.
+ * Code for KCI is a 16-bit unsigned integer.
+ */
+enum gcip_kci_code {
+ GCIP_KCI_CODE_ACK = 0,
+ GCIP_KCI_CODE_UNMAP_BUFFER = 1,
+ GCIP_KCI_CODE_MAP_LOG_BUFFER = 2,
+ GCIP_KCI_CODE_JOIN_GROUP = 3,
+ GCIP_KCI_CODE_LEAVE_GROUP = 4,
+ GCIP_KCI_CODE_MAP_TRACE_BUFFER = 5,
+ GCIP_KCI_CODE_SHUTDOWN = 7,
+ GCIP_KCI_CODE_GET_DEBUG_DUMP = 8,
+ GCIP_KCI_CODE_OPEN_DEVICE = 9,
+ GCIP_KCI_CODE_CLOSE_DEVICE = 10,
+ GCIP_KCI_CODE_FIRMWARE_INFO = 11,
+ GCIP_KCI_CODE_GET_USAGE = 12,
+ GCIP_KCI_CODE_NOTIFY_THROTTLING = 13,
+ GCIP_KCI_CODE_BLOCK_BUS_SPEED_CONTROL = 14,
+ GCIP_KCI_CODE_ALLOCATE_VMBOX = 15,
+ GCIP_KCI_CODE_RELEASE_VMBOX = 16,
+ GCIP_KCI_CODE_LINK_OFFLOAD_VMBOX = 17,
+ GCIP_KCI_CODE_UNLINK_OFFLOAD_VMBOX = 18,
+ GCIP_KCI_CODE_FIRMWARE_TRACING_LEVEL = 19,
+ GCIP_KCI_CODE_THERMAL_CONTROL = 20,
+
+ GCIP_KCI_CODE_RKCI_ACK = 256,
+};
+
+/*
+ * Definition of reverse KCI request code ranges.
+ * Code for reverse KCI is a 16-bit unsigned integer.
+ * The first half is reserved for chip-specific codes and the generic codes can use the
+ * second half.
+ */
+enum gcip_reverse_kci_code {
+ GCIP_RKCI_CHIP_CODE_FIRST = 0,
+ GCIP_RKCI_PM_QOS_REQUEST,
+ GCIP_RKCI_CHANGE_BTS_SCENARIO,
+ GCIP_RKCI_PM_QOS_BTS_REQUEST,
+ GCIP_RKCI_DSP_CORE_TELEMETRY_TRY_READ,
+ GCIP_RKCI_CLIENT_FATAL_ERROR_NOTIFY,
+ GCIP_RKCI_CHIP_CODE_LAST = 0x7FFF,
+ GCIP_RKCI_GENERIC_CODE_FIRST = 0x8000,
+ GCIP_RKCI_FIRMWARE_CRASH = GCIP_RKCI_GENERIC_CODE_FIRST + 0,
+ GCIP_RKCI_JOB_LOCKUP = GCIP_RKCI_GENERIC_CODE_FIRST + 1,
+ GCIP_RKCI_GENERIC_CODE_LAST = 0xFFFF,
+};
+
+/*
+ * Definition of code in response elements.
+ * It is a 16-bit unsigned integer.
+ */
+enum gcip_kci_error {
+ GCIP_KCI_ERROR_OK = 0, /* Not an error; returned on success. */
+ GCIP_KCI_ERROR_CANCELLED = 1,
+ GCIP_KCI_ERROR_UNKNOWN = 2,
+ GCIP_KCI_ERROR_INVALID_ARGUMENT = 3,
+ GCIP_KCI_ERROR_DEADLINE_EXCEEDED = 4,
+ GCIP_KCI_ERROR_NOT_FOUND = 5,
+ GCIP_KCI_ERROR_ALREADY_EXISTS = 6,
+ GCIP_KCI_ERROR_PERMISSION_DENIED = 7,
+ GCIP_KCI_ERROR_RESOURCE_EXHAUSTED = 8,
+ GCIP_KCI_ERROR_FAILED_PRECONDITION = 9,
+ GCIP_KCI_ERROR_ABORTED = 10,
+ GCIP_KCI_ERROR_OUT_OF_RANGE = 11,
+ GCIP_KCI_ERROR_UNIMPLEMENTED = 12,
+ GCIP_KCI_ERROR_INTERNAL = 13,
+ GCIP_KCI_ERROR_UNAVAILABLE = 14,
+ GCIP_KCI_ERROR_DATA_LOSS = 15,
+ GCIP_KCI_ERROR_UNAUTHENTICATED = 16,
+};
+
+/* Type of the chip of the offload vmbox to be linked. */
+enum gcip_kci_offload_chip_type {
+ GCIP_KCI_OFFLOAD_CHIP_TYPE_TPU = 0,
+};
+
+/*
+ * Reason for triggering the CMD doorbell.
+ * The CMD doorbell is triggered either when a CMD is pushed or when a RESP that might block the FW
+ * is consumed.
+ */
+enum gcip_kci_doorbell_reason {
+ GCIP_KCI_PUSH_CMD,
+ GCIP_KCI_CONSUME_RESP,
+};
+
+/* Struct to hold a circular buffer for incoming KCI responses. */
+struct gcip_reverse_kci {
+ /* Reverse kci buffer head. */
+ unsigned long head;
+ /* Reverse kci buffer tail. */
+ unsigned long tail;
+ /*
+ * Maximum number of outstanding KCI requests from firmware.
+ * This is used to size a circular buffer, so it must be a power of 2.
+ */
+ u32 buffer_size;
+ struct gcip_kci_response_element *buffer;
+ /* Lock to push elements in the buffer from the interrupt handler. */
+ spinlock_t producer_lock;
+ /* Lock to pop elements from the buffer in the worker. */
+ spinlock_t consumer_lock;
+ /* Worker to handle responses. */
+ struct work_struct work;
+};
+
+struct gcip_kci;
+
+/*
+ * KCI operators.
+ * For in_interrupt() context, see the implementation of gcip_kci_handle_irq for details.
+ */
+struct gcip_kci_ops {
+ /* Mandatory. */
+ /*
+ * Gets the head of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_head)(struct gcip_kci *kci);
+ /*
+ * Gets the tail of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_tail)(struct gcip_kci *kci);
+ /*
+ * Increases the tail of mailbox command queue by @inc.
+ * Context: normal.
+ */
+ void (*inc_cmd_queue_tail)(struct gcip_kci *kci, u32 inc);
+
+ /*
+ * Gets the size of mailbox response queue.
+ * Context: normal.
+ */
+ u32 (*get_resp_queue_size)(struct gcip_kci *kci);
+ /*
+ * Gets the head of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_head)(struct gcip_kci *kci);
+ /*
+ * Gets the tail of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_tail)(struct gcip_kci *kci);
+ /*
+ * Increases the head of mailbox response queue by @inc.
+ * Context: normal and in_interrupt().
+ */
+ void (*inc_resp_queue_head)(struct gcip_kci *kci, u32 inc);
+ /*
+ * Rings the doorbell.
+ * Context: normal.
+ */
+ void (*trigger_doorbell)(struct gcip_kci *kci, enum gcip_kci_doorbell_reason);
+
+ /* Optional. */
+ /*
+ * Reverse KCI handler called by the worker. Only required if reverse kci is enabled.
+ * Context: normal.
+ */
+ void (*reverse_kci_handle_response)(struct gcip_kci *kci,
+ struct gcip_kci_response_element *resp);
+ /*
+ * Usage updater called by the worker.
+ * Context: normal.
+ */
+ int (*update_usage)(struct gcip_kci *kci);
+};
+
+struct gcip_kci {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+ /* Mailbox used by KCI. */
+ struct gcip_mailbox mailbox;
+ /* Protects cmd_queue. */
+ struct mutex cmd_queue_lock;
+ /* Protects resp_queue. */
+ spinlock_t resp_queue_lock;
+ /* Queue for waiting for the response doorbell to be rung. */
+ wait_queue_head_t resp_doorbell_waitq;
+ /* Protects wait_list. */
+ spinlock_t wait_list_lock;
+ /* Worker of consuming responses. */
+ struct work_struct work;
+ /* Handler for reverse (firmware -> kernel) requests. */
+ struct gcip_reverse_kci rkci;
+ /* Worker that sends update usage KCI. */
+ struct work_struct usage_work;
+ /* KCI operators. */
+ const struct gcip_kci_ops *ops;
+ /* Private data. */
+ void *data;
+};
+
+/*
+ * Arguments for gcip_kci_init.
+ *
+ * For the following arguments, see struct gcip_kci and struct gcip_reverse_kci for details:
+ * `dev`, `rkci_buffer_size`, `ops` and `data`.
+ *
+ * For the following arguments, see struct gcip_mailbox for details. They will be passed to the
+ * struct gcip_mailbox using struct gcip_mailbox_args internally:
+ * `dev`, `cmd_queue`, `resp_queue`, `queue_wrap_bit` and `timeout`.
+ */
+struct gcip_kci_args {
+ struct device *dev;
+ void *cmd_queue;
+ void *resp_queue;
+ u32 queue_wrap_bit;
+ u32 rkci_buffer_size;
+ u32 timeout;
+ const struct gcip_kci_ops *ops;
+ void *data;
+};
+
+/* Initializes a KCI object. */
+int gcip_kci_init(struct gcip_kci *kci, const struct gcip_kci_args *args);
+
+/* Cancels KCI and reverse KCI workers and workers that may send KCIs. */
+void gcip_kci_cancel_work_queues(struct gcip_kci *kci);
+
+/*
+ * Release KCI.
+ * Caller must call gcip_kci_cancel_work_queues before calling gcip_kci_release.
+ */
+void gcip_kci_release(struct gcip_kci *kci);
+
+/*
+ * Pushes an element to cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of response, or a negative errno on error.
+ */
+int gcip_kci_send_cmd(struct gcip_kci *kci, struct gcip_kci_command_element *cmd);
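+
+/*
+ * Usage sketch (illustrative only): querying firmware info, assuming @fw_info_daddr is the device
+ * address of a DMA-mapped struct gcip_fw_info buffer (see gcip-firmware.h) that the firmware fills
+ * out for the FIRMWARE_INFO command.
+ *
+ *	struct gcip_kci_command_element cmd = {
+ *		.code = GCIP_KCI_CODE_FIRMWARE_INFO,
+ *		.dma = {
+ *			.address = fw_info_daddr,
+ *			.size = sizeof(struct gcip_fw_info),
+ *		},
+ *	};
+ *	int ret = gcip_kci_send_cmd(kci, &cmd);
+ *
+ *	if (ret)	// response code other than GCIP_KCI_ERROR_OK, or a negative errno
+ *		dev_warn(kci->dev, "FIRMWARE_INFO failed: %d\n", ret);
+ */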
+
+/*
+ * Pushes an element to cmd queue and waits for the response.
+ * Returns -ETIMEDOUT if no response is received within kci->mailbox.timeout msecs.
+ *
+ * Returns the code of response, or a negative errno on error.
+ * @resp is updated with the response, so the caller can retrieve the returned retval field.
+ */
+int gcip_kci_send_cmd_return_resp(struct gcip_kci *kci, struct gcip_kci_command_element *cmd,
+ struct gcip_kci_response_element *resp);
+
+/*
+ * Interrupt handler.
+ * This function should be called when the interrupt of KCI mailbox is fired.
+ */
+void gcip_kci_handle_irq(struct gcip_kci *kci);
+
+/*
+ * Schedules a usage update worker.
+ *
+ * For functions that don't require the usage to be updated immediately, use this function instead
+ * of update_usage in struct gcip_kci_ops.
+ */
+void gcip_kci_update_usage_async(struct gcip_kci *kci);
+
+/* Gets the KCI private data. */
+static inline void *gcip_kci_get_data(struct gcip_kci *kci)
+{
+ return kci->data;
+}
+
+/* Returns the element size according to @type. */
+static inline u32 gcip_kci_queue_element_size(enum gcip_mailbox_queue_type type)
+{
+ if (type == GCIP_MAILBOX_CMD_QUEUE)
+ return sizeof(struct gcip_kci_command_element);
+ else
+ return sizeof(struct gcip_kci_response_element);
+}
+
+static inline u64 gcip_kci_get_cur_seq(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_cur_seq(&kci->mailbox);
+}
+
+static inline struct gcip_kci_command_element *gcip_kci_get_cmd_queue(struct gcip_kci *kci)
+{
+ return (struct gcip_kci_command_element *)gcip_mailbox_get_cmd_queue(&kci->mailbox);
+}
+
+static inline struct gcip_kci_response_element *gcip_kci_get_resp_queue(struct gcip_kci *kci)
+{
+ return (struct gcip_kci_response_element *)gcip_mailbox_get_resp_queue(&kci->mailbox);
+}
+
+static inline u64 gcip_kci_get_queue_wrap_bit(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_queue_wrap_bit(&kci->mailbox);
+}
+
+static inline struct list_head *gcip_kci_get_wait_list(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_wait_list(&kci->mailbox);
+}
+
+static inline u32 gcip_kci_get_timeout(struct gcip_kci *kci)
+{
+ return gcip_mailbox_get_timeout(&kci->mailbox);
+}
+
+static inline unsigned long gcip_rkci_get_head(struct gcip_kci *kci)
+{
+ return kci->rkci.head;
+}
+
+static inline unsigned long gcip_rkci_get_tail(struct gcip_kci *kci)
+{
+ return kci->rkci.tail;
+}
+
+#endif /* __GCIP_KCI_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
new file mode 100644
index 0000000..c88d2d7
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -0,0 +1,554 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GCIP Mailbox Interface.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_MAILBOX_H__
+#define __GCIP_MAILBOX_H__
+
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define CIRC_QUEUE_WRAPPED(idx, wrap_bit) ((idx) & (wrap_bit))
+#define CIRC_QUEUE_INDEX_MASK(wrap_bit) ((wrap_bit) - 1)
+#define CIRC_QUEUE_VALID_MASK(wrap_bit) (CIRC_QUEUE_INDEX_MASK(wrap_bit) | (wrap_bit))
+#define CIRC_QUEUE_REAL_INDEX(idx, wrap_bit) ((idx) & CIRC_QUEUE_INDEX_MASK(wrap_bit))
+
+#define CIRC_QUEUE_MAX_SIZE(wrap_bit) ((wrap_bit) - 1)
+
+/*
+ * The status field in a firmware response is set to this by us when the response is fetched from
+ * the queue.
+ */
+#define GCIP_MAILBOX_STATUS_OK (0)
+/*
+ * gcip_mailbox#wait_list uses this value to record the status of responses that haven't been
+ * received yet.
+ */
+#define GCIP_MAILBOX_STATUS_WAITING_RESPONSE (1)
+/*
+ * Used when an expected response is not received, see the documentation of
+ * gcip_mailbox_consume_wait_list() for details.
+ */
+#define GCIP_MAILBOX_STATUS_NO_RESPONSE (2)
+
+/* To specify the operation is toward cmd or resp queue. */
+enum gcip_mailbox_queue_type { GCIP_MAILBOX_CMD_QUEUE, GCIP_MAILBOX_RESP_QUEUE };
+
+/* Utilities of circular queue operations */
+
+/*
+ * Returns the number of elements in a circular queue given its @head, @tail,
+ * and @queue_size.
+ */
+static inline u32 gcip_circ_queue_cnt(u32 head, u32 tail, u32 queue_size, u32 wrap_bit)
+{
+ u32 ret;
+
+ if (CIRC_QUEUE_WRAPPED(tail, wrap_bit) != CIRC_QUEUE_WRAPPED(head, wrap_bit))
+ ret = queue_size - CIRC_QUEUE_REAL_INDEX(head, wrap_bit) +
+ CIRC_QUEUE_REAL_INDEX(tail, wrap_bit);
+ else
+ ret = tail - head;
+
+ if (unlikely(ret > queue_size))
+ return 0;
+
+ return ret;
+}
+
+/* Increases @index of a circular queue by @inc. */
+static inline u32 gcip_circ_queue_inc(u32 index, u32 inc, u32 queue_size, u32 wrap_bit)
+{
+ u32 new_index = CIRC_QUEUE_REAL_INDEX(index, wrap_bit) + inc;
+
+ if (unlikely(new_index >= queue_size))
+ return (index + inc - queue_size) ^ wrap_bit;
+ else
+ return index + inc;
+}
+
+/*
+ * Checks if @size is a valid circular queue size, which should be a positive
+ * number less than or equal to CIRC_QUEUE_MAX_SIZE(wrap_bit).
+ */
+static inline bool gcip_valid_circ_queue_size(u32 size, u32 wrap_bit)
+{
+ if (!size || size > CIRC_QUEUE_MAX_SIZE(wrap_bit))
+ return false;
+ return true;
+}
+
+struct gcip_mailbox;
+
+/* Wrapper struct for responses consumed by a thread other than the one which sent the command. */
+struct gcip_mailbox_resp_awaiter {
+ /* Response. */
+ void *resp;
+ /* The work which will be executed when the timeout occurs. */
+ struct delayed_work timeout_work;
+ /*
+ * If this response times out, this pointer to the owning mailbox is
+ * needed to delete this response from the list of pending responses.
+ */
+ struct gcip_mailbox *mailbox;
+ /* User-defined data. */
+ void *data;
+ /* Reference count. */
+ refcount_t refs;
+ /*
+ * The callback for releasing the @data.
+ * It will be set as @release_awaiter_data of struct gcip_mailbox_ops.
+ */
+ void (*release_data)(void *data);
+};
+
+/*
+ * Mailbox operators.
+ * For in_interrupt() context, see the implementation of gcip_mailbox_handle_irq for details.
+ */
+struct gcip_mailbox_ops {
+ /* Mandatory. */
+ /*
+ * Gets the head of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_head)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the tail of mailbox command queue.
+ * Context: normal.
+ */
+ u32 (*get_cmd_queue_tail)(struct gcip_mailbox *mailbox);
+ /*
+ * Increases the tail of mailbox command queue by @inc.
+ * Context: normal.
+ */
+ void (*inc_cmd_queue_tail)(struct gcip_mailbox *mailbox, u32 inc);
+ /*
+ * Acquires the lock of cmd_queue. If @try is true, "_trylock" functions can be used, but
+	 * @try can also be ignored. Returns 1 on success, 0 on failure. This callback will be
+	 * called in the following situations:
+ * - Enqueue a command to the cmd_queue.
+ * The lock can be mutex lock or spin lock and it will be released by calling
+ * `release_cmd_queue_lock` callback.
+ * Context: normal.
+ */
+ int (*acquire_cmd_queue_lock)(struct gcip_mailbox *mailbox, bool try);
+ /*
+ * Releases the lock of cmd_queue which is acquired by calling `acquire_cmd_queue_lock`.
+ * Context: normal.
+ */
+ void (*release_cmd_queue_lock)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the sequence number of @cmd queue element.
+ * Context: normal.
+ */
+ u64 (*get_cmd_elem_seq)(struct gcip_mailbox *mailbox, void *cmd);
+ /*
+ * Sets the sequence number of @cmd queue element.
+ * Context: normal.
+ */
+ void (*set_cmd_elem_seq)(struct gcip_mailbox *mailbox, void *cmd, u64 seq);
+ /*
+ * Gets the code of @cmd queue element.
+ * Context: normal.
+ */
+ u32 (*get_cmd_elem_code)(struct gcip_mailbox *mailbox, void *cmd);
+
+ /*
+ * Gets the size of mailbox response queue.
+ * Context: normal.
+ */
+ u32 (*get_resp_queue_size)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the head of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_head)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the tail of mailbox response queue.
+ * Context: normal and in_interrupt().
+ */
+ u32 (*get_resp_queue_tail)(struct gcip_mailbox *mailbox);
+ /*
+ * Increases the head of mailbox response queue by @inc.
+ * Context: normal and in_interrupt().
+ */
+ void (*inc_resp_queue_head)(struct gcip_mailbox *mailbox, u32 inc);
+ /*
+ * Acquires the lock of resp_queue. If @try is true, "_trylock" functions can be used, but
+	 * @try can also be ignored. Returns 1 on success, 0 on failure. This callback will be
+	 * called in the following situations:
+ * - Fetch response(s) from the resp_queue.
+ * The lock can be a mutex lock or a spin lock. However, if @try is considered and the
+ * "_trylock" is used, it must be a spin lock only.
+ * The lock will be released by calling `release_resp_queue_lock` callback.
+ * Context: normal and in_interrupt().
+ */
+ int (*acquire_resp_queue_lock)(struct gcip_mailbox *mailbox, bool try);
+ /*
+ * Releases the lock of resp_queue which is acquired by calling `acquire_resp_queue_lock`.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_resp_queue_lock)(struct gcip_mailbox *mailbox);
+ /*
+ * Gets the sequence number of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ u64 (*get_resp_elem_seq)(struct gcip_mailbox *mailbox, void *resp);
+ /*
+ * Sets the sequence number of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ void (*set_resp_elem_seq)(struct gcip_mailbox *mailbox, void *resp, u64 seq);
+ /*
+ * Gets the status of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ u16 (*get_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp);
+ /*
+ * Sets the status of @resp queue element.
+ * Context: normal and in_interrupt().
+ */
+ void (*set_resp_elem_status)(struct gcip_mailbox *mailbox, void *resp, u16 status);
+
+ /*
+ * Acquires the lock of wait_list. If @irqsave is true, "_irqsave" functions can be used to
+	 * store the irq state to @flags, but @irqsave can also be ignored.
+	 * This callback will be called in the following situations:
+	 * - Push a waiting response to the @mailbox->wait_list.
+	 * - Delete a waiting response from the @mailbox->wait_list.
+	 * - Handle an arrived response and delete it from the @mailbox->wait_list.
+	 * - Flush the asynchronous responses in the @mailbox->wait_list when releasing the @mailbox.
+	 * The lock can be a mutex lock or a spin lock. However, if @irqsave is considered and
+	 * "_irqsave" is used, it must be a spin lock only.
+ * The lock will be released by calling `release_wait_list_lock` callback.
+ * Context: normal and in_interrupt().
+ */
+ void (*acquire_wait_list_lock)(struct gcip_mailbox *mailbox, bool irqsave,
+ unsigned long *flags);
+ /*
+ * Releases the lock of wait_list which is acquired by calling `acquire_wait_list_lock`.
+ * If @irqsave is true, restores @flags from `acquire_wait_list_lock` to the irq state.
+ * Or it can be ignored, if @irqsave was not considered in the `acquire_wait_list_lock`.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_wait_list_lock)(struct gcip_mailbox *mailbox, bool irqrestore,
+ unsigned long flags);
+
+ /* Optional. */
+ /*
+	 * Waits until the cmd queue of @mailbox has available space for putting the command. If
+	 * the queue has space, returns 0. Otherwise, returns a non-zero error. Depending on the
+	 * implementation details, it is okay to return right away with an error when the
+	 * queue is full. If this callback returns an error, the `gcip_mailbox_send_cmd` or
+	 * `gcip_mailbox_put_cmd` function will return that error too. This callback is called with
+	 * the `cmd_queue_lock` being held.
+	 *
+	 * Note: if this callback is NULL, the fullness of cmd_queue is simply checked and a
+	 * -EAGAIN error is returned right away if it is full. Please refer to the implementation
+	 * of the `gcip_mailbox_enqueue_cmd` function.
+ *
+ * Context: normal.
+ */
+ int (*wait_for_cmd_queue_not_full)(struct gcip_mailbox *mailbox);
+ /*
+ * This callback will be called before putting the @resp into @mailbox->wait_list and
+ * putting @cmd of @resp into the command queue. After this callback returns, the consumer
+ * is able to start processing it and the mailbox is going to wait for it. Therefore, this
+ * callback is the final checkpoint of deciding whether it is good to wait for the response
+	 * or not. If you don't want to wait for it, return a non-zero error.
+	 *
+	 * If the implementing side has its own wait queue, this callback is a suitable place to
+	 * put @resp or @awaiter into it.
+ *
+ * If @resp is synchronous, @awaiter will be NULL.
+ *
+ * Context: normal.
+ */
+ int (*before_enqueue_wait_list)(struct gcip_mailbox *mailbox, void *resp,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * This callback will be called after putting the @cmd to the command queue. It can be used
+ * for triggering the doorbell. Also, @mailbox->cur_seq will be increased by the return
+	 * value. If an error occurs, it returns a negative value and @mailbox->cur_seq will not be changed
+ * in that case. If this callback is not defined, @mailbox->cur_seq will be increased by 1
+ * each time cmd enters the queue. This is called with the `cmd_queue_lock` being held.
+ * Context: normal.
+ */
+ int (*after_enqueue_cmd)(struct gcip_mailbox *mailbox, void *cmd);
+ /*
+ * This callback will be called after fetching responses. It can be used for triggering
+ * a signal to break up waiting consuming the response queue. This is called without
+ * holding any locks.
+ * - @num_resps: the number of fetched responses.
+ * Context: normal and in_interrupt().
+ */
+ void (*after_fetch_resps)(struct gcip_mailbox *mailbox, u32 num_resps);
+ /*
+	 * This callback will be called before handling each fetched response. If this callback
+	 * is not defined or returns true, the mailbox will handle the @resp normally. If the @resp
+	 * should not be handled, it should return false. This is called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ bool (*before_handle_resp)(struct gcip_mailbox *mailbox, const void *resp);
+ /*
+	 * Handles an asynchronous response which arrives successfully. How to handle it depends on the
+ * chip implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called with the `wait_list_lock` being held.
+ * Context: normal and in_interrupt().
+ */
+ void (*handle_awaiter_arrived)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Handles the timed out asynchronous response. How to handle it depends on the chip
+ * implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ void (*handle_awaiter_timedout)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+	 * Cleans up an asynchronous response which has not arrived yet and has not timed out.
+	 * The @awaiter should be marked as unprocessable so that it won't be processed by
+	 * the `handle_awaiter_arrived` or `handle_awaiter_timedout` callbacks in race
+	 * conditions. The @awaiter doesn't have to be released by calling the
+	 * `gcip_mailbox_release_awaiter` function here; it will be released internally. This is
+	 * called with the `wait_list_lock` being held.
+ * Context: normal.
+ */
+ void (*flush_awaiter)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
+ /*
+ * Releases the @data which was passed to the `gcip_mailbox_put_cmd` function. This is
+ * called without holding any locks.
+ * Context: normal and in_interrupt().
+ */
+ void (*release_awaiter_data)(void *data);
+};
+
+struct gcip_mailbox {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+	/* Wrap bit for both cmd and resp queue. */
+ u64 queue_wrap_bit;
+ /* Cmd sequence number. */
+ u64 cur_seq;
+
+ /* Cmd queue pointer. */
+ void *cmd_queue;
+ /* Size of element of cmd queue. */
+ u32 cmd_elem_size;
+
+ /* Resp queue pointer. */
+ void *resp_queue;
+ /* Size of element of resp queue. */
+ u32 resp_elem_size;
+
+ /* List of commands that need to wait for responses. */
+ struct list_head wait_list;
+ /* Queue for waiting for the wait_list to be consumed. */
+ wait_queue_head_t wait_list_waitq;
+
+ /* Mailbox timeout in milliseconds. */
+ u32 timeout;
+ /* Mailbox operators. */
+ const struct gcip_mailbox_ops *ops;
+ /* User-defined data. */
+ void *data;
+
+ /*
+	 * The flag to specify that sequence numbers of command responses are not
+	 * required to be in order.
+ */
+ bool ignore_seq_order;
+};
+
+/* Arguments for gcip_mailbox_init. See struct gcip_mailbox for details. */
+struct gcip_mailbox_args {
+ struct device *dev;
+ u32 queue_wrap_bit;
+
+ void *cmd_queue;
+ u32 cmd_elem_size;
+
+ void *resp_queue;
+ u32 resp_elem_size;
+
+ u32 timeout;
+ const struct gcip_mailbox_ops *ops;
+ void *data;
+
+ bool ignore_seq_order;
+};
+
+/* Initializes a mailbox object. */
+int gcip_mailbox_init(struct gcip_mailbox *mailbox, const struct gcip_mailbox_args *args);
+
+/* Releases a mailbox object which is initialized by gcip_mailbox_init */
+void gcip_mailbox_release(struct gcip_mailbox *mailbox);
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ * To consume response queue and get responses, this function should be used as deferred work
+ * such as `struct work_struct` or `struct kthread_work`.
+ *
+ * Note: this worker is scheduled from the IRQ handler; to prevent use-after-free or race-condition
+ * bugs, cancel all work before freeing the mailbox.
+ */
+void gcip_mailbox_consume_responses_work(struct gcip_mailbox *mailbox);
+
+/*
+ * Pushes an element to cmd queue and waits for the response (synchronous).
+ * Returns -ETIMEDOUT if no response is received within mailbox->timeout msecs.
+ *
+ * Returns the code of response, or a negative errno on error.
+ * @resp is updated with the response, so the caller can retrieve the returned retval field.
+ */
+int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp);
+
+/*
+ * Executes @cmd command asynchronously. This function returns an instance of
+ * `struct gcip_mailbox_resp_awaiter` which handles the arrival and time-out of the response.
+ * The implementation side can cancel the asynchronous response by calling the
+ * `gcip_mailbox_cancel_awaiter` or `gcip_mailbox_cancel_awaiter_timeout` function with it.
+ *
+ * Arrived asynchronous response will be handled by `handle_awaiter_arrived` callback and timed out
+ * asynchronous response will be handled by `handle_awaiter_timedout` callback. Those callbacks
+ * will pass the @awaiter as a parameter which is the same with the return of this function.
+ * The response can be accessed from `resp` member of it. Also, the @data passed to this function
+ * can be accessed from `data` member variable of it. The @awaiter must be released by calling
+ * the `gcip_mailbox_release_awaiter` function when it is not needed anymore.
+ *
+ * If the mailbox is released before the response arrives, all the waiting asynchronous responses
+ * will be flushed. In this case, the `flush_awaiter` callback will be called for that response
+ * and @awaiter doesn't have to be released by the implementation side.
+ * (i.e., the `gcip_mailbox_release_awaiter` function will be called internally.)
+ *
+ * The caller defines the way of cleaning up the @data to the `release_awaiter_data` callback.
+ * This callback will be called when the `gcip_mailbox_release_awaiter` function is called or
+ * the response is flushed.
+ *
+ * If this function fails to request the command, it will return an error pointer. In this case,
+ * the caller should free @data explicitly (i.e., the `release_awaiter_data` callback will not
+ * be called).
+ *
+ * Note: the asynchronous responses fetched from @resp_queue should be released by calling the
+ * `gcip_mailbox_release_awaiter` function.
+ *
+ * Note: if the life cycle of the mailbox is longer than the caller part, you should make sure
+ * that the callbacks don't access the variables of caller part after the release of it.
+ *
+ * Note: if you don't need the result of the response (e.g., if you pass @resp as NULL), you
+ * can release the returned awaiter right away by calling the `gcip_mailbox_release_awaiter`
+ * function.
+ */
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data);
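+
+/*
+ * Usage sketch (illustrative only): sending a command asynchronously. @cmd, @resp and "my_data"
+ * are assumed to be prepared by the caller; per the description above, `release_awaiter_data` is
+ * not called when this function fails, so my_data must be freed by the caller on that path
+ * ("my_data_free" is a hypothetical cleanup helper).
+ *
+ *	struct gcip_mailbox_resp_awaiter *awaiter;
+ *
+ *	awaiter = gcip_mailbox_put_cmd(mailbox, &cmd, &resp, my_data);
+ *	if (IS_ERR(awaiter)) {
+ *		my_data_free(my_data);
+ *		return PTR_ERR(awaiter);
+ *	}
+ *	// The response is later delivered to ops->handle_awaiter_arrived() or
+ *	// ops->handle_awaiter_timedout(); whoever consumes it must call
+ *	// gcip_mailbox_release_awaiter(awaiter) when done.
+ */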
+
+/*
+ * Cancels awaiting the asynchronous response.
+ * This function will remove @awaiter from the waiting list to make it not to be handled by the
+ * arrived callback. Also, it will cancel the timeout work of @awaiter synchronously. Therefore,
+ * AFTER the return of this function, you can guarantee that arrived or timedout callback will
+ * not be called for @awaiter.
+ *
+ * However, note that due to a race condition, the arrived or timedout callback can be executed
+ * BEFORE this function returns. (i.e., this function and the arrived/timedout callback are called
+ * at the same time but the callback acquires the lock earlier.)
+ *
+ * Note: this function will cancel or wait for the completion of arrived or timedout callbacks
+ * synchronously. Therefore, make sure that the caller side doesn't hold any locks which can be
+ * acquired by the arrived or timedout callbacks.
+ *
+ * If you already got a response of @awaiter and want to ensure that timedout handler is finished,
+ * you can use the `gcip_mailbox_cancel_awaiter_timeout` function instead.
+ */
+void gcip_mailbox_cancel_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Cancels the timeout work of the asynchronous response. Normally, either the response arrives
+ * and the timeout is canceled, or the response times out and the timeout handler executes.
+ * However, rarely, the response handler cancels the timeout handler while it is already in
+ * progress. To handle this and ensure any in-progress timeout handler is able to exit cleanly, it
+ * is recommended to call this function after fetching the asynchronous response even though the
+ * response arrived successfully.
+ *
+ * Note: this function will cancel or wait for the completion of timedout callbacks synchronously.
+ * Therefore, make sure that the caller side doesn't hold any locks which can be acquired by the
+ * timedout callbacks.
+ *
+ * If you haven't gotten a response of @awaiter yet and want to make it not to be processed by
+ * arrived and timedout callbacks, use the `gcip_mailbox_cancel_awaiter` function.
+ */
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Releases @awaiter. Every fetched (arrived or timed out) asynchronous responses should be
+ * released by calling this. It will call the `release_awaiter_data` callback internally.
+ */
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
+
+/*
+ * Consumes one response and handles it. This can be used to consume one response quickly and then
+ * schedule the `gcip_mailbox_consume_responses_work` work in the IRQ handler of the mailbox.
+ */
+void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp);
+
+/* Getters for member variables of the `struct gcip_mailbox`. */
+
+static inline u64 gcip_mailbox_get_cur_seq(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cur_seq;
+}
+
+static inline void *gcip_mailbox_get_cmd_queue(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cmd_queue;
+}
+
+static inline u32 gcip_mailbox_get_cmd_elem_size(struct gcip_mailbox *mailbox)
+{
+ return mailbox->cmd_elem_size;
+}
+
+static inline void *gcip_mailbox_get_resp_queue(struct gcip_mailbox *mailbox)
+{
+ return mailbox->resp_queue;
+}
+
+static inline u32 gcip_mailbox_get_resp_elem_size(struct gcip_mailbox *mailbox)
+{
+ return mailbox->resp_elem_size;
+}
+
+static inline u64 gcip_mailbox_get_queue_wrap_bit(struct gcip_mailbox *mailbox)
+{
+ return mailbox->queue_wrap_bit;
+}
+
+static inline struct list_head *gcip_mailbox_get_wait_list(struct gcip_mailbox *mailbox)
+{
+ return &mailbox->wait_list;
+}
+
+static inline u32 gcip_mailbox_get_timeout(struct gcip_mailbox *mailbox)
+{
+ return mailbox->timeout;
+}
+
+static inline void *gcip_mailbox_get_data(struct gcip_mailbox *mailbox)
+{
+ return mailbox->data;
+}
+
+#endif /* __GCIP_MAILBOX_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-mem-pool.h b/gcip-kernel-driver/include/gcip/gcip-mem-pool.h
new file mode 100644
index 0000000..44ea5f5
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-mem-pool.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * A simple memory allocator to help allocating reserved memory pools.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_MEM_POOL_H__
+#define __GCIP_MEM_POOL_H__
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/types.h>
+
+struct gcip_mem_pool {
+ struct device *dev;
+ struct gen_pool *gen_pool;
+ unsigned long base_addr;
+ size_t granule;
+};
+
+/*
+ * Initializes the memory pool object.
+ *
+ * @pool: The memory pool object to be initialized.
+ * @dev: Used for logging only.
+ * @base_addr: The base address of the pool. Must be greater than 0 and a multiple of @granule.
+ * @size: The size of the pool. @size should be a multiple of @granule.
+ * @granule: The granule when invoking the allocator. Should be a power of 2.
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ *
+ * Call gcip_mem_pool_exit() to release the resources of @pool.
+ */
+int gcip_mem_pool_init(struct gcip_mem_pool *pool, struct device *dev, unsigned long base_addr,
+ size_t size, size_t granule);
+/*
+ * Releases resources of @pool.
+ *
+ * Note: you must release (by calling gcip_mem_pool_free) all allocations before calling this
+ * function.
+ */
+void gcip_mem_pool_exit(struct gcip_mem_pool *pool);
+
+/*
+ * Allocates and returns the allocated address.
+ *
+ * @size: Size to be allocated.
+ *
+ * Returns the allocated address. Returns 0 on allocation failure.
+ */
+unsigned long gcip_mem_pool_alloc(struct gcip_mem_pool *pool, size_t size);
+/*
+ * Returns an address previously allocated by gcip_mem_pool_alloc() back to the pool.
+ *
+ * The size and address must match what was previously passed to / returned by
+ * gcip_mem_pool_alloc().
+ */
+void gcip_mem_pool_free(struct gcip_mem_pool *pool, unsigned long addr, size_t size);
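+
+/*
+ * Usage sketch (illustrative only; the base address, pool size and granule below are arbitrary
+ * example values that satisfy the alignment requirements documented above):
+ *
+ *	struct gcip_mem_pool pool;
+ *	unsigned long addr;
+ *	int ret = gcip_mem_pool_init(&pool, dev, 0x80000000, SZ_1M, SZ_4K);
+ *
+ *	if (ret)
+ *		return ret;
+ *	addr = gcip_mem_pool_alloc(&pool, SZ_8K);
+ *	if (addr)
+ *		gcip_mem_pool_free(&pool, addr, SZ_8K);
+ *	gcip_mem_pool_exit(&pool);
+ */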
+
+/*
+ * Returns the offset between @addr and @base_addr passed to gcip_mem_pool_init().
+ *
+ * @addr must be a value returned by gcip_mem_pool_alloc().
+ */
+static inline size_t gcip_mem_pool_offset(struct gcip_mem_pool *pool, unsigned long addr)
+{
+ return addr - pool->base_addr;
+}
+
+#endif /* __GCIP_MEM_POOL_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-pm.h b/gcip-kernel-driver/include/gcip/gcip-pm.h
new file mode 100644
index 0000000..4842598
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-pm.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Power management support for GCIP devices.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GCIP_PM_H__
+#define __GCIP_PM_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct gcip_pm {
+ struct device *dev;
+ /* Worker to handle async power down retry. */
+ struct delayed_work power_down_work;
+
+ /* Lock to protect the members listed below. */
+ struct mutex lock;
+ /* Power up counter. Protected by @lock */
+ int count;
+ /* Flag indicating a deferred power down is pending. Protected by @lock */
+ bool power_down_pending;
+
+ /* Callbacks. See struct gcip_pm_args. */
+ void *data;
+ int (*after_create)(void *data);
+ void (*before_destroy)(void *data);
+ int (*power_up)(void *data);
+ int (*power_down)(void *data);
+};
+
+struct gcip_pm_args {
+ /* Device struct for logging. */
+ struct device *dev;
+
+ /* Private data for the callbacks listed below. */
+ void *data;
+ /*
+ * Device-specific power up.
+	 * Called with @pm->lock held; nesting is handled at the generic layer.
+ */
+ int (*power_up)(void *data);
+ /*
+ * Device-specific power down.
+	 * Called with @pm->lock held; nesting is handled at the generic layer.
+ * Returning -EAGAIN will trigger a retry after GCIP_ASYNC_POWER_DOWN_RETRY_DELAY ms.
+ */
+ int (*power_down)(void *data);
+	/* Optional. For initial setup after the interface is initialized. */
+ int (*after_create)(void *data);
+ /* Optional. For clean-up before the interface is destroyed. */
+ void (*before_destroy)(void *data);
+};
+
+/* Allocates and initializes a power management interface for the GCIP device. */
+struct gcip_pm *gcip_pm_create(const struct gcip_pm_args *args);
+
+/* Destroys and frees the power management interface. */
+void gcip_pm_destroy(struct gcip_pm *pm);
+
+/*
+ * These mimic the pm_runtime_{get|put} functions to keep a reference count of requests in order to
+ * keep the device up and turn it off.
+ * Note that we don't keep track of system suspend/resume state since the system power management
+ * will respect the parent-child sequencing to use a bottom-up order to suspend devices and a
+ * top-down order to resume devices. No one would have the ability to acquire or release a wakelock
+ * when the device is suspending or resuming.
+ */
+
+/*
+ * Increases @pm->count if the device is already powered on.
+ *
+ * Caller should call gcip_pm_put() to decrease @pm->count if this function returns 0.
+ * If @blocking is true, it will wait until the ongoing power state transition finishes (i.e.,
+ * gcip_pm_{get,put,shutdown} called by another thread returns) and then check the power state.
+ * If @blocking is false, -EAGAIN is returned immediately when there is an ongoing power state
+ * transition.
+ *
+ * Returns 0 on success; otherwise -EAGAIN if the device is off or in power state transition when
+ * @blocking is false.
+ */
+int gcip_pm_get_if_powered(struct gcip_pm *pm, bool blocking);
+
+/*
+ * Increases @pm->count and powers up the device if previous @pm->count was zero.
+ *
+ * Returns 0 on success; otherwise negative error values.
+ */
+int gcip_pm_get(struct gcip_pm *pm);
+
+/*
+ * Decreases @pm->count and powers off the device if @pm->count reaches zero.
+ * If .power_down fails, async work will be scheduled to retry after
+ * GCIP_ASYNC_POWER_DOWN_RETRY_DELAY ms.
+ */
+void gcip_pm_put(struct gcip_pm *pm);
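+
+/*
+ * Usage sketch (illustrative only): keeping the device powered around an access;
+ * "do_work_with_device" is a hypothetical helper.
+ *
+ *	int ret = gcip_pm_get(pm);
+ *
+ *	if (ret)
+ *		return ret;
+ *	ret = do_work_with_device(dev);
+ *	gcip_pm_put(pm);
+ *	return ret;
+ */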
+
+/* Gets the power up counter. Note that this is checked without PM lock. */
+int gcip_pm_get_count(struct gcip_pm *pm);
+
+/* Checks if device is already on. Note that this is checked without PM lock. */
+bool gcip_pm_is_powered(struct gcip_pm *pm);
+
+/* Shuts down the device if @pm->count equals 0 or @force is true. */
+void gcip_pm_shutdown(struct gcip_pm *pm, bool force);
+
+/* Asserts that @pm->lock is held. */
+static inline void gcip_pm_lockdep_assert_held(struct gcip_pm *pm)
+{
+ if (!pm)
+ return;
+
+ lockdep_assert_held(&pm->lock);
+}
+
+/*
+ * Lock the PM lock.
+ * Since all the PM requests will be blocked until gcip_pm_unlock is called, one should use
+ * gcip_pm_{get,get_if_powered,put} if possible and use this only if a power state transition
+ * cannot be triggered, e.g., in a workqueue that will be canceled during power off or in a crash
+ * handler.
+ */
+static inline void gcip_pm_lock(struct gcip_pm *pm)
+{
+ if (!pm)
+ return;
+
+ mutex_lock(&pm->lock);
+}
+
+/*
+ * Lock the PM lock.
+ * Same as gcip_pm_lock, but returns 1 if the lock has been acquired successfully, and 0 on
+ * contention.
+ */
+static inline int gcip_pm_trylock(struct gcip_pm *pm)
+{
+ if (!pm)
+ return 1;
+
+ return mutex_trylock(&pm->lock);
+}
+
+/* Unlock the PM lock. */
+static inline void gcip_pm_unlock(struct gcip_pm *pm)
+{
+ if (!pm)
+ return;
+
+ lockdep_assert_held(&pm->lock);
+ mutex_unlock(&pm->lock);
+}
+
+#endif /* __GCIP_PM_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-telemetry.h b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
new file mode 100644
index 0000000..ad26ee9
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-telemetry.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GCIP telemetry: logging and tracing.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GCIP_TELEMETRY_H__
+#define __GCIP_TELEMETRY_H__
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/mutex.h>
+#include <linux/rwlock_types.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Log level codes used by gcip firmware. */
+#define GCIP_FW_LOG_LEVEL_VERBOSE (2)
+#define GCIP_FW_LOG_LEVEL_DEBUG (1)
+#define GCIP_FW_LOG_LEVEL_INFO (0)
+#define GCIP_FW_LOG_LEVEL_WARN (-1)
+#define GCIP_FW_LOG_LEVEL_ERROR (-2)
+#define GCIP_FW_LOG_LEVEL_FATAL (-3)
+
+#define GCIP_FW_DMESG_LOG_LEVEL (GCIP_FW_LOG_LEVEL_WARN)
+
+/* When log data arrives, recheck for more log data after this delay. */
+#define GCIP_TELEMETRY_LOG_RECHECK_DELAY 200 /* ms */
+
+enum gcip_telemetry_state {
+ GCIP_TELEMETRY_DISABLED = 0,
+ GCIP_TELEMETRY_ENABLED = 1,
+ GCIP_TELEMETRY_INVALID = -1,
+};
+
+/* Specifies the target of an operation. */
+enum gcip_telemetry_type {
+ GCIP_TELEMETRY_LOG = 0,
+ GCIP_TELEMETRY_TRACE = 1,
+};
+
+struct gcip_telemetry_header {
+ u32 head;
+ u32 size;
+ u32 reserved0[14]; /* Place head and tail into different cache lines */
+ u32 tail;
+ u32 entries_dropped; /* Number of entries dropped due to buffer full */
+ u32 reserved1[14]; /* Pad to 128 bytes in total */
+};
+
+struct gcip_log_entry_header {
+ s16 code;
+ u16 length;
+ u64 timestamp;
+ u16 crc16;
+} __packed;
+
+struct gcip_telemetry {
+ /* Device used for logging and memory allocation. */
+ struct device *dev;
+
+ /*
+ * State transitions prevent races with IRQ handlers, e.g. an interrupt arriving while the
+ * kernel is releasing the buffers.
+ */
+ enum gcip_telemetry_state state;
+ spinlock_t state_lock; /* protects state */
+
+ struct gcip_telemetry_header *header;
+
+ struct eventfd_ctx *ctx; /* signal this to notify the runtime */
+ rwlock_t ctx_lock; /* protects ctx */
+ const char *name; /* for debugging */
+
+ struct work_struct work; /* worker for handling data */
+ /* Fallback function to call for default log/trace handling. */
+ void (*fallback_fn)(struct gcip_telemetry *tel);
+ struct mutex mmap_lock; /* protects mmapped_count */
+ long mmapped_count; /* number of VMAs that are mapped to this telemetry buffer */
+};
+
+struct gcip_kci;
+
+struct gcip_telemetry_kci_args {
+ struct gcip_kci *kci;
+ u64 addr;
+ u32 size;
+};
+
+/* Sends telemetry KCI through send kci callback and args. */
+int gcip_telemetry_kci(struct gcip_telemetry *tel,
+ int (*send_kci)(struct gcip_telemetry_kci_args *),
+ struct gcip_telemetry_kci_args *args);
+/* Sets the eventfd for telemetry. */
+int gcip_telemetry_set_event(struct gcip_telemetry *tel, u32 eventfd);
+/* Unsets the eventfd for telemetry. */
+void gcip_telemetry_unset_event(struct gcip_telemetry *tel);
+/* Fallback to log messages from host CPU to dmesg. */
+void gcip_telemetry_fw_log(struct gcip_telemetry *log);
+/* Fallback to consume the trace buffer. */
+void gcip_telemetry_fw_trace(struct gcip_telemetry *trace);
+/* Interrupt handler to schedule the worker when the buffer is not empty. */
+void gcip_telemetry_irq_handler(struct gcip_telemetry *tel);
+/* Increases the telemetry mmap count by @dif. */
+void gcip_telemetry_inc_mmap_count(struct gcip_telemetry *tel, int dif);
+/* Mmaps the telemetry buffer through mmap callback and args. */
+int gcip_telemetry_mmap_buffer(struct gcip_telemetry *tel, int (*mmap)(void *), void *args);
+/*
+ * Initializes struct gcip_telemetry.
+ *
+ * @vaddr: Virtual address of the queue buffer.
+ * @size: Size of the queue buffer. Must be a power of 2 and greater than the size of struct
+ * gcip_telemetry_header.
+ * @fallback_fn: Fallback function to call for default log/trace handling.
+ */
+int gcip_telemetry_init(struct device *dev, struct gcip_telemetry *tel, const char *name,
+ void *vaddr, const size_t size,
+ void (*fallback_fn)(struct gcip_telemetry *));
+/* Exits and sets the telemetry state to GCIP_TELEMETRY_INVALID. */
+void gcip_telemetry_exit(struct gcip_telemetry *tel);
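+
+/*
+ * A small setup sketch for the API above, assuming the caller has already allocated a telemetry
+ * queue buffer at @vaddr (power-of-2 @size, larger than the 128-byte header) that is shared with
+ * the firmware. The firmware-to-host notification path would then call
+ * gcip_telemetry_irq_handler() on this instance.
+ */
+static inline int gcip_telemetry_example_setup(struct device *dev, struct gcip_telemetry *log,
+					       void *vaddr, size_t size, u32 eventfd)
+{
+	int ret;
+
+	/* Use the dmesg fallback for default log handling when no runtime consumer is attached. */
+	ret = gcip_telemetry_init(dev, log, "log", vaddr, size, gcip_telemetry_fw_log);
+	if (ret)
+		return ret;
+
+	/* Optionally let the runtime be notified through an eventfd. */
+	ret = gcip_telemetry_set_event(log, eventfd);
+	if (ret)
+		gcip_telemetry_exit(log);
+	return ret;
+}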
+
+#endif /* __GCIP_TELEMETRY_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-thermal.h b/gcip-kernel-driver/include/gcip/gcip-thermal.h
new file mode 100644
index 0000000..7c9ebc4
--- /dev/null
+++ b/gcip-kernel-driver/include/gcip/gcip-thermal.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Thermal management support for GCIP devices.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GCIP_THERMAL_H__
+#define __GCIP_THERMAL_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/thermal.h>
+
+#define GCIP_THERMAL_TABLE_SIZE_NAME "gcip-dvfs-table-size"
+#define GCIP_THERMAL_TABLE_NAME "gcip-dvfs-table"
+#define GCIP_THERMAL_MAX_NUM_STATES 10
+
+enum gcip_thermal_voter {
+ GCIP_THERMAL_COOLING_DEVICE,
+ GCIP_THERMAL_SYSFS,
+ GCIP_THERMAL_NOTIFIER_BLOCK,
+
+ /* Keep this as the last entry; its value is the total number of voters. */
+ GCIP_THERMAL_MAX_NUM_VOTERS,
+};
+
+struct gcip_thermal {
+ struct device *dev;
+ struct thermal_cooling_device *cdev;
+ struct notifier_block nb;
+ struct dentry *dentry;
+ struct gcip_pm *pm;
+
+ /*
+ * Lock to protect the struct members listed below.
+ *
+ * Note that since the request of thermal state adjusting might happen during power state
+ * transitions (i.e., another thread calling gcip_thermal_restore_on_powering() with pm lock
+ * held), one must either use the non-blocking gcip_pm_get_if_powered() or make sure there
+ * won't be any new power transition after holding this thermal lock to prevent deadlock.
+ */
+ struct mutex lock;
+ unsigned long num_states;
+ unsigned long state;
+ unsigned long vote[GCIP_THERMAL_MAX_NUM_VOTERS];
+ bool device_suspended;
+ bool enabled;
+
+ /* Private data. See struct gcip_thermal_args. */
+ void *data;
+
+ /* Callbacks. See struct gcip_thermal_args. */
+ int (*get_rate)(void *data, unsigned long *rate);
+ int (*set_rate)(void *data, unsigned long rate);
+ int (*control)(void *data, bool enable);
+};
+
+/* Arguments for devm_gcip_thermal_create. */
+struct gcip_thermal_args {
+ /* Device struct of GCIP device. */
+ struct device *dev;
+ /* GCIP power management. */
+ struct gcip_pm *pm;
+ /* Top-level debugfs directory for the device. */
+ struct dentry *dentry;
+ /* Name of the thermal cooling-device node in device tree. */
+ const char *node_name;
+ /* Thermal cooling device type for thermal_of_cooling_device_register(). */
+ const char *type;
+ /* Private data for callbacks listed below. */
+ void *data;
+ /*
+ * Callbacks listed below are called only if the device is powered and with the guarantee
+ * that there won't be any new power transition during the call (i.e., after
+ * gcip_pm_get_if_powered() succeeds or during the power up triggered by gcip_pm_get())
+ * to prevent deadlock since they are called with thermal lock held. See the note about
+ * thermal lock in struct gcip_thermal.
+ */
+ /* Callback to get the device clock rate. */
+ int (*get_rate)(void *data, unsigned long *rate);
+ /*
+ * Callback to set the device clock rate.
+ * Might be called with pm lock held in gcip_thermal_restore_on_powering().
+ */
+ int (*set_rate)(void *data, unsigned long rate);
+ /*
+ * Callback to enable/disable the thermal control.
+ * Might be called with pm lock held in gcip_thermal_restore_on_powering().
+ */
+ int (*control)(void *data, bool enable);
+};
+
+/* Gets the notifier_block struct for thermal throttling requests. */
+struct notifier_block *gcip_thermal_get_notifier_block(struct gcip_thermal *thermal);
+/* Allocates and initializes GCIP thermal struct. */
+struct gcip_thermal *gcip_thermal_create(const struct gcip_thermal_args *args);
+/* Destroys and frees GCIP thermal struct. */
+void gcip_thermal_destroy(struct gcip_thermal *thermal);
+/* Suspends the device due to thermal request. */
+int gcip_thermal_suspend_device(struct gcip_thermal *thermal);
+/* Resumes the device and restores previous thermal state. */
+int gcip_thermal_resume_device(struct gcip_thermal *thermal);
+/*
+ * Checks whether the device is suspended by thermal.
+ * Note that it's checked without thermal lock and state might change subsequently.
+ */
+bool gcip_thermal_is_device_suspended(struct gcip_thermal *thermal);
+/*
+ * Restores the previous thermal state.
+ *
+ * This function is designed to restore the thermal state during power management calls and thus it
+ * assumes the caller holds the pm lock.
+ */
+int gcip_thermal_restore_on_powering(struct gcip_thermal *thermal);
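+
+/*
+ * An illustrative creation sketch for the API above. The node/type names are hypothetical
+ * placeholders and the callbacks are supplied by the caller; they must follow the locking notes
+ * documented in struct gcip_thermal_args.
+ */
+static inline struct gcip_thermal *
+gcip_thermal_example_create(struct device *dev, struct gcip_pm *pm, struct dentry *dentry,
+			    int (*get_rate)(void *data, unsigned long *rate),
+			    int (*set_rate)(void *data, unsigned long rate),
+			    int (*control)(void *data, bool enable), void *data)
+{
+	const struct gcip_thermal_args args = {
+		.dev = dev,
+		.pm = pm,
+		.dentry = dentry,
+		.node_name = "example-cooling",	/* hypothetical DT node name */
+		.type = "example-cooling",	/* hypothetical cooling device type */
+		.data = data,
+		.get_rate = get_rate,
+		.set_rate = set_rate,
+		.control = control,
+	};
+
+	return gcip_thermal_create(&args);
+}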
+
+#endif /* __GCIP_THERMAL_H__ */
diff --git a/gsx01-mailbox-driver.c b/gsx01-mailbox-driver.c
new file mode 100644
index 0000000..9876998
--- /dev/null
+++ b/gsx01-mailbox-driver.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GXP hardware-based mailbox csr driver implementation for GSX01.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/barrier.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "gxp-config.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox.h"
+
+#include "gxp-mailbox-driver.c"
+
+static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
+{
+ return readl(mailbox->csr_reg_base + reg_offset);
+}
+
+static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
+{
+ writel(value, mailbox->csr_reg_base + reg_offset);
+}
+
+void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
+{
+ csr_write(mailbox, MBOX_MCUCTLR_OFFSET, 1);
+}
+
+void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
+ u32 int_mask)
+{
+ /*
+ * Ensure all memory writes have been committed to memory before
+ * signalling the device to read from them. This avoids the scenario
+ * where the interrupt trigger write gets delivered to the MBX HW before
+ * the DRAM transactions have made it to DRAM, since they're Normal
+ * transactions and can be re-ordered and backed off behind other
+ * transfers.
+ */
+ wmb();
+
+ csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
+}
+
+u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_INTMSR0_OFFSET);
+}
+
+void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_INTCR1_OFFSET, int_mask);
+}
+
+void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_INTMR1_OFFSET, int_mask);
+}
+
+u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_INTMSR1_OFFSET);
+}
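+
+/*
+ * A minimal sketch of how the host-side helpers above are typically consumed from an interrupt
+ * handler, under the assumption that INTMSR1 reports the currently pending (unmasked) host
+ * interrupt sources; dispatching work per bit is driver-specific and omitted here.
+ */
+static irqreturn_t __maybe_unused gxp_mailbox_example_host_irq(struct gxp_mailbox *mailbox)
+{
+	u32 pending = gxp_mailbox_get_host_mask_status(mailbox);
+
+	if (!pending)
+		return IRQ_NONE;
+
+	/* ... handle each source set in @pending ... */
+
+	gxp_mailbox_clear_host_interrupt(mailbox, pending);
+	return IRQ_HANDLED;
+}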
diff --git a/gxp-bpm.c b/gxp-bpm.c
index 50a41f1..90cf0e9 100644
--- a/gxp-bpm.c
+++ b/gxp-bpm.c
@@ -25,32 +25,32 @@ void gxp_bpm_configure(struct gxp_dev *gxp, u8 core, u32 bpm_offset, u32 event)
{
u32 val = ((event & BPM_EVENT_TYPE_MASK) << BPM_EVENT_TYPE_BIT) |
BPM_ENABLE;
- u32 bpm_base = GXP_REG_INST_BPM + bpm_offset;
+ u32 bpm_base = GXP_CORE_REG_INST_BPM(core) + bpm_offset;
/* Configure event */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CNTR_CONFIG_OFFSET, val);
+ gxp_write_32(gxp, bpm_base + BPM_CNTR_CONFIG_OFFSET, val);
/* Arm counter */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, BPM_ENABLE);
+ gxp_write_32(gxp, bpm_base + BPM_CONFIG_OFFSET, BPM_ENABLE);
}
void gxp_bpm_start(struct gxp_dev *gxp, u8 core)
{
- gxp_write_32_core(gxp, core, GXP_REG_PROFILING_CONDITION,
- BPM_ENABLE << BPM_START_BIT);
+ gxp_write_32(gxp, GXP_CORE_REG_PROFILING_CONDITION(core),
+ BPM_ENABLE << BPM_START_BIT);
}
void gxp_bpm_stop(struct gxp_dev *gxp, u8 core)
{
- gxp_write_32_core(gxp, core, GXP_REG_PROFILING_CONDITION,
- BPM_ENABLE << BPM_STOP_BIT);
+ gxp_write_32(gxp, GXP_CORE_REG_PROFILING_CONDITION(core),
+ BPM_ENABLE << BPM_STOP_BIT);
}
u32 gxp_bpm_read_counter(struct gxp_dev *gxp, u8 core, u32 bpm_offset)
{
- u32 bpm_base = GXP_REG_INST_BPM + bpm_offset;
+ u32 bpm_base = GXP_CORE_REG_INST_BPM(core) + bpm_offset;
/* Disarm counter */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, BPM_DISABLE);
+ gxp_write_32(gxp, bpm_base + BPM_CONFIG_OFFSET, BPM_DISABLE);
/* Read final counter value */
- return gxp_read_32_core(gxp, core, bpm_base + BPM_SNAPSHOT_CNTR_OFFSET);
+ return gxp_read_32(gxp, bpm_base + BPM_SNAPSHOT_CNTR_OFFSET);
}
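+
+/*
+ * A brief usage sketch of the BPM helpers above: arm a counter, run the window of interest, then
+ * disarm and read it back. The offset and event values are hypothetical placeholders.
+ */
+static u32 __maybe_unused gxp_bpm_example_measure(struct gxp_dev *gxp, u8 core)
+{
+	const u32 bpm_offset = 0;	/* hypothetical BPM instance offset */
+	const u32 event = 0;		/* hypothetical event code */
+
+	gxp_bpm_configure(gxp, core, bpm_offset, event);
+	gxp_bpm_start(gxp, core);
+
+	/* ... run the workload being profiled ... */
+
+	gxp_bpm_stop(gxp, core);
+	return gxp_bpm_read_counter(gxp, core, bpm_offset);
+}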
diff --git a/gxp-client.c b/gxp-client.c
index c9184d7..a776542 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -9,12 +9,14 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <gcip/gcip-pm.h>
+
#include "gxp-client.h"
#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-pm.h"
#include "gxp-vd.h"
-#include "gxp-wakelock.h"
+#include "gxp.h"
struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
{
@@ -25,13 +27,13 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
return ERR_PTR(-ENOMEM);
client->gxp = gxp;
- init_rwsem(&client->semaphore);
+ lockdep_register_key(&client->key);
+ __init_rwsem(&client->semaphore, "&client->semaphore", &client->key);
client->has_block_wakelock = false;
client->has_vd_wakelock = false;
- client->requested_power_state = AUR_OFF;
- client->requested_memory_power_state = 0;
+ client->requested_states = off_states;
client->vd = NULL;
- client->requested_low_clkmux = false;
+
return client;
}
@@ -40,37 +42,290 @@ void gxp_client_destroy(struct gxp_client *client)
struct gxp_dev *gxp = client->gxp;
int core;
- down_write(&gxp->vd_semaphore);
-
- if (client->vd && client->vd->state != GXP_VD_OFF)
+ if (client->vd && client->vd->state != GXP_VD_OFF) {
+ down_write(&gxp->vd_semaphore);
gxp_vd_stop(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+
+ if (client->vd && client->has_block_wakelock) {
+ down_write(&gxp->vd_semaphore);
+ gxp_vd_block_unready(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
for (core = 0; core < GXP_NUM_CORES; core++) {
if (client->mb_eventfds[core])
gxp_eventfd_put(client->mb_eventfds[core]);
}
- up_write(&gxp->vd_semaphore);
-
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
if (client->tpu_file) {
+ if (client->vd) {
+ if (gxp->before_unmap_tpu_mbx_queue)
+ gxp->before_unmap_tpu_mbx_queue(gxp, client);
+ if (gxp_is_direct_mode(gxp))
+ gxp_dma_unmap_tpu_buffer(gxp,
+ client->vd->domain,
+ client->mbx_desc);
+ }
fput(client->tpu_file);
client->tpu_file = NULL;
- gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
}
#endif
if (client->has_block_wakelock) {
- gxp_wakelock_release(client->gxp);
+ gcip_pm_put(client->gxp->power_mgr->pm);
gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state,
- client->requested_low_clkmux, AUR_OFF, false,
- client->requested_memory_power_state,
- AUR_MEM_UNDEFINED);
+ gxp, client->requested_states, off_states);
}
- if (client->vd)
+ if (client->vd) {
+ down_write(&gxp->vd_semaphore);
gxp_vd_release(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+ lockdep_unregister_key(&client->key);
kfree(client);
}
+
+static int gxp_set_secure_vd(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+
+ if (gxp_is_direct_mode(gxp))
+ return 0;
+
+ mutex_lock(&gxp->secure_vd_lock);
+ if (gxp->secure_vd) {
+ mutex_unlock(&gxp->secure_vd_lock);
+ return -EEXIST;
+ }
+ vd->is_secure = true;
+ gxp->secure_vd = vd;
+ mutex_unlock(&gxp->secure_vd_lock);
+
+ return 0;
+}
+
+int gxp_client_allocate_virtual_device(struct gxp_client *client,
+ uint core_count, u8 flags)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_virtual_device *vd;
+ int ret;
+
+ lockdep_assert_held(&client->semaphore);
+ if (client->vd) {
+ dev_err(gxp->dev,
+ "Virtual device was already allocated for client\n");
+ return -EINVAL;
+ }
+
+ down_write(&gxp->vd_semaphore);
+ vd = gxp_vd_allocate(gxp, core_count);
+ if (IS_ERR(vd)) {
+ ret = PTR_ERR(vd);
+ dev_err(gxp->dev,
+ "Failed to allocate virtual device for client (%d)\n",
+ ret);
+ goto error;
+ }
+ if (flags & GXP_ALLOCATE_VD_SECURE) {
+ ret = gxp_set_secure_vd(vd);
+ if (ret)
+ goto error_vd_release;
+ }
+ if (client->has_block_wakelock) {
+ ret = gxp_vd_block_ready(vd);
+ if (ret)
+ goto error_vd_release;
+ }
+ up_write(&gxp->vd_semaphore);
+
+ client->vd = vd;
+ return 0;
+
+error_vd_release:
+ gxp_vd_release(vd);
+error:
+ up_write(&gxp->vd_semaphore);
+ return ret;
+}
+
+static int gxp_client_request_power_states(struct gxp_client *client,
+ struct gxp_power_states requested_states)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int ret;
+
+ if (gxp->request_power_states) {
+ ret = gxp->request_power_states(client, requested_states);
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ }
+ gxp_pm_update_requested_power_states(gxp, client->requested_states,
+ requested_states);
+ client->requested_states = requested_states;
+ return 0;
+}
+
+int gxp_client_acquire_block_wakelock(struct gxp_client *client,
+ bool *acquired_wakelock)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int ret;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_block_wakelock) {
+ ret = gcip_pm_get(gxp->power_mgr->pm);
+ if (ret)
+ return ret;
+ *acquired_wakelock = true;
+ if (client->vd) {
+ down_write(&gxp->vd_semaphore);
+ ret = gxp_vd_block_ready(client->vd);
+ up_write(&gxp->vd_semaphore);
+ if (ret)
+ goto err_wakelock_release;
+ }
+ } else {
+ *acquired_wakelock = false;
+ }
+ client->has_block_wakelock = true;
+
+ /*
+ * Update client's TGID+PID in case the process that opened
+ * /dev/gxp is not the one that called this IOCTL.
+ */
+ client->tgid = current->tgid;
+ client->pid = current->pid;
+
+ return 0;
+
+err_wakelock_release:
+ if (*acquired_wakelock) {
+ gcip_pm_put(gxp->power_mgr->pm);
+ *acquired_wakelock = false;
+ }
+ return ret;
+}
+
+void gxp_client_release_block_wakelock(struct gxp_client *client)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_block_wakelock)
+ return;
+
+ gxp_client_release_vd_wakelock(client);
+
+ if (client->vd) {
+ down_write(&gxp->vd_semaphore);
+ gxp_vd_block_unready(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+
+ gcip_pm_put(gxp->power_mgr->pm);
+ client->has_block_wakelock = false;
+}
+
+int gxp_client_acquire_vd_wakelock(struct gxp_client *client,
+ struct gxp_power_states requested_states)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int ret = 0;
+ enum gxp_virtual_device_state orig_state;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
+ return -EINVAL;
+ }
+
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev,
+ "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
+ return -ENODEV;
+ }
+
+ if (!client->has_vd_wakelock) {
+ down_write(&gxp->vd_semaphore);
+ orig_state = client->vd->state;
+ if (client->vd->state == GXP_VD_READY || client->vd->state == GXP_VD_OFF)
+ ret = gxp_vd_run(client->vd);
+ else
+ ret = gxp_vd_resume(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+
+ if (ret)
+ goto out;
+
+ ret = gxp_client_request_power_states(client, requested_states);
+ if (ret)
+ goto out_release_vd_wakelock;
+
+ client->has_vd_wakelock = true;
+ return 0;
+
+out_release_vd_wakelock:
+ if (!client->has_vd_wakelock) {
+ down_write(&gxp->vd_semaphore);
+ if (orig_state == GXP_VD_READY || orig_state == GXP_VD_OFF)
+ gxp_vd_stop(client->vd);
+ else
+ gxp_vd_suspend(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+out:
+ return ret;
+}
+
+void gxp_client_release_vd_wakelock(struct gxp_client *client)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->has_vd_wakelock)
+ return;
+
+ /*
+ * Currently the VD state will not be GXP_VD_UNAVAILABLE if
+ * has_vd_wakelock is true. Keep this check just in case
+ * GXP_VD_UNAVAILABLE starts occurring in more scenarios in
+ * the future.
+ */
+ if (client->vd->state == GXP_VD_UNAVAILABLE)
+ return;
+
+ down_write(&gxp->vd_semaphore);
+ gxp_vd_suspend(client->vd);
+ up_write(&gxp->vd_semaphore);
+
+ gxp_client_request_power_states(client, off_states);
+ client->has_vd_wakelock = false;
+}
+
+bool gxp_client_has_available_vd(struct gxp_client *client, const char *name)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "%s requires the client allocate a VIRTUAL_DEVICE\n",
+ name);
+ return false;
+ }
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
+ name);
+ return false;
+ }
+ return true;
+}
diff --git a/gxp-client.h b/gxp-client.h
index 0d1f860..964aa76 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -12,8 +12,9 @@
#include <linux/sched.h>
#include <linux/types.h>
-#include "gxp-internal.h"
#include "gxp-eventfd.h"
+#include "gxp-internal.h"
+#include "gxp-pm.h"
#include "gxp-vd.h"
/* Holds state belonging to a client */
@@ -27,14 +28,12 @@ struct gxp_client {
* lock this semaphore for reading for the duration of that operation.
*/
struct rw_semaphore semaphore;
+ struct lock_class_key key;
bool has_block_wakelock;
bool has_vd_wakelock;
- /* Value is one of the GXP_POWER_STATE_* values from gxp.h. */
- uint requested_power_state;
- /* Value is one of the MEMORY_POWER_STATE_* values from gxp.h. */
- uint requested_memory_power_state;
- bool requested_low_clkmux;
+
+ struct gxp_power_states requested_states;
struct gxp_virtual_device *vd;
struct file *tpu_file;
@@ -48,11 +47,12 @@ struct gxp_client {
pid_t pid;
/*
- * Indicates whether the driver needs to disable telemetry when this
- * client closes. For when the client fails to disable telemetry itself.
+ * Indicates whether the driver needs to disable core telemetry when
+ * this client closes. For when the client fails to disable core
+ * telemetry itself.
*/
- bool enabled_telemetry_logging;
- bool enabled_telemetry_tracing;
+ bool enabled_core_telemetry_logging;
+ bool enabled_core_telemetry_tracing;
};
/*
@@ -65,5 +65,78 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp);
* TPU mailboxes it holds.
*/
void gxp_client_destroy(struct gxp_client *client);
+/**
+ * gxp_client_allocate_virtual_device() - Allocates a virtual device for the
+ * client.
+ *
+ * @client: The client to allocate a virtual device for.
+ * @core_count: The requested core count of the virtual device.
+ * @flags: The flags passed from the runtime's request.
+ *
+ * The caller must have locked client->semaphore.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - A virtual device has already been allocated for the client
+ * * Otherwise - Errno returned by virtual device allocation
+ */
+int gxp_client_allocate_virtual_device(struct gxp_client *client,
+ uint core_count, u8 flags);
+/**
+ * gxp_client_acquire_block_wakelock() - Acquires a block wakelock.
+ *
+ * @client: The client to acquire wakelock.
+ * @acquired_wakelock: True if block wakelock has been acquired by this client.
+ *
+ * The caller must have locked client->semaphore.
+ *
+ * Return:
+ * * 0 - Success
+ * * Otherwise - Errno returned by block wakelock acquisition
+ */
+int gxp_client_acquire_block_wakelock(struct gxp_client *client,
+ bool *acquired_wakelock);
+/**
+ * gxp_client_release_block_wakelock() - Releases the held block wakelock and
+ * revokes the power votes.
+ *
+ * The caller must have locked client->semaphore.
+ */
+void gxp_client_release_block_wakelock(struct gxp_client *client);
+/**
+ * gxp_client_acquire_vd_wakelock() - Acquires a VD wakelock for the current
+ * virtual device to start the virtual device or resume it if it's suspended.
+ * Also the client can request the power votes tied with the acquired wakelock.
+ *
+ * @client: The client to acquire wakelock and request power votes.
+ * @requested_states: The requested power states.
+ *
+ * The caller must have locked client->semaphore.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - No block wakelock held
+ * * -ENODEV - VD state is unavailable
+ */
+int gxp_client_acquire_vd_wakelock(struct gxp_client *client,
+ struct gxp_power_states requested_states);
+/**
+ * gxp_client_release_vd_wakelock() - Releases the held VD wakelock to suspend
+ * the current virtual device.
+ *
+ * The caller must have locked client->semaphore.
+ */
+void gxp_client_release_vd_wakelock(struct gxp_client *client);
+
+/**
+ * gxp_client_has_available_vd() - Returns whether @client has an available
+ * virtual device.
+ *
+ * @client: The client to check.
+ * @name: The string used for logging when the client has an invalid VD.
+ *
+ * The caller must have locked client->semaphore.
+ */
+bool gxp_client_has_available_vd(struct gxp_client *client, const char *name);
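+
+/*
+ * A minimal bring-up sketch for the client API above, with ioctl-handler-like locking and trimmed
+ * error handling. The core count, flags and power states are placeholder values; a real caller
+ * derives them from its ioctl arguments.
+ */
+static inline int gxp_client_example_bringup(struct gxp_client *client)
+{
+	const struct gxp_power_states states = {
+		.power = AUR_UUD,
+		.memory = AUR_MEM_UNDEFINED,
+		.low_clkmux = false,
+	};
+	bool acquired_wakelock = false;
+	int ret;
+
+	down_write(&client->semaphore);
+	ret = gxp_client_allocate_virtual_device(client, /*core_count=*/1, /*flags=*/0);
+	if (!ret)
+		ret = gxp_client_acquire_block_wakelock(client, &acquired_wakelock);
+	if (!ret) {
+		ret = gxp_client_acquire_vd_wakelock(client, states);
+		if (ret && acquired_wakelock)
+			gxp_client_release_block_wakelock(client);
+	}
+	up_write(&client->semaphore);
+	return ret;
+}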
#endif /* __GXP_CLIENT_H__ */
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
new file mode 100644
index 0000000..7514b11
--- /dev/null
+++ b/gxp-common-platform.c
@@ -0,0 +1,2247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GXP platform driver utilities.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#include <linux/platform_data/sscoredump.h>
+#endif
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/uidgid.h>
+
+#include <gcip/gcip-dma-fence.h>
+#include <gcip/gcip-pm.h>
+
+#include "gxp-client.h"
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
+#include "gxp-debug-dump.h"
+#include "gxp-debugfs.h"
+#include "gxp-dma-fence.h"
+#include "gxp-dma.h"
+#include "gxp-dmabuf.h"
+#include "gxp-domain-pool.h"
+#include "gxp-firmware.h"
+#include "gxp-firmware-data.h"
+#include "gxp-firmware-loader.h"
+#include "gxp-internal.h"
+#include "gxp-lpm.h"
+#include "gxp-mailbox.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mapping.h"
+#include "gxp-notification.h"
+#include "gxp-pm.h"
+#include "gxp-thermal.h"
+#include "gxp-vd.h"
+#include "gxp.h"
+
+#if HAS_TPU_EXT
+#include <soc/google/tpu-ext.h>
+#endif
+
+#if GXP_USE_LEGACY_MAILBOX
+#include "gxp-mailbox-impl.h"
+#else
+#include "gxp-dci.h"
+#endif
+
+static struct gxp_dev *gxp_debug_pointer;
+
+/* Caller needs to hold client->semaphore for reading */
+static bool check_client_has_available_vd_wakelock(struct gxp_client *client,
+ char *ioctl_name)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held_read(&client->semaphore);
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "%s requires the client hold a VIRTUAL_DEVICE wakelock\n",
+ ioctl_name);
+ return false;
+ }
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
+ ioctl_name);
+ return false;
+ }
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+
+static struct sscd_platform_data gxp_sscd_pdata;
+
+static void gxp_sscd_release(struct device *dev)
+{
+ pr_debug("%s\n", __func__);
+}
+
+static struct platform_device gxp_sscd_dev = {
+ .name = GXP_DRIVER_NAME,
+ .driver_override = SSCD_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &gxp_sscd_pdata,
+ .release = gxp_sscd_release,
+ },
+};
+
+static void gxp_common_platform_reg_sscd(void)
+{
+ /* Registers SSCD platform device */
+ if (gxp_debug_dump_is_enabled()) {
+ if (platform_device_register(&gxp_sscd_dev))
+ pr_err("Unable to register SSCD platform device\n");
+ }
+}
+
+static void gxp_common_platform_unreg_sscd(void)
+{
+ if (gxp_debug_dump_is_enabled())
+ platform_device_unregister(&gxp_sscd_dev);
+}
+
+#else /* CONFIG_SUBSYSTEM_COREDUMP */
+
+static void gxp_common_platform_reg_sscd(void)
+{
+}
+
+static void gxp_common_platform_unreg_sscd(void)
+{
+}
+
+#endif /* CONFIG_SUBSYSTEM_COREDUMP */
+
+/* Mapping from GXP_POWER_STATE_* to enum aur_power_state in gxp-pm.h */
+static const uint aur_state_array[GXP_NUM_POWER_STATES] = {
+ AUR_OFF, AUR_UUD, AUR_SUD, AUR_UD, AUR_NOM,
+ AUR_READY, AUR_UUD_PLUS, AUR_SUD_PLUS, AUR_UD_PLUS
+};
+/* Mapping from MEMORY_POWER_STATE_* to enum aur_memory_power_state in gxp-pm.h */
+static const uint aur_memory_state_array[MEMORY_POWER_STATE_MAX + 1] = {
+ AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
+ AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
+};
+
+static int gxp_open(struct inode *inode, struct file *file)
+{
+ struct gxp_client *client;
+ struct gxp_dev *gxp = container_of(file->private_data, struct gxp_dev,
+ misc_dev);
+ int ret = 0;
+
+ /* If this is the first call to open(), load the firmware files */
+ ret = gxp_firmware_loader_load_if_needed(gxp);
+ if (ret)
+ return ret;
+
+ client = gxp_client_create(gxp);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+
+ client->tgid = current->tgid;
+ client->pid = current->pid;
+
+ file->private_data = client;
+
+ mutex_lock(&gxp->client_list_lock);
+ list_add(&client->list_entry, &gxp->client_list);
+ mutex_unlock(&gxp->client_list_lock);
+
+ return ret;
+}
+
+static int gxp_release(struct inode *inode, struct file *file)
+{
+ struct gxp_client *client = file->private_data;
+
+ /*
+ * If open failed and no client was created then no clean-up is needed.
+ */
+ if (!client)
+ return 0;
+
+ if (client->enabled_core_telemetry_logging)
+ gxp_core_telemetry_disable(client->gxp,
+ GXP_TELEMETRY_TYPE_LOGGING);
+ if (client->enabled_core_telemetry_tracing)
+ gxp_core_telemetry_disable(client->gxp,
+ GXP_TELEMETRY_TYPE_TRACING);
+
+ mutex_lock(&client->gxp->client_list_lock);
+ list_del(&client->list_entry);
+ mutex_unlock(&client->gxp->client_list_lock);
+
+ gxp_client_destroy(client);
+
+ return 0;
+}
+
+static inline enum dma_data_direction mapping_flags_to_dma_dir(u32 flags)
+{
+ switch (flags & 0x3) {
+ case 0x0: /* 0b00 */
+ return DMA_BIDIRECTIONAL;
+ case 0x1: /* 0b01 */
+ return DMA_TO_DEVICE;
+ case 0x2: /* 0b10 */
+ return DMA_FROM_DEVICE;
+ }
+
+ return DMA_NONE;
+}
+
+static int gxp_map_buffer(struct gxp_client *client,
+ struct gxp_map_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_ioctl ibuf;
+ struct gxp_mapping *map;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.size == 0)
+ return -EINVAL;
+
+ if (ibuf.host_address % L1_CACHE_BYTES || ibuf.size % L1_CACHE_BYTES) {
+ dev_err(gxp->dev,
+ "Mapped buffers must be cache line aligned and padded.\n");
+ return -EINVAL;
+ }
+
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_BUFFER")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ map = gxp_mapping_create(gxp, client->vd->domain, ibuf.host_address,
+ ibuf.size,
+ ibuf.flags,
+ mapping_flags_to_dma_dir(ibuf.flags));
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ dev_err(gxp->dev, "Failed to create mapping (ret=%d)\n", ret);
+ goto out;
+ }
+
+ ret = gxp_vd_mapping_store(client->vd, map);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to store mapping (ret=%d)\n", ret);
+ goto error_destroy;
+ }
+
+ ibuf.device_address = map->device_address;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ ret = -EFAULT;
+ goto error_remove;
+ }
+
+ gxp_mapping_iova_log(client, map,
+ GXP_IOVA_LOG_MAP | GXP_IOVA_LOG_BUFFER);
+
+ /*
+ * The virtual device acquired its own reference to the mapping when
+ * it was stored in the VD's records. Release the reference from
+ * creating the mapping since this function is done using it.
+ */
+ gxp_mapping_put(map);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+
+error_remove:
+ gxp_vd_mapping_remove(client->vd, map);
+error_destroy:
+ gxp_mapping_put(map);
+ up_read(&client->semaphore);
+ return ret;
+}
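+
+/*
+ * A user-space caller sketch for the handler above, shown for illustration. Assumptions: the uapi
+ * header "gxp.h" exposes struct gxp_map_ioctl and a GXP_MAP_BUFFER request macro matching this
+ * handler, and 4096 is used as a safe multiple of the cache-line alignment/padding requirement.
+ *
+ *	#include <stdint.h>
+ *	#include <stdlib.h>
+ *	#include <sys/ioctl.h>
+ *	#include "gxp.h"
+ *
+ *	static int example_map(int gxp_fd, size_t size, uint64_t *dev_addr)
+ *	{
+ *		struct gxp_map_ioctl ibuf = { 0 };
+ *		void *buf = NULL;
+ *
+ *		if (posix_memalign(&buf, 4096, size))
+ *			return -1;
+ *		ibuf.host_address = (uint64_t)(uintptr_t)buf;
+ *		ibuf.size = size;	// must be cache-line padded
+ *		ibuf.flags = 0;		// 0b00: DMA_BIDIRECTIONAL
+ *		if (ioctl(gxp_fd, GXP_MAP_BUFFER, &ibuf))
+ *			return -1;
+ *		*dev_addr = ibuf.device_address;
+ *		return 0;
+ *	}
+ */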
+
+static int gxp_unmap_buffer(struct gxp_client *client,
+ struct gxp_map_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_ioctl ibuf;
+ struct gxp_mapping *map;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ map = gxp_vd_mapping_search(client->vd,
+ (dma_addr_t)ibuf.device_address);
+ if (!map) {
+ dev_err(gxp->dev,
+ "Mapping not found for provided device address %#llX\n",
+ ibuf.device_address);
+ ret = -EINVAL;
+ goto out;
+ } else if (!map->host_address) {
+ dev_err(gxp->dev, "dma-bufs must be unmapped via GXP_UNMAP_DMABUF\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WARN_ON(map->host_address != ibuf.host_address);
+
+ gxp_vd_mapping_remove(client->vd, map);
+ gxp_mapping_iova_log(client, map,
+ GXP_IOVA_LOG_UNMAP | GXP_IOVA_LOG_BUFFER);
+
+ /* Release the reference from gxp_vd_mapping_search() */
+ gxp_mapping_put(map);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_sync_buffer(struct gxp_client *client,
+ struct gxp_sync_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_sync_ioctl ibuf;
+ struct gxp_mapping *map;
+ int ret;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_SYNC_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ map = gxp_vd_mapping_search(client->vd,
+ (dma_addr_t)ibuf.device_address);
+ if (!map) {
+ dev_err(gxp->dev,
+ "Mapping not found for provided device address %#llX\n",
+ ibuf.device_address);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = gxp_mapping_sync(map, ibuf.offset, ibuf.size,
+ ibuf.flags == GXP_SYNC_FOR_CPU);
+
+ /* Release the reference from gxp_vd_mapping_search() */
+ gxp_mapping_put(map);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_mailbox_command(struct gxp_client *client,
+ struct gxp_mailbox_command_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_mailbox_command_ioctl ibuf;
+ int virt_core, phys_core;
+ int ret = 0;
+ struct gxp_power_states power_states;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
+ dev_err(gxp->dev,
+ "Unable to copy ioctl data from user-space\n");
+ return -EFAULT;
+ }
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
+ dev_err(gxp->dev,
+ "GXP_POWER_STATE_OFF is not a valid value when executing a mailbox command\n");
+ return -EINVAL;
+ }
+ if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
+ ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
+ dev_err(gxp->dev, "Requested power state is invalid\n");
+ return -EINVAL;
+ }
+ if (ibuf.memory_power_state < MEMORY_POWER_STATE_UNDEFINED ||
+ ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) {
+ dev_err(gxp->dev, "Requested memory power state is invalid\n");
+ return -EINVAL;
+ }
+
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
+ ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
+ }
+
+ if (ibuf.power_flags & GXP_POWER_NON_AGGRESSOR)
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_MAILBOX_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ virt_core = ibuf.virtual_core_id;
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
+ if (phys_core < 0) {
+ dev_err(gxp->dev,
+ "Mailbox command failed: Invalid virtual core id (%u)\n",
+ virt_core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!gxp_is_fw_running(gxp, phys_core)) {
+ dev_err(gxp->dev,
+ "Cannot process mailbox command for core %d when firmware isn't running\n",
+ phys_core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
+ dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
+ phys_core);
+ ret = -EIO;
+ goto out;
+ }
+
+ power_states.power = aur_state_array[ibuf.gxp_power_state];
+ power_states.memory = aur_memory_state_array[ibuf.memory_power_state];
+ power_states.low_clkmux = (ibuf.power_flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
+
+ ret = gxp->mailbox_mgr->execute_cmd_async(
+ client, gxp->mailbox_mgr->mailboxes[phys_core], virt_core,
+ GXP_MBOX_CODE_DISPATCH, 0, ibuf.device_address, ibuf.size,
+ ibuf.flags, power_states, &ibuf.sequence_number);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
+ ret);
+ goto out;
+ }
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ dev_err(gxp->dev, "Failed to copy back sequence number!\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_mailbox_response(struct gxp_client *client,
+ struct gxp_mailbox_response_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_mailbox_response_ioctl ibuf;
+ int virt_core;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_MAILBOX_RESPONSE")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ virt_core = ibuf.virtual_core_id;
+ if (virt_core >= client->vd->num_cores) {
+ dev_err(gxp->dev, "Mailbox response failed: Invalid virtual core id (%u)\n",
+ virt_core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = gxp->mailbox_mgr->wait_async_resp(client, virt_core,
+ &ibuf.sequence_number, NULL,
+ &ibuf.cmd_retval,
+ &ibuf.error_code);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ ret = -EFAULT;
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
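+
+/*
+ * A user-space caller sketch pairing the two handlers above: dispatch a command to a virtual core,
+ * then block for its response. Assumptions: the uapi header "gxp.h" exposes the two structs and
+ * the GXP_MAILBOX_COMMAND / GXP_MAILBOX_RESPONSE request macros (plus <stdint.h> and
+ * <sys/ioctl.h>), and @dev_addr refers to a previously mapped command buffer.
+ *
+ *	static int example_dispatch(int gxp_fd, uint64_t dev_addr, uint32_t size)
+ *	{
+ *		struct gxp_mailbox_command_ioctl cmd = {
+ *			.virtual_core_id = 0,
+ *			.device_address = dev_addr,
+ *			.size = size,
+ *			.gxp_power_state = GXP_POWER_STATE_UUD,
+ *			.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
+ *		};
+ *		struct gxp_mailbox_response_ioctl rsp = { .virtual_core_id = 0 };
+ *
+ *		if (ioctl(gxp_fd, GXP_MAILBOX_COMMAND, &cmd))
+ *			return -1;
+ *		rsp.sequence_number = cmd.sequence_number;
+ *		if (ioctl(gxp_fd, GXP_MAILBOX_RESPONSE, &rsp))
+ *			return -1;
+ *		return rsp.error_code ? -1 : 0;
+ *	}
+ */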
+
+static int gxp_get_specs(struct gxp_client *client,
+ struct gxp_specs_ioctl __user *argp)
+{
+ struct buffer_data *logging_buff_data;
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_specs_ioctl ibuf = {
+ .core_count = GXP_NUM_CORES,
+ .features = !gxp_is_direct_mode(client->gxp),
+ .telemetry_buffer_size = 0,
+ .secure_telemetry_buffer_size =
+ (u8)(SECURE_CORE_TELEMETRY_BUFFER_SIZE /
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE),
+ .memory_per_core = client->gxp->memory_per_core,
+ };
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr)) {
+ logging_buff_data = gxp->core_telemetry_mgr->logging_buff_data;
+ if (!IS_ERR_OR_NULL(logging_buff_data)) {
+ ibuf.telemetry_buffer_size =
+ (u8)(logging_buff_data->size /
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE);
+ }
+ }
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int gxp_allocate_vd(struct gxp_client *client,
+ struct gxp_virtual_device_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_virtual_device_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.core_count == 0 || ibuf.core_count > GXP_NUM_CORES) {
+ dev_err(gxp->dev, "Invalid core count (%u)\n", ibuf.core_count);
+ return -EINVAL;
+ }
+
+ if (ibuf.memory_per_core > gxp->memory_per_core) {
+ dev_err(gxp->dev, "Invalid memory-per-core (%u)\n",
+ ibuf.memory_per_core);
+ return -EINVAL;
+ }
+
+ down_write(&client->semaphore);
+ ret = gxp_client_allocate_virtual_device(client, ibuf.core_count,
+ ibuf.flags);
+ up_write(&client->semaphore);
+ if (ret)
+ return ret;
+
+ ibuf.vdid = client->vd->vdid;
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ /*
+ * The VD will be released once the client FD has been closed,
+ * so we don't need to release the VD here; this branch should
+ * never happen in normal cases.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+gxp_etm_trace_start_command(struct gxp_client *client,
+ struct gxp_etm_trace_start_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_etm_trace_start_ioctl ibuf;
+ int phys_core;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ ibuf.trace_ram_enable &= ETM_TRACE_LSB_MASK;
+ ibuf.atb_enable &= ETM_TRACE_LSB_MASK;
+
+ if (!ibuf.trace_ram_enable && !ibuf.atb_enable)
+ return -EINVAL;
+
+ if (!(ibuf.sync_msg_period == 0 ||
+ (ibuf.sync_msg_period <= ETM_TRACE_SYNC_MSG_PERIOD_MAX &&
+ ibuf.sync_msg_period >= ETM_TRACE_SYNC_MSG_PERIOD_MIN &&
+ is_power_of_2(ibuf.sync_msg_period))))
+ return -EINVAL;
+
+ if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
+ return -EINVAL;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_START_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core =
+ gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
+ ibuf.virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm trace configuration to system FW
+ * once communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
+ __u16 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u16 virtual_core_id;
+ int phys_core;
+ int ret = 0;
+
+ if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_SW_STOP_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace stop via software trigger failed: Invalid virtual core id (%u)\n",
+ virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm stop signal to system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
+ __u16 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u16 virtual_core_id;
+ int phys_core;
+ int ret = 0;
+
+ if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_TRACE_CLEANUP_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
+ virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm clean up signal to system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int
+gxp_etm_get_trace_info_command(struct gxp_client *client,
+ struct gxp_etm_get_trace_info_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_etm_get_trace_info_ioctl ibuf;
+ int phys_core;
+ u32 *trace_header;
+ u32 *trace_data;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.type > 1)
+ return -EINVAL;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(
+ client, "GXP_ETM_GET_TRACE_INFO_COMMAND")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
+ ibuf.virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ trace_header = kzalloc(GXP_TRACE_HEADER_SIZE, GFP_KERNEL);
+ if (!trace_header) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ trace_data = kzalloc(GXP_TRACE_RAM_SIZE, GFP_KERNEL);
+ if (!trace_data) {
+ ret = -ENOMEM;
+ goto out_free_header;
+ }
+
+ /*
+ * TODO (b/185260919): Get trace information from system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+ if (copy_to_user((void __user *)ibuf.trace_header_addr, trace_header,
+ GXP_TRACE_HEADER_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_data;
+ }
+
+ if (ibuf.type == 1) {
+ if (copy_to_user((void __user *)ibuf.trace_data_addr,
+ trace_data, GXP_TRACE_RAM_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_data;
+ }
+ }
+
+out_free_data:
+ kfree(trace_data);
+out_free_header:
+ kfree(trace_header);
+
+out:
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_enable_core_telemetry(struct gxp_client *client,
+ __u8 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ __u8 type;
+ int ret;
+
+ if (copy_from_user(&type, argp, sizeof(type)))
+ return -EFAULT;
+
+ if (type != GXP_TELEMETRY_TYPE_LOGGING &&
+ type != GXP_TELEMETRY_TYPE_TRACING)
+ return -EINVAL;
+
+ ret = gxp_core_telemetry_enable(gxp, type);
+
+ /*
+ * Record what core telemetry types this client enabled so they can be
+ * cleaned-up if the client closes without disabling them.
+ */
+ if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
+ client->enabled_core_telemetry_logging = true;
+ if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
+ client->enabled_core_telemetry_tracing = true;
+
+ return ret;
+}
+
+static int gxp_disable_core_telemetry(struct gxp_client *client,
+ __u8 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ __u8 type;
+ int ret;
+
+ if (copy_from_user(&type, argp, sizeof(type)))
+ return -EFAULT;
+
+ if (type != GXP_TELEMETRY_TYPE_LOGGING &&
+ type != GXP_TELEMETRY_TYPE_TRACING)
+ return -EINVAL;
+
+ ret = gxp_core_telemetry_disable(gxp, type);
+
+ if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
+ client->enabled_core_telemetry_logging = false;
+ if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
+ client->enabled_core_telemetry_tracing = false;
+
+ return ret;
+}
+
+#if HAS_TPU_EXT
+
+/*
+ * Map TPU mailboxes to IOVA.
+ * This function will be called only when the device is in the direct mode.
+ */
+static int map_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl *ibuf)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct edgetpu_ext_mailbox_info *mbx_info;
+ struct edgetpu_ext_client_info gxp_tpu_info;
+ u32 phys_core_list = 0;
+ u32 core_count;
+ int ret = 0;
+
+ down_read(&gxp->vd_semaphore);
+
+ phys_core_list = client->vd->core_list;
+ core_count = hweight_long(phys_core_list);
+
+ mbx_info = kmalloc(
+ sizeof(struct edgetpu_ext_mailbox_info) +
+ core_count *
+ sizeof(struct edgetpu_ext_mailbox_descriptor),
+ GFP_KERNEL);
+ if (!mbx_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * TODO(b/249440369): Pass the @client->tpu_file file pointer. For backward compatibility,
+ * keep sending @ibuf->tpu_fd here.
+ */
+ gxp_tpu_info.tpu_fd = ibuf->tpu_fd;
+ gxp_tpu_info.mbox_map = phys_core_list;
+ gxp_tpu_info.attr =
+ (struct edgetpu_mailbox_attr __user *)ibuf->attr_ptr;
+ ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ ALLOCATE_EXTERNAL_MAILBOX, &gxp_tpu_info,
+ mbx_info);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to allocate ext TPU mailboxes %d",
+ ret);
+ goto out_free;
+ }
+
+ /* Align queue size to page size for iommu map. */
+ mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
+ mbx_info->respq_size = ALIGN(mbx_info->respq_size, PAGE_SIZE);
+
+ ret = gxp_dma_map_tpu_buffer(gxp, client->vd->domain, phys_core_list,
+ mbx_info);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to map TPU mailbox buffer %d", ret);
+ goto err_free_tpu_mbx;
+ }
+ client->mbx_desc.phys_core_list = phys_core_list;
+ client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
+ client->mbx_desc.respq_size = mbx_info->respq_size;
+
+ goto out_free;
+
+err_free_tpu_mbx:
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
+out_free:
+ kfree(mbx_info);
+out:
+ up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
+
+/*
+ * Unmap TPU mailboxes from IOVA.
+ * This function will be called only when the device is in the direct mode.
+ */
+static void unmap_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl *ibuf)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct edgetpu_ext_client_info gxp_tpu_info;
+
+ gxp_dma_unmap_tpu_buffer(gxp, client->vd->domain, client->mbx_desc);
+ gxp_tpu_info.tpu_fd = ibuf->tpu_fd;
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
+}
+
+static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_tpu_mbx_queue_ioctl ibuf;
+ int ret = 0;
+
+ if (!gxp->tpu_dev.mbx_paddr) {
+ dev_err(gxp->dev, "TPU is not available for interop\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ if (client->tpu_file) {
+ dev_err(gxp->dev, "Mapping/linking TPU mailbox information already exists");
+ ret = -EBUSY;
+ goto out_unlock_client_semaphore;
+ }
+
+ /*
+ * If someone is attacking us through this interface, it's possible that ibuf.tpu_fd here is
+ * already a different file from the one passed to edgetpu_ext_driver_cmd() (if the runtime
+ * closes the FD and opens another file exactly between the TPU driver call above and the
+ * fget below).
+ *
+ * However, from Zuma onward, we pass the file pointer directly to the TPU kernel driver and
+ * it checks whether that file is a genuine TPU device file. Therefore, our code is safe
+ * from the fd-swapping attack.
+ */
+ client->tpu_file = fget(ibuf.tpu_fd);
+ if (!client->tpu_file) {
+ ret = -EINVAL;
+ goto out_unlock_client_semaphore;
+ }
+
+ if (gxp_is_direct_mode(gxp)) {
+ ret = map_tpu_mbx_queue(client, &ibuf);
+ if (ret)
+ goto err_fput_tpu_file;
+ }
+
+ if (gxp->after_map_tpu_mbx_queue) {
+ ret = gxp->after_map_tpu_mbx_queue(gxp, client);
+ if (ret)
+ goto err_unmap_tpu_mbx_queue;
+ }
+
+ goto out_unlock_client_semaphore;
+
+err_unmap_tpu_mbx_queue:
+ if (gxp_is_direct_mode(gxp))
+ unmap_tpu_mbx_queue(client, &ibuf);
+err_fput_tpu_file:
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
+out_unlock_client_semaphore:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_tpu_mbx_queue_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_TPU_MBX_QUEUE requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!client->tpu_file) {
+ dev_err(gxp->dev, "No mappings exist for TPU mailboxes");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (gxp->before_unmap_tpu_mbx_queue)
+ gxp->before_unmap_tpu_mbx_queue(gxp, client);
+
+ if (gxp_is_direct_mode(gxp))
+ unmap_tpu_mbx_queue(client, &ibuf);
+
+ fput(client->tpu_file);
+ client->tpu_file = NULL;
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+#else /* HAS_TPU_EXT */
+
+#define gxp_map_tpu_mbx_queue(...) (-ENODEV)
+#define gxp_unmap_tpu_mbx_queue(...) (-ENODEV)
+
+#endif /* HAS_TPU_EXT */
+
+static int gxp_register_core_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_core_telemetry_register_eventfd(gxp, ibuf.type,
+ ibuf.eventfd);
+}
+
+static int gxp_unregister_core_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_core_telemetry_unregister_eventfd(gxp, ibuf.type);
+}
+
+static int gxp_read_global_counter(struct gxp_client *client,
+ __u64 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u32 high_first, high_second, low;
+ u64 counter_val;
+ int ret = 0;
+
+ /* Caller must hold BLOCK wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_READ_GLOBAL_COUNTER requires the client hold a BLOCK wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ high_first = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
+ low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
+
+ /*
+ * Check if the lower 32 bits could have wrapped in-between reading
+ * the high and low bit registers by validating the higher 32 bits
+ * haven't changed.
+ */
+ high_second = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
+ if (high_first != high_second)
+ low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
+
+ counter_val = ((u64)high_second << 32) | low;
+
+ if (copy_to_user(argp, &counter_val, sizeof(counter_val)))
+ ret = -EFAULT;
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_acquire_wake_lock(struct gxp_client *client,
+ struct gxp_acquire_wakelock_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_acquire_wakelock_ioctl ibuf;
+ bool acquired_block_wakelock = false;
+ bool requested_low_clkmux = false;
+ struct gxp_power_states power_states;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
+ dev_err(gxp->dev,
+ "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
+ return -EINVAL;
+ }
+ if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
+ ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
+ dev_err(gxp->dev, "Requested power state is invalid\n");
+ return -EINVAL;
+ }
+ if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
+ ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
+ ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
+ dev_err(gxp->dev,
+ "Requested memory power state %d is invalid\n",
+ ibuf.memory_power_state);
+ return -EINVAL;
+ }
+
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
+ ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
+ }
+
+ if (ibuf.flags & GXP_POWER_NON_AGGRESSOR)
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
+
+ down_write(&client->semaphore);
+ if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
+ (!client->vd)) {
+ dev_err(gxp->dev,
+ "Must allocate a virtual device to acquire VIRTUAL_DEVICE wakelock\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ requested_low_clkmux = (ibuf.flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
+
+ /* Acquire a BLOCK wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
+ ret = gxp_client_acquire_block_wakelock(
+ client, &acquired_block_wakelock);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
+ ret);
+ goto out;
+ }
+ }
+
+ /* Acquire a VIRTUAL_DEVICE wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
+ power_states.power = aur_state_array[ibuf.gxp_power_state];
+ power_states.memory = aur_memory_state_array[ibuf.memory_power_state];
+ power_states.low_clkmux = requested_low_clkmux;
+ ret = gxp_client_acquire_vd_wakelock(client, power_states);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
+ ret);
+ goto err_acquiring_vd_wl;
+ }
+ }
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+
+err_acquiring_vd_wl:
+ /*
+ * In a single call, if any wakelock acquisition fails, all of them do.
+ * If the client was acquiring both wakelocks and failed to acquire the
+ * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
+ * wakelock, then release it before returning the error code.
+ */
+ if (acquired_block_wakelock)
+ gxp_client_release_block_wakelock(client);
+
+ up_write(&client->semaphore);
+
+ return ret;
+}
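+
+/*
+ * A user-space caller sketch for the wakelock handler above, showing the required ordering: a
+ * virtual device must exist before a VIRTUAL_DEVICE wakelock can be acquired. Assumptions: the
+ * uapi header "gxp.h" exposes these structs and the GXP_ALLOCATE_VIRTUAL_DEVICE /
+ * GXP_ACQUIRE_WAKE_LOCK request macros (plus <sys/ioctl.h>).
+ *
+ *	static int example_power_up(int gxp_fd)
+ *	{
+ *		struct gxp_virtual_device_ioctl vd = { .core_count = 1 };
+ *		struct gxp_acquire_wakelock_ioctl wl = {
+ *			.components_to_wake = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE,
+ *			.gxp_power_state = GXP_POWER_STATE_UUD,
+ *			.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
+ *			.flags = GXP_POWER_LOW_FREQ_CLKMUX,
+ *		};
+ *
+ *		if (ioctl(gxp_fd, GXP_ALLOCATE_VIRTUAL_DEVICE, &vd))
+ *			return -1;
+ *		return ioctl(gxp_fd, GXP_ACQUIRE_WAKE_LOCK, &wl);
+ *	}
+ */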
+
+static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
+{
+ u32 wakelock_components;
+ int ret = 0;
+
+ if (copy_from_user(&wakelock_components, argp,
+ sizeof(wakelock_components)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (wakelock_components & WAKELOCK_VIRTUAL_DEVICE)
+ gxp_client_release_vd_wakelock(client);
+
+ if (wakelock_components & WAKELOCK_BLOCK)
+ gxp_client_release_block_wakelock(client);
+
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
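+/*
+ * Handler for the GXP_MAP_DMABUF ioctl. Maps the dma-buf backing the given
+ * fd into the client's virtual device domain, stores the mapping so it can
+ * later be looked up by device address, and returns that address to user
+ * space.
+ */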
+static int gxp_map_dmabuf(struct gxp_client *client,
+ struct gxp_map_dmabuf_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_dmabuf_ioctl ibuf;
+ struct gxp_mapping *mapping;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_DMABUF")) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ mapping = gxp_dmabuf_map(gxp, client->vd->domain, ibuf.dmabuf_fd,
+ /*gxp_dma_flags=*/0,
+ mapping_flags_to_dma_dir(ibuf.flags));
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ dev_err(gxp->dev, "Failed to map dma-buf (ret=%d)\n", ret);
+ goto out_unlock;
+ }
+
+ ret = gxp_vd_mapping_store(client->vd, mapping);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to store mapping for dma-buf (ret=%d)\n", ret);
+ goto out_put;
+ }
+
+ ibuf.device_address = mapping->device_address;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ /* If the IOCTL fails, the dma-buf must be unmapped */
+ gxp_vd_mapping_remove(client->vd, mapping);
+ ret = -EFAULT;
+ }
+
+ gxp_mapping_iova_log(client, mapping,
+ GXP_IOVA_LOG_MAP | GXP_IOVA_LOG_DMABUF);
+
+out_put:
+ /*
+ * Release the reference from creating the dmabuf mapping
+ * If the mapping was not successfully stored in the owning virtual
+ * device, this will unmap and cleanup the dmabuf.
+ */
+ gxp_mapping_put(mapping);
+
+out_unlock:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
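+/*
+ * Handler for the GXP_UNMAP_DMABUF ioctl. Looks up the dma-buf mapping by
+ * device address and drops the virtual device's reference to it; the
+ * mapping is unmapped and cleaned up once its last reference is released.
+ */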
+static int gxp_unmap_dmabuf(struct gxp_client *client,
+ struct gxp_map_dmabuf_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_dmabuf_ioctl ibuf;
+ struct gxp_mapping *mapping;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_DMABUF requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Fetch and remove the internal mapping records.
+ * If host_address is not 0, the provided device_address belongs to a
+ * non-dma-buf mapping.
+ */
+ mapping = gxp_vd_mapping_search(client->vd, ibuf.device_address);
+ if (IS_ERR_OR_NULL(mapping) || mapping->host_address) {
+ dev_warn(gxp->dev, "No dma-buf mapped for given IOVA\n");
+ /*
+ * If the device address belongs to a non-dma-buf mapping,
+ * release the reference to it obtained via the search.
+ */
+ if (!IS_ERR_OR_NULL(mapping))
+ gxp_mapping_put(mapping);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Remove the mapping from its VD, releasing the VD's reference */
+ gxp_vd_mapping_remove(client->vd, mapping);
+
+ gxp_mapping_iova_log(client, mapping,
+ GXP_IOVA_LOG_UNMAP | GXP_IOVA_LOG_DMABUF);
+
+ /* Release the reference from gxp_vd_mapping_search() */
+ gxp_mapping_put(mapping);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
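+/*
+ * Handler for the GXP_REGISTER_MAILBOX_EVENTFD ioctl. Attaches an eventfd
+ * to the given virtual core so user space can be notified of mailbox
+ * events for that core; any previously registered eventfd is released
+ * first.
+ */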
+static int gxp_register_mailbox_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_mailbox_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_mailbox_eventfd_ioctl ibuf;
+ struct gxp_eventfd *eventfd;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_REGISTER_MAILBOX_EVENTFD")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ibuf.virtual_core_id >= client->vd->num_cores) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Make sure the provided eventfd is valid */
+ eventfd = gxp_eventfd_create(ibuf.eventfd);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto out;
+ }
+
+ /* Set the new eventfd, replacing any existing one */
+ if (client->mb_eventfds[ibuf.virtual_core_id])
+ gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
+
+ client->mb_eventfds[ibuf.virtual_core_id] = eventfd;
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_unregister_mailbox_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_mailbox_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_mailbox_eventfd_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(client->gxp->dev,
+ "GXP_UNREGISTER_MAILBOX_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ibuf.virtual_core_id >= client->vd->num_cores) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (client->mb_eventfds[ibuf.virtual_core_id])
+ gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
+
+ client->mb_eventfds[ibuf.virtual_core_id] = NULL;
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int
+gxp_get_interface_version(struct gxp_client *client,
+ struct gxp_interface_version_ioctl __user *argp)
+{
+ struct gxp_interface_version_ioctl ibuf;
+ int ret;
+
+ ibuf.version_major = GXP_INTERFACE_VERSION_MAJOR;
+ ibuf.version_minor = GXP_INTERFACE_VERSION_MINOR;
+ memset(ibuf.version_build, 0, GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE);
+ ret = snprintf(ibuf.version_build,
+ GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE - 1,
+ GIT_REPO_TAG);
+
+ if (ret < 0 || ret >= GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE) {
+ dev_warn(
+ client->gxp->dev,
+ "Buffer size insufficient to hold GIT_REPO_TAG (size=%d)\n",
+ ret);
+ }
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
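+/*
+ * Handler for the GXP_TRIGGER_DEBUG_DUMP ioctl. Restricted to root; for
+ * each core selected in the bitmask, sends a notification asking running
+ * firmware to generate a debug dump. Requires the client to hold a
+ * VIRTUAL_DEVICE wakelock.
+ */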
+static int gxp_trigger_debug_dump(struct gxp_client *client,
+ __u32 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int phys_core, i;
+ u32 core_bits;
+ int ret = 0;
+
+ if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
+ return -EPERM;
+
+ if (!gxp_debug_dump_is_enabled()) {
+ dev_err(gxp->dev, "Debug dump functionality is disabled\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&core_bits, argp, sizeof(core_bits)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!check_client_has_available_vd_wakelock(client,
+ "GXP_TRIGGER_DEBUG_DUMP")) {
+ ret = -ENODEV;
+ goto out_unlock_client_semaphore;
+ }
+
+ down_read(&gxp->vd_semaphore);
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(core_bits & BIT(i)))
+ continue;
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, i);
+ if (phys_core < 0) {
+ dev_err(gxp->dev,
+ "Trigger debug dump failed: Invalid virtual core id (%u)\n",
+ i);
+ ret = -EINVAL;
+ continue;
+ }
+
+ if (gxp_is_fw_running(gxp, phys_core)) {
+ gxp_notification_send(gxp, phys_core,
+ CORE_NOTIF_GENERATE_DEBUG_DUMP);
+ }
+ }
+
+ up_read(&gxp->vd_semaphore);
+out_unlock_client_semaphore:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
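+/*
+ * Sync fence ioctls. Fence creation is tied to the client's virtual
+ * device, while signaling and status queries go through the GCIP DMA
+ * fence helpers.
+ */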
+static int
+gxp_create_sync_fence(struct gxp_client *client,
+ struct gxp_create_sync_fence_data __user *datap)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_create_sync_fence_data data;
+ int ret;
+
+ if (copy_from_user(&data, (void __user *)datap, sizeof(data)))
+ return -EFAULT;
+ down_read(&client->semaphore);
+ if (client->vd) {
+ ret = gxp_dma_fence_create(gxp, client->vd, &data);
+ } else {
+ dev_warn(gxp->dev, "client creating sync fence has no VD");
+ ret = -EINVAL;
+ }
+ up_read(&client->semaphore);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)datap, &data, sizeof(data)))
+ ret = -EFAULT;
+ return ret;
+}
+
+static int
+gxp_signal_sync_fence(struct gxp_signal_sync_fence_data __user *datap)
+{
+ struct gxp_signal_sync_fence_data data;
+
+ if (copy_from_user(&data, (void __user *)datap, sizeof(data)))
+ return -EFAULT;
+ return gcip_dma_fence_signal(data.fence, data.error, false);
+}
+
+static int gxp_sync_fence_status(struct gxp_sync_fence_status __user *datap)
+{
+ struct gxp_sync_fence_status data;
+ int ret;
+
+ if (copy_from_user(&data, (void __user *)datap, sizeof(data)))
+ return -EFAULT;
+ ret = gcip_dma_fence_status(data.fence, &data.status);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)datap, &data, sizeof(data)))
+ ret = -EFAULT;
+ return ret;
+}
+
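+/*
+ * Handler for the GXP_REGISTER_INVALIDATED_EVENTFD ioctl. Registers an
+ * eventfd on the client's virtual device to be signaled when the VD is
+ * invalidated, releasing any previously registered eventfd first.
+ */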
+static int gxp_register_invalidated_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_invalidated_eventfd_ioctl __user *argp)
+{
+ struct gxp_register_invalidated_eventfd_ioctl ibuf;
+ struct gxp_eventfd *eventfd;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client,
+ "GXP_REGISTER_INVALIDATED_EVENTFD")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ eventfd = gxp_eventfd_create(ibuf.eventfd);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto out;
+ }
+
+ if (client->vd->invalidate_eventfd)
+ gxp_eventfd_put(client->vd->invalidate_eventfd);
+ client->vd->invalidate_eventfd = eventfd;
+out:
+ up_write(&client->semaphore);
+ return ret;
+}
+
+static int gxp_unregister_invalidated_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_invalidated_eventfd_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ int ret = 0;
+
+ down_write(&client->semaphore);
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "GXP_UNREGISTER_INVALIDATED_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (client->vd->invalidate_eventfd)
+ gxp_eventfd_put(client->vd->invalidate_eventfd);
+ client->vd->invalidate_eventfd = NULL;
+out:
+ up_write(&client->semaphore);
+ return ret;
+}
+
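+/*
+ * Top-level ioctl dispatcher. Chip-specific code gets the first chance to
+ * handle the command via gxp->handle_ioctl; anything it reports as -ENOTTY
+ * falls through to the common handlers below.
+ */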
+static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
+{
+ struct gxp_client *client = file->private_data;
+ void __user *argp = (void __user *)arg;
+ long ret;
+
+ if (client->gxp->handle_ioctl) {
+ ret = client->gxp->handle_ioctl(file, cmd, arg);
+ if (ret != -ENOTTY)
+ return ret;
+ }
+
+ switch (cmd) {
+ case GXP_MAP_BUFFER:
+ ret = gxp_map_buffer(client, argp);
+ break;
+ case GXP_UNMAP_BUFFER:
+ ret = gxp_unmap_buffer(client, argp);
+ break;
+ case GXP_SYNC_BUFFER:
+ ret = gxp_sync_buffer(client, argp);
+ break;
+ case GXP_MAILBOX_RESPONSE:
+ ret = gxp_mailbox_response(client, argp);
+ break;
+ case GXP_GET_SPECS:
+ ret = gxp_get_specs(client, argp);
+ break;
+ case GXP_ALLOCATE_VIRTUAL_DEVICE:
+ ret = gxp_allocate_vd(client, argp);
+ break;
+ case GXP_ETM_TRACE_START_COMMAND:
+ ret = gxp_etm_trace_start_command(client, argp);
+ break;
+ case GXP_ETM_TRACE_SW_STOP_COMMAND:
+ ret = gxp_etm_trace_sw_stop_command(client, argp);
+ break;
+ case GXP_ETM_TRACE_CLEANUP_COMMAND:
+ ret = gxp_etm_trace_cleanup_command(client, argp);
+ break;
+ case GXP_ETM_GET_TRACE_INFO_COMMAND:
+ ret = gxp_etm_get_trace_info_command(client, argp);
+ break;
+ case GXP_ENABLE_CORE_TELEMETRY:
+ ret = gxp_enable_core_telemetry(client, argp);
+ break;
+ case GXP_DISABLE_CORE_TELEMETRY:
+ ret = gxp_disable_core_telemetry(client, argp);
+ break;
+ case GXP_MAP_TPU_MBX_QUEUE:
+ ret = gxp_map_tpu_mbx_queue(client, argp);
+ break;
+ case GXP_UNMAP_TPU_MBX_QUEUE:
+ ret = gxp_unmap_tpu_mbx_queue(client, argp);
+ break;
+ case GXP_REGISTER_CORE_TELEMETRY_EVENTFD:
+ ret = gxp_register_core_telemetry_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD:
+ ret = gxp_unregister_core_telemetry_eventfd(client, argp);
+ break;
+ case GXP_READ_GLOBAL_COUNTER:
+ ret = gxp_read_global_counter(client, argp);
+ break;
+ case GXP_RELEASE_WAKE_LOCK:
+ ret = gxp_release_wake_lock(client, argp);
+ break;
+ case GXP_MAP_DMABUF:
+ ret = gxp_map_dmabuf(client, argp);
+ break;
+ case GXP_UNMAP_DMABUF:
+ ret = gxp_unmap_dmabuf(client, argp);
+ break;
+ case GXP_MAILBOX_COMMAND:
+ ret = gxp_mailbox_command(client, argp);
+ break;
+ case GXP_REGISTER_MAILBOX_EVENTFD:
+ ret = gxp_register_mailbox_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_MAILBOX_EVENTFD:
+ ret = gxp_unregister_mailbox_eventfd(client, argp);
+ break;
+ case GXP_ACQUIRE_WAKE_LOCK:
+ ret = gxp_acquire_wake_lock(client, argp);
+ break;
+ case GXP_GET_INTERFACE_VERSION:
+ ret = gxp_get_interface_version(client, argp);
+ break;
+ case GXP_TRIGGER_DEBUG_DUMP:
+ ret = gxp_trigger_debug_dump(client, argp);
+ break;
+ case GXP_CREATE_SYNC_FENCE:
+ ret = gxp_create_sync_fence(client, argp);
+ break;
+ case GXP_SIGNAL_SYNC_FENCE:
+ ret = gxp_signal_sync_fence(argp);
+ break;
+ case GXP_SYNC_FENCE_STATUS:
+ ret = gxp_sync_fence_status(argp);
+ break;
+ case GXP_REGISTER_INVALIDATED_EVENTFD:
+ ret = gxp_register_invalidated_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_INVALIDATED_EVENTFD:
+ ret = gxp_unregister_invalidated_eventfd(client, argp);
+ break;
+ default:
+ ret = -ENOTTY; /* unknown command */
+ }
+
+ return ret;
+}
+
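+/*
+ * mmap handler. Chip-specific code gets the first chance via
+ * gxp->handle_mmap; the common offsets map the core telemetry log and
+ * trace buffers, in both the current and legacy layouts.
+ */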
+static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct gxp_client *client = file->private_data;
+ int ret;
+
+ if (!client)
+ return -ENODEV;
+
+ if (client->gxp->handle_mmap) {
+ ret = client->gxp->handle_mmap(file, vma);
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ switch (vma->vm_pgoff << PAGE_SHIFT) {
+ case GXP_MMAP_CORE_LOG_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
+ case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET:
+ return gxp_core_telemetry_mmap_buffers(
+ client->gxp, GXP_TELEMETRY_TYPE_TRACING, vma);
+ case GXP_MMAP_CORE_LOG_BUFFER_OFFSET_LEGACY:
+ return gxp_core_telemetry_mmap_buffers_legacy(
+ client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
+ case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET_LEGACY:
+ return gxp_core_telemetry_mmap_buffers_legacy(
+ client->gxp, GXP_TELEMETRY_TYPE_TRACING, vma);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations gxp_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .mmap = gxp_mmap,
+ .open = gxp_open,
+ .release = gxp_release,
+ .unlocked_ioctl = gxp_ioctl,
+};
+
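+/*
+ * Maps the MMIO resources described by the platform device: the main
+ * register block, optional CMU registers, the LPM block (either a separate
+ * resource or aliased onto the main block), and one region per mailbox.
+ */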
+static int gxp_set_reg_resources(struct platform_device *pdev, struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct resource *r;
+ int i;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "Failed to get memory resource\n");
+ return -ENODEV;
+ }
+
+ gxp->regs.paddr = r->start;
+ gxp->regs.size = resource_size(r);
+ gxp->regs.vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->regs.vaddr)) {
+ dev_err(dev, "Failed to map registers\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu");
+ if (!IS_ERR_OR_NULL(r)) {
+ gxp->cmu.paddr = r->start;
+ gxp->cmu.size = resource_size(r);
+ gxp->cmu.vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
+ dev_warn(dev, "Failed to map CMU registers\n");
+ }
+ /*
+ * TODO(b/224685748): Remove this block after CMU CSR is supported
+ * in device tree config.
+ */
+#ifdef GXP_CMU_OFFSET
+ if (IS_ERR_OR_NULL(r) || IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ gxp->cmu.paddr = gxp->regs.paddr - GXP_CMU_OFFSET;
+ gxp->cmu.size = GXP_CMU_SIZE;
+ gxp->cmu.vaddr =
+ devm_ioremap(dev, gxp->cmu.paddr, gxp->cmu.size);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
+ dev_warn(dev, "Failed to map CMU registers\n");
+ }
+#endif
+
+#ifdef GXP_SEPARATE_LPM_OFFSET
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpm");
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "Failed to get LPM resource\n");
+ return -ENODEV;
+ }
+ gxp->lpm_regs.paddr = r->start;
+ gxp->lpm_regs.size = resource_size(r);
+ gxp->lpm_regs.vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->lpm_regs.vaddr)) {
+ dev_err(dev, "Failed to map LPM registers\n");
+ return -ENODEV;
+ }
+#else
+ gxp->lpm_regs.vaddr = gxp->regs.vaddr;
+ gxp->lpm_regs.size = gxp->regs.size;
+ gxp->lpm_regs.paddr = gxp->regs.paddr;
+#endif
+
+ for (i = 0; i < GXP_NUM_MAILBOXES; i++) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, i + 1);
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "Failed to get mailbox%d resource", i);
+ return -ENODEV;
+ }
+
+ gxp->mbx[i].paddr = r->start;
+ gxp->mbx[i].size = resource_size(r);
+ gxp->mbx[i].vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->mbx[i].vaddr)) {
+ dev_err(dev, "Failed to map mailbox%d's register", i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Get TPU device from the device tree. Warnings are shown when any expected
+ * device tree entry is missing.
+ */
+static void gxp_get_tpu_dev(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct platform_device *tpu_pdev;
+ struct device_node *np;
+ phys_addr_t offset, base_addr;
+ int ret;
+
+ /* Get TPU device from device tree */
+ np = of_parse_phandle(dev->of_node, "tpu-device", 0);
+ if (IS_ERR_OR_NULL(np)) {
+ dev_warn(dev, "No tpu-device in device tree\n");
+ goto out_not_found;
+ }
+ tpu_pdev = of_find_device_by_node(np);
+ if (!tpu_pdev) {
+ dev_err(dev, "TPU device not found\n");
+ goto out_not_found;
+ }
+ /* get tpu mailbox register base */
+ ret = of_property_read_u64_index(np, "reg", 0, &base_addr);
+ of_node_put(np);
+ if (ret) {
+ dev_warn(dev, "Unable to get tpu-device base address\n");
+ goto out_not_found;
+ }
+ /* get gxp-tpu mailbox register offset */
+ ret = of_property_read_u64(dev->of_node, "gxp-tpu-mbx-offset", &offset);
+ if (ret) {
+ dev_warn(dev, "Unable to get tpu-device mailbox offset\n");
+ goto out_not_found;
+ }
+ gxp->tpu_dev.dev = get_device(&tpu_pdev->dev);
+ gxp->tpu_dev.mbx_paddr = base_addr + offset;
+ return;
+
+out_not_found:
+ dev_warn(dev, "TPU will not be available for interop\n");
+ gxp->tpu_dev.dev = NULL;
+ gxp->tpu_dev.mbx_paddr = 0;
+}
+
+static void gxp_put_tpu_dev(struct gxp_dev *gxp)
+{
+ /* put_device is no-op on !dev */
+ put_device(gxp->tpu_dev.dev);
+}
+
+/* Get GSA device from device tree. */
+static void gxp_get_gsa_dev(struct gxp_dev *gxp)
+{
+ struct device *dev = gxp->dev;
+ struct device_node *np;
+ struct platform_device *gsa_pdev;
+
+ gxp->gsa_dev = NULL;
+ np = of_parse_phandle(dev->of_node, "gsa-device", 0);
+ if (!np) {
+ dev_warn(
+ dev,
+ "No gsa-device in device tree. Firmware authentication not available\n");
+ return;
+ }
+ gsa_pdev = of_find_device_by_node(np);
+ if (!gsa_pdev) {
+ dev_err(dev, "GSA device not found\n");
+ of_node_put(np);
+ return;
+ }
+ gxp->gsa_dev = get_device(&gsa_pdev->dev);
+ of_node_put(np);
+ dev_info(dev, "GSA device found, Firmware authentication available\n");
+}
+
+static void gxp_put_gsa_dev(struct gxp_dev *gxp)
+{
+ put_device(gxp->gsa_dev);
+}
+
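+/*
+ * Common probe path shared by all GXP chips. Sets up, in order: register
+ * resources, power management, GSA/TPU device lookups, DMA, mailboxes,
+ * debug dump, the IOMMU domain pool, firmware management and loading,
+ * firmware data, core telemetry, thermal, and the DMA fence manager, then
+ * registers the misc device. The error labels unwind in reverse order.
+ */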
+static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_dev *gxp)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ u64 prop;
+
+ dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
+
+ platform_set_drvdata(pdev, gxp);
+ gxp->dev = dev;
+ if (gxp->parse_dt) {
+ ret = gxp->parse_dt(pdev, gxp);
+ if (ret)
+ return ret;
+ }
+
+ ret = gxp_set_reg_resources(pdev, gxp);
+ if (ret)
+ return ret;
+
+ gxp_create_debugdir(gxp);
+
+ ret = gxp_pm_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
+ goto err_remove_debugdir;
+ }
+
+ gxp_get_gsa_dev(gxp);
+ gxp_get_tpu_dev(gxp);
+
+ ret = gxp_dma_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize GXP DMA interface\n");
+ goto err_put_tpu_dev;
+ }
+
+ gxp->mailbox_mgr = gxp_mailbox_create_manager(gxp, GXP_NUM_MAILBOXES);
+ if (IS_ERR(gxp->mailbox_mgr)) {
+ ret = PTR_ERR(gxp->mailbox_mgr);
+ dev_err(dev, "Failed to create mailbox manager: %d\n", ret);
+ goto err_dma_exit;
+ }
+ if (gxp_is_direct_mode(gxp)) {
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_init(gxp->mailbox_mgr);
+#else
+ gxp_dci_init(gxp->mailbox_mgr);
+#endif
+ }
+
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+ ret = gxp_debug_dump_init(gxp, &gxp_sscd_dev, &gxp_sscd_pdata);
+#else
+ ret = gxp_debug_dump_init(gxp, NULL, NULL);
+#endif /* CONFIG_SUBSYSTEM_COREDUMP */
+ if (ret)
+ dev_warn(dev, "Failed to initialize debug dump\n");
+
+ mutex_init(&gxp->pin_user_pages_lock);
+ mutex_init(&gxp->secure_vd_lock);
+
+ gxp->domain_pool = kmalloc(sizeof(*gxp->domain_pool), GFP_KERNEL);
+ if (!gxp->domain_pool) {
+ ret = -ENOMEM;
+ goto err_debug_dump_exit;
+ }
+ if (gxp_is_direct_mode(gxp))
+ ret = gxp_domain_pool_init(gxp, gxp->domain_pool,
+ GXP_NUM_CORES);
+ else
+ ret = gxp_domain_pool_init(gxp, gxp->domain_pool,
+ GXP_NUM_SHARED_SLICES);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize IOMMU domain pool (ret=%d)\n",
+ ret);
+ goto err_free_domain_pool;
+ }
+
+ ret = gxp_fw_init(gxp);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize firmware manager (ret=%d)\n",
+ ret);
+ goto err_domain_pool_destroy;
+ }
+
+ ret = gxp_firmware_loader_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize firmware loader (ret=%d)\n",
+ ret);
+ goto err_fw_destroy;
+ }
+ gxp_dma_init_default_resources(gxp);
+ gxp_vd_init(gxp);
+
+ ret = of_property_read_u64(dev->of_node, "gxp-memory-per-core",
+ &prop);
+ if (ret) {
+ dev_err(dev, "Unable to get memory-per-core from device tree\n");
+ gxp->memory_per_core = 0;
+ } else {
+ gxp->memory_per_core = (u32)prop;
+ }
+
+ ret = gxp_fw_data_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize firmware data: %d\n", ret);
+ goto err_vd_destroy;
+ }
+
+ ret = gxp_core_telemetry_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize core telemetry (ret=%d)", ret);
+ goto err_fw_data_destroy;
+ }
+
+ ret = gxp_thermal_init(gxp);
+ if (ret)
+ dev_warn(dev, "Failed to init thermal driver: %d\n", ret);
+
+ gxp->gfence_mgr = gcip_dma_fence_manager_create(gxp->dev);
+ if (IS_ERR(gxp->gfence_mgr)) {
+ ret = PTR_ERR(gxp->gfence_mgr);
+ dev_err(dev, "Failed to init DMA fence manager: %d\n", ret);
+ goto err_thermal_destroy;
+ }
+
+ INIT_LIST_HEAD(&gxp->client_list);
+ mutex_init(&gxp->client_list_lock);
+ if (gxp->after_probe) {
+ ret = gxp->after_probe(gxp);
+ if (ret)
+ goto err_dma_fence_destroy;
+ }
+ /*
+ * The location of the system config region is only known after the
+ * after_probe callback has run, so this call can't happen earlier.
+ */
+ gxp_fw_data_populate_system_config(gxp);
+
+ gxp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ gxp->misc_dev.name = GXP_NAME;
+ gxp->misc_dev.fops = &gxp_fops;
+ ret = misc_register(&gxp->misc_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register misc device: %d", ret);
+ goto err_before_remove;
+ }
+
+ gxp_create_debugfs(gxp);
+ gxp_debug_pointer = gxp;
+
+ dev_info(dev, "Probe finished");
+ return 0;
+
+err_before_remove:
+ if (gxp->before_remove)
+ gxp->before_remove(gxp);
+err_dma_fence_destroy:
+ /* DMA fence manager creation doesn't need revert */
+err_thermal_destroy:
+ gxp_thermal_exit(gxp);
+ gxp_core_telemetry_exit(gxp);
+err_fw_data_destroy:
+ gxp_fw_data_destroy(gxp);
+err_vd_destroy:
+ gxp_vd_destroy(gxp);
+ gxp_firmware_loader_destroy(gxp);
+err_fw_destroy:
+ gxp_fw_destroy(gxp);
+err_domain_pool_destroy:
+ gxp_domain_pool_destroy(gxp->domain_pool);
+err_free_domain_pool:
+ kfree(gxp->domain_pool);
+err_debug_dump_exit:
+ gxp_debug_dump_exit(gxp);
+ /* mailbox manager init doesn't need revert */
+err_dma_exit:
+ gxp_dma_exit(gxp);
+err_put_tpu_dev:
+ gxp_put_tpu_dev(gxp);
+ gxp_put_gsa_dev(gxp);
+ gxp_pm_destroy(gxp);
+err_remove_debugdir:
+ gxp_remove_debugdir(gxp);
+ return ret;
+}
+
+static int gxp_common_platform_remove(struct platform_device *pdev)
+{
+ struct gxp_dev *gxp = platform_get_drvdata(pdev);
+
+ /*
+ * Call gxp_thermal_exit before gxp_remove_debugdir since it will
+ * remove its own debugfs.
+ */
+ gxp_thermal_exit(gxp);
+ gxp_remove_debugdir(gxp);
+ misc_deregister(&gxp->misc_dev);
+ if (gxp->before_remove)
+ gxp->before_remove(gxp);
+ gxp_core_telemetry_exit(gxp);
+ gxp_fw_data_destroy(gxp);
+ gxp_vd_destroy(gxp);
+ gxp_firmware_loader_destroy(gxp);
+ gxp_fw_destroy(gxp);
+ gxp_domain_pool_destroy(gxp->domain_pool);
+ kfree(gxp->domain_pool);
+ gxp_debug_dump_exit(gxp);
+ gxp_dma_exit(gxp);
+ gxp_put_tpu_dev(gxp);
+ gxp_put_gsa_dev(gxp);
+ gxp_pm_destroy(gxp);
+
+ gxp_debug_pointer = NULL;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+
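+/*
+ * System suspend is refused with -EAGAIN whenever the block is still
+ * powered; the loop below only logs which clients are holding a BLOCK
+ * wakelock to help diagnose the failed suspend.
+ */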
+static int gxp_platform_suspend(struct device *dev)
+{
+ struct gxp_dev *gxp = dev_get_drvdata(dev);
+ struct gxp_client *client;
+
+ if (!gcip_pm_is_powered(gxp->power_mgr->pm))
+ return 0;
+
+ /* Log clients currently holding a wakelock */
+ if (!mutex_trylock(&gxp->client_list_lock)) {
+ dev_warn_ratelimited(
+ gxp->dev,
+ "Unable to get client list lock on suspend failure\n");
+ return -EAGAIN;
+ }
+
+ list_for_each_entry(client, &gxp->client_list, list_entry) {
+ if (!down_read_trylock(&client->semaphore)) {
+ dev_warn_ratelimited(
+ gxp->dev,
+ "Unable to acquire client lock (tgid=%d pid=%d)\n",
+ client->tgid, client->pid);
+ continue;
+ }
+
+ if (client->has_block_wakelock)
+ dev_warn_ratelimited(
+ gxp->dev,
+ "Cannot suspend with client holding wakelock (tgid=%d pid=%d)\n",
+ client->tgid, client->pid);
+
+ up_read(&client->semaphore);
+ }
+
+ mutex_unlock(&gxp->client_list_lock);
+
+ return -EAGAIN;
+}
+
+static int gxp_platform_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops gxp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gxp_platform_suspend, gxp_platform_resume)
+};
+
+#endif /* IS_ENABLED(CONFIG_PM_SLEEP) */
diff --git a/gxp-config.h b/gxp-config.h
index 154e767..ebc1c78 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Include all configuration files for GXP.
*
@@ -18,13 +18,6 @@
#endif /* unknown */
-#if IS_ENABLED(CONFIG_GXP_GEM5)
-#undef GXP_NUM_CORES
-#define GXP_NUM_CORES 1
-#endif
-
-#define GXP_NUM_PREALLOCATED_DOMAINS GXP_NUM_CORES
-
#if defined(CONFIG_GXP_ZEBU) || defined(CONFIG_GXP_IP_ZEBU)
#define GXP_TIME_DELAY_FACTOR 20
#else
@@ -35,13 +28,25 @@
#define SYNC_BARRIER_COUNT 16
-/* Core address space starts at Inst_BPM block */
-#define GXP_CORE_0_BASE GXP_REG_CORE_0_INST_BPM
-#define GXP_CORE_SIZE (GXP_REG_CORE_1_INST_BPM - GXP_REG_CORE_0_INST_BPM)
+#ifndef GXP_USE_LEGACY_MAILBOX
+#define GXP_USE_LEGACY_MAILBOX 0
+#endif
+
+#ifndef GXP_HAS_LAP
+#define GXP_HAS_LAP 1
+#endif
+
+#ifndef GXP_HAS_MCU
+#define GXP_HAS_MCU 1
+#endif
-/* LPM address space starts at lpm_version register */
-#define GXP_LPM_BASE GXP_REG_LPM_VERSION
-#define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0
-#define GXP_LPM_PSM_SIZE (GXP_REG_LPM_PSM_1 - GXP_REG_LPM_PSM_0)
+/*
+ * Only supports interop with TPU when
+ * 1. Unit testing, or
+ * 2. Production on Android (to exclude vanilla Linux for bringup) but not GEM5.
+ */
+#define HAS_TPU_EXT \
+ ((IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5))
#endif /* __GXP_CONFIG_H__ */
diff --git a/gxp-core-telemetry.c b/gxp-core-telemetry.c
new file mode 100644
index 0000000..bfa9264
--- /dev/null
+++ b/gxp-core-telemetry.c
@@ -0,0 +1,936 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GXP core telemetry support
+ *
+ * Copyright (C) 2021-2022 Google LLC
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
+#include "gxp-dma.h"
+#include "gxp-firmware.h"
+#include "gxp-firmware-data.h"
+#include "gxp-host-device-structs.h"
+#include "gxp-notification.h"
+#include "gxp-vd.h"
+
+static uint gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
+module_param_named(core_telemetry_buffer_size, gxp_core_telemetry_buffer_size, uint, 0660);
+
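+/*
+ * Returns true if firmware on @core reports telemetry of @type as enabled
+ * in the shared firmware-data status word.
+ */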
+static inline bool is_telemetry_enabled(struct gxp_dev *gxp, uint core, u8 type)
+{
+ u32 device_status =
+ gxp_fw_data_get_core_telemetry_device_status(gxp, core, type);
+
+ return device_status & GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED;
+}
+
+void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core)
+{
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
+
+ /* Wake any threads waiting on a core telemetry disable ACK */
+ wake_up(&mgr->waitq);
+
+ /* Signal the appropriate eventfd for any active core telemetry types */
+ mutex_lock(&mgr->lock);
+
+ if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
+ mgr->logging_efd)
+ eventfd_signal(mgr->logging_efd, 1);
+
+ if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
+ mgr->tracing_efd)
+ eventfd_signal(mgr->tracing_efd, 1);
+
+ mutex_unlock(&mgr->lock);
+}
+
+static void telemetry_status_notification_work(struct work_struct *work)
+{
+ struct gxp_core_telemetry_work *telem_work =
+ container_of(work, struct gxp_core_telemetry_work, work);
+ struct gxp_dev *gxp = telem_work->gxp;
+ uint core = telem_work->core;
+
+ gxp_core_telemetry_status_notify(gxp, core);
+}
+
+static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
+ size_t size);
+static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
+
+/**
+ * enable_telemetry_buffers() - enable the telemetry buffers from host.
+ *
+ * @gxp: The GXP device the buffers were allocated for.
+ * @data: The data describing a set of core telemetry buffers to be enabled.
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
+ *
+ * Return:
+ * * 0 - Success
+ * * otherwise - Error returned by `gxp_fw_data_set_core_telemetry_descriptors()`
+ */
+static int enable_telemetry_buffers(struct gxp_dev *gxp,
+ struct buffer_data *data, u8 type)
+{
+ int i, ret;
+
+ /* Initialize the per core telemetry buffers header with magic code. */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /*
+ * First 64 bytes of per core telemetry buffers are reserved
+ * for buffer metadata header. We don't need to explicitly
+ * reset the header fields as during buffer allocation the
+ * entire buffer is zeroed out. First 4 bytes of buffer
+ * metadata header are reserved for valid_magic field.
+ */
+ *((uint *)data->buffers[i].vaddr) =
+ GXP_TELEMETRY_BUFFER_VALID_MAGIC_CODE;
+ }
+
+ data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ ret = gxp_fw_data_set_core_telemetry_descriptors(
+ gxp, type, data->host_status, data->buffers, data->size);
+
+ if (ret) {
+ dev_err(gxp->dev,
+ "setting telemetry buffers in scratchpad region failed (ret=%d).",
+ ret);
+ return ret;
+ }
+
+ data->is_enabled = true;
+ return 0;
+}
+
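+/*
+ * Allocates the per-core logging and tracing buffers up front and
+ * publishes their descriptors to the firmware-data region via
+ * enable_telemetry_buffers().
+ */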
+int gxp_core_telemetry_init(struct gxp_dev *gxp)
+{
+ struct gxp_core_telemetry_manager *mgr;
+ struct buffer_data *log_buff_data, *trace_buff_data;
+ int i, ret;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ mutex_init(&mgr->lock);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ INIT_WORK(&mgr->notification_works[i].work,
+ telemetry_status_notification_work);
+ mgr->notification_works[i].gxp = gxp;
+ mgr->notification_works[i].core = i;
+
+ }
+ init_waitqueue_head(&mgr->waitq);
+
+ gxp->core_telemetry_mgr = mgr;
+ gxp_core_telemetry_buffer_size = ALIGN(gxp_core_telemetry_buffer_size,
+ GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE);
+ if ((gxp_core_telemetry_buffer_size < CORE_TELEMETRY_DEFAULT_BUFFER_SIZE) ||
+ (gxp_core_telemetry_buffer_size > CORE_TELEMETRY_MAX_BUFFER_SIZE)) {
+ dev_warn(gxp->dev,
+ "Invalid core telemetry buffer size, enforcing to default %u bytes\n",
+ CORE_TELEMETRY_DEFAULT_BUFFER_SIZE);
+ gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
+ }
+
+ /* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
+ mutex_lock(&mgr->lock);
+ log_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
+ if (IS_ERR_OR_NULL(log_buff_data)) {
+ dev_warn(gxp->dev,
+ "Failed to allocate per core log buffer of %u bytes\n",
+ gxp_core_telemetry_buffer_size);
+ ret = -ENOMEM;
+ goto err_free_buffers;
+ }
+
+ trace_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
+ if (IS_ERR_OR_NULL(trace_buff_data)) {
+ dev_warn(gxp->dev,
+ "Failed to allocate per core trace buffer of %u bytes\n",
+ gxp_core_telemetry_buffer_size);
+ free_telemetry_buffers(gxp, log_buff_data);
+ ret = -ENOMEM;
+ goto err_free_buffers;
+ }
+
+ ret = enable_telemetry_buffers(gxp, log_buff_data,
+ GXP_TELEMETRY_TYPE_LOGGING);
+ if (ret) {
+ dev_warn(gxp->dev, "enable telemetry buffer failed (ret=%d)",
+ ret);
+ goto err_free;
+ }
+ ret = enable_telemetry_buffers(gxp, trace_buff_data,
+ GXP_TELEMETRY_TYPE_TRACING);
+ if (ret) {
+ dev_warn(gxp->dev, "enable telemetry buffer failed (ret=%d)",
+ ret);
+ goto err_free;
+ }
+
+ gxp->core_telemetry_mgr->logging_buff_data = log_buff_data;
+ gxp->core_telemetry_mgr->tracing_buff_data = trace_buff_data;
+ mutex_unlock(&mgr->lock);
+ return 0;
+
+err_free:
+ free_telemetry_buffers(gxp, log_buff_data);
+ free_telemetry_buffers(gxp, trace_buff_data);
+err_free_buffers:
+ mutex_unlock(&mgr->lock);
+ mutex_destroy(&mgr->lock);
+ devm_kfree(gxp->dev, mgr);
+ gxp->core_telemetry_mgr = NULL;
+ return ret;
+}
+
+/* Wrapper struct to be used by the core telemetry vma_ops. */
+struct telemetry_vma_data {
+ struct gxp_dev *gxp;
+ struct buffer_data *buff_data;
+ u8 type;
+ refcount_t ref_count;
+};
+
+static void telemetry_vma_open(struct vm_area_struct *vma)
+{
+ struct gxp_dev *gxp;
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ /*
+ * vma_ops are required only for legacy telemetry flow
+ * to keep track of buffer allocation during mmap and
+ * buffer free during munmap.
+ */
+ if (IS_ERR_OR_NULL(vma_data))
+ return;
+
+ gxp = vma_data->gxp;
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ refcount_inc(&vma_data->ref_count);
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+}
+
+static void telemetry_vma_close(struct vm_area_struct *vma)
+{
+ struct gxp_dev *gxp;
+ struct buffer_data *buff_data;
+ u8 type;
+ struct telemetry_vma_data *vma_data =
+ (struct telemetry_vma_data *)vma->vm_private_data;
+ /*
+ * vma_ops are required only for legacy telemetry flow
+ * to keep track of buffer allocation during mmap and
+ * buffer free during munmap.
+ */
+ if (IS_ERR_OR_NULL(vma_data))
+ return;
+
+ gxp = vma_data->gxp;
+ buff_data = vma_data->buff_data;
+ type = vma_data->type;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ if (!refcount_dec_and_test(&vma_data->ref_count))
+ goto out;
+
+ /*
+ * Free the core telemetry buffers if they are no longer in use.
+ *
+ * If a client enabled core telemetry, then closed their VMA without
+ * disabling it, firmware will still be expecting those buffers to be
+ * mapped. If this is the case, core telemetry will be disabled, and the
+ * buffers freed, when the client is closed.
+ *
+ * We cannot disable core telemetry here, since attempting to lock the
+ * `vd_semaphore` while holding the mmap lock can lead to deadlocks.
+ */
+ if (refcount_dec_and_test(&buff_data->ref_count)) {
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
+ break;
+ default:
+ dev_warn(gxp->dev, "%s called with invalid type %u\n",
+ __func__, type);
+ }
+ free_telemetry_buffers(gxp, buff_data);
+ }
+
+ kfree(vma_data);
+
+out:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+}
+
+/* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
+static const struct vm_operations_struct telemetry_vma_ops = {
+ .open = telemetry_vma_open,
+ .close = telemetry_vma_close,
+};
+
+/**
+ * check_telemetry_type_availability() - Checks if @type is valid and whether
+ * buffers of that type already exist.
+ * @gxp: The GXP device to check availability for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ *
+ * Return:
+ * * 0 - @type is valid and can have new buffers created
+ * * -EBUSY - Buffers already exist for @type
+ * * -EINVAL - @type is not a valid core telemetry type
+ */
+static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
+{
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ if (gxp->core_telemetry_mgr->logging_buff_data_legacy)
+ return -EBUSY;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ if (gxp->core_telemetry_mgr->tracing_buff_data_legacy)
+ return -EBUSY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * allocate_telemetry_buffers() - Allocate and populate a `struct buffer_data`,
+ * including allocating and mapping one coherent
+ * buffer of @size bytes per core.
+ * @gxp: The GXP device to allocate the buffers for
+ * @size: The size of buffer to allocate for each core
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ *
+ * Return: A pointer to the `struct buffer_data` if successful, error otherwise
+ */
+static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
+ size_t size)
+{
+ struct buffer_data *data;
+ int i;
+ int ret = 0;
+
+ size = size < PAGE_SIZE ? PAGE_SIZE : size;
+
+ /* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate a cache-coherent buffer per core for logging/tracing */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /* Allocate a coherent buffer in the default domain */
+ ret = gxp_dma_alloc_coherent_buf(gxp, NULL, size, GFP_KERNEL, 0,
+ &data->buffers[i]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to allocate coherent buffer\n");
+ goto err_alloc;
+ }
+ }
+ data->size = size;
+ refcount_set(&data->ref_count, 1);
+ data->is_enabled = false;
+
+ return data;
+
+err_alloc:
+ while (i--)
+ gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);
+ kfree(data);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * free_telemetry_buffers() - Unmap and free a `struct buffer_data`
+ * @gxp: The GXP device the buffers were allocated for
+ * @data: The descriptor of the buffers to unmap and free
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ */
+static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data)
+{
+ int i;
+
+ /* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);
+
+ kfree(data);
+}
+
+/**
+ * remap_telemetry_buffers() - Remaps a set of core telemetry buffers into a
+ * user-space vm_area.
+ * @gxp: The GXP device the buffers were allocated for
+ * @vma: A vm area to remap the buffers into
+ * @buff_data: The data describing a set of core telemetry buffers to remap
+ *
+ * Caller must hold the core telemetry_manager's lock.
+ *
+ * Return:
+ * * 0 - Success
+ * * otherwise - Error returned by `remap_pfn_range()`
+ */
+static int remap_telemetry_buffers(struct gxp_dev *gxp,
+ struct vm_area_struct *vma,
+ struct buffer_data *buff_data)
+{
+ unsigned long orig_pgoff = vma->vm_pgoff;
+ int i;
+ unsigned long offset;
+ phys_addr_t phys;
+ int ret = 0;
+
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+
+ /* mmap the buffers */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_pgoff = 0;
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /*
+ * Remap each core's buffer a page at a time, in case it is not
+ * physically contiguous.
+ */
+ for (offset = 0; offset < buff_data->size; offset += PAGE_SIZE) {
+ /*
+ * `virt_to_phys()` does not work on memory allocated
+ * by `dma_alloc_coherent()`, so we have to use
+ * `iommu_iova_to_phys()` instead. Since all buffers
+ * are mapped to the default domain as well as any per-
+ * core domains, we can use it here to get the physical
+ * address of any valid IOVA, regardless of its core.
+ */
+ phys = iommu_iova_to_phys(
+ iommu_get_domain_for_dev(gxp->dev),
+ buff_data->buffers[i].dma_addr + offset);
+ ret = remap_pfn_range(
+ vma,
+ vma->vm_start + buff_data->size * i + offset,
+ phys >> PAGE_SHIFT, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ vma->vm_pgoff = orig_pgoff;
+ /* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
+ vma->vm_ops = &telemetry_vma_ops;
+
+ return ret;
+}
+
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
+{
+ int ret = 0;
+ struct buffer_data *buff_data;
+ size_t total_size = vma->vm_end - vma->vm_start;
+ size_t size = total_size / GXP_NUM_CORES;
+
+ if (!gxp->core_telemetry_mgr)
+ return -ENODEV;
+
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ buff_data = gxp->core_telemetry_mgr->logging_buff_data;
+ else if (type == GXP_TELEMETRY_TYPE_TRACING)
+ buff_data = gxp->core_telemetry_mgr->tracing_buff_data;
+ else
+ return -EINVAL;
+ /*
+ * Total size must divide evenly into a GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
+ * aligned buffer per core.
+ */
+ if (!total_size ||
+ total_size % (GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE * GXP_NUM_CORES)) {
+ dev_warn(
+ gxp->dev,
+ "Invalid vma size(%lu bytes) requested for telemetry\n",
+ total_size);
+ return -EINVAL;
+ }
+ /*
+ * The per-core buffer size must equal the preallocated, aligned
+ * buffer size for each core.
+ */
+ if (size != buff_data->size) {
+ dev_warn(
+ gxp->dev,
+ "Invalid per core requested telemetry buffer size(%lu bytes)\n",
+ size);
+ return -EINVAL;
+ }
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ ret = remap_telemetry_buffers(gxp, vma, buff_data);
+ if (ret)
+ goto err;
+ vma->vm_private_data = NULL;
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return 0;
+err:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
+
+int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
+{
+ int ret = 0;
+ struct telemetry_vma_data *vma_data;
+ size_t total_size = vma->vm_end - vma->vm_start;
+ size_t size = total_size / GXP_NUM_CORES;
+ struct buffer_data *buff_data;
+ int i;
+
+ if (!gxp->core_telemetry_mgr)
+ return -ENODEV;
+
+ /* Total size must divide evenly into 1 page-aligned buffer per core */
+ if (!total_size || total_size % (PAGE_SIZE * GXP_NUM_CORES))
+ return -EINVAL;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ ret = check_telemetry_type_availability(gxp, type);
+ if (ret)
+ goto err;
+
+ vma_data = kmalloc(sizeof(*vma_data), GFP_KERNEL);
+ if (!vma_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ buff_data = allocate_telemetry_buffers(gxp, size);
+ if (IS_ERR(buff_data)) {
+ ret = PTR_ERR(buff_data);
+ goto err_free_vma_data;
+ }
+
+ ret = remap_telemetry_buffers(gxp, vma, buff_data);
+ if (ret)
+ goto err_free_buffers;
+
+ vma_data->gxp = gxp;
+ vma_data->buff_data = buff_data;
+ vma_data->type = type;
+ refcount_set(&vma_data->ref_count, 1);
+ vma->vm_private_data = vma_data;
+
+ /* Save book-keeping on the buffers in the core telemetry manager */
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = buff_data;
+ else /* type == GXP_TELEMETRY_TYPE_TRACING */
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = buff_data;
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+
+ return 0;
+
+err_free_buffers:
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ gxp_dma_free_coherent_buf(gxp, NULL, &buff_data->buffers[i]);
+ kfree(buff_data);
+
+err_free_vma_data:
+ kfree(vma_data);
+
+err:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
+
+int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type)
+{
+ struct buffer_data *data;
+ int ret = 0;
+ uint core;
+ struct gxp_virtual_device *vd;
+
+ /*
+ * `vd_semaphore` cannot be acquired while holding the core telemetry
+ * lock, so acquire it here before locking the core telemetry lock.
+ */
+ down_read(&gxp->vd_semaphore);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!data) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Map the buffers for any cores already running */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ vd = gxp->core_to_vd[core];
+ if (vd != NULL) {
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, &data->buffers[core], vd->domain, 0);
+ if (ret)
+ goto err;
+ }
+ }
+
+ /* Populate the buffer fields in firmware-data */
+ data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status,
+ data->buffers, data->size);
+
+ /* Notify any running cores that firmware-data was updated */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp_is_fw_running(gxp, core))
+ gxp_notification_send(gxp, core,
+ CORE_NOTIF_TELEMETRY_STATUS);
+ }
+
+ refcount_inc(&data->ref_count);
+ data->is_enabled = true;
+
+ goto out;
+err:
+ while (core--) {
+ vd = gxp->core_to_vd[core];
+ if (vd)
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data->buffers[core]);
+ }
+
+out:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
+
+/**
+ * notify_core_and_wait_for_disable() - Notify a core that telemetry state has
+ * been changed by the host and wait for
+ * the core to stop using telemetry.
+ * @gxp: The GXP device core telemetry is changing for
+ * @core: The core in @gxp to notify of the telemetry state change
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold `core_telemetry_mgr->lock`.
+ * Caller must hold `gxp->vd_semaphore` for reading only.
+ * It is not allowed to hold `gxp->vd_semaphore` for writing, since this
+ * function needs to release `gxp->vd_semaphore` at different points to sleep.
+ *
+ * Return:
+ * * 0 - Firmware on @core is no longer using telemetry of @type
+ * * -ENXIO - Firmware on @core is unresponsive
+ */
+static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
+ u8 type)
+{
+ uint retries_left = 50;
+
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+ lockdep_assert_held_read(&gxp->vd_semaphore);
+
+ gxp_notification_send(gxp, core, CORE_NOTIF_TELEMETRY_STATUS);
+
+ /* Wait for ACK from firmware */
+ while (is_telemetry_enabled(gxp, core, type) &&
+ gxp_is_fw_running(gxp, core) && retries_left) {
+ /* Release vd_semaphore while waiting */
+ up_read(&gxp->vd_semaphore);
+
+ /*
+ * The VD lock must be held to check if firmware is running, so
+ * the wait condition is only whether the firmware data has been
+ * updated to show the core disabling telemetry.
+ *
+ * If a core does stop running firmware while this function is
+ * asleep, it will be seen at the next timeout.
+ */
+ wait_event_timeout(gxp->core_telemetry_mgr->waitq,
+ !is_telemetry_enabled(gxp, core, type),
+ msecs_to_jiffies(10));
+ retries_left--;
+
+ /*
+ * No function may attempt to acquire the `vd_semaphore` while
+ * holding the core telemetry lock, so it must be released, then
+ * re-acquired once the `vd_semaphore` is held.
+ */
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ down_read(&gxp->vd_semaphore);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ }
+
+ /*
+ * If firmware has stopped running altogether, that is sufficient to be
+ * considered disabled. If firmware is started on this core again, it
+ * is responsible for clearing its status.
+ */
+ if (unlikely(is_telemetry_enabled(gxp, core, type) &&
+ gxp_is_fw_running(gxp, core)))
+ return -ENXIO;
+
+ return 0;
+}
+
+/**
+ * telemetry_disable_locked() - Helper function to break out the actual
+ * process of disabling core telemetry so that it
+ * can be invoked by internal functions that are
+ * already holding the core telemetry lock.
+ * @gxp: The GXP device to disable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold `core_telemetry_mgr->lock`.
+ * Caller must hold `gxp->vd_semaphore` for reading only.
+ * It is not allowed to hold `gxp->vd_semaphore` for writing, since this
+ * function needs to release `gxp->vd_semaphore` at different points to sleep.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
+{
+ struct buffer_data *data;
+ int ret = 0;
+ uint core;
+ struct gxp_virtual_device *vd;
+
+ lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
+ lockdep_assert_held_read(&gxp->vd_semaphore);
+
+ /* Cleanup core telemetry manager's book-keeping */
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!data)
+ return -ENXIO;
+
+ if (!(data->host_status & GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED))
+ return 0;
+
+ data->is_enabled = false;
+
+ /* Clear the log buffer fields in firmware-data */
+ data->host_status &= ~GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
+ gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status, NULL, 0);
+
+ /* Notify any running cores that firmware-data was updated */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp_is_fw_running(gxp, core)) {
+ ret = notify_core_and_wait_for_disable(gxp, core, type);
+ if (ret)
+ dev_warn(
+ gxp->dev,
+ "%s: core%u failed to disable telemetry (type=%u, ret=%d)\n",
+ __func__, core, type, ret);
+ }
+ vd = gxp->core_to_vd[core];
+ if (vd)
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data->buffers[core]);
+ }
+
+ if (refcount_dec_and_test(&data->ref_count)) {
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
+ break;
+ default:
+ /* NO-OP, we returned above if `type` was invalid */
+ break;
+ }
+ free_telemetry_buffers(gxp, data);
+ }
+
+ return 0;
+}
+
+int gxp_core_telemetry_disable(struct gxp_dev *gxp, u8 type)
+{
+ int ret;
+
+ /*
+ * `vd_semaphore` cannot be acquired while holding the core telemetry
+ * lock, so acquire it here before locking the core telemetry lock.
+ */
+ down_read(&gxp->vd_semaphore);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ ret = telemetry_disable_locked(gxp, type);
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
+
+int gxp_core_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
+{
+ struct eventfd_ctx *new_ctx;
+ struct eventfd_ctx **ctx_to_set = NULL;
+ int ret = 0;
+
+ new_ctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(new_ctx))
+ return PTR_ERR(new_ctx);
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ ctx_to_set = &gxp->core_telemetry_mgr->logging_efd;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ ctx_to_set = &gxp->core_telemetry_mgr->tracing_efd;
+ break;
+ default:
+ ret = -EINVAL;
+ eventfd_ctx_put(new_ctx);
+ goto out;
+ }
+
+ if (*ctx_to_set) {
+ dev_warn(
+ gxp->dev,
+ "Replacing existing core telemetry eventfd (type=%u)\n",
+ type);
+ eventfd_ctx_put(*ctx_to_set);
+ }
+
+ *ctx_to_set = new_ctx;
+
+out:
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
+
+int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
+{
+ int ret = 0;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ if (gxp->core_telemetry_mgr->logging_efd)
+ eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
+ gxp->core_telemetry_mgr->logging_efd = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ if (gxp->core_telemetry_mgr->tracing_efd)
+ eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
+ gxp->core_telemetry_mgr->tracing_efd = NULL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+
+ return ret;
+}
+
+struct work_struct *
+gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core)
+{
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
+
+ if (!mgr || core >= GXP_NUM_CORES)
+ return NULL;
+
+ return &mgr->notification_works[core].work;
+}
+
+void gxp_core_telemetry_exit(struct gxp_dev *gxp)
+{
+ struct buffer_data *log_buff_data, *trace_buff_data;
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
+
+ if (!mgr) {
+ dev_warn(gxp->dev, "Core telemetry manager was not allocated\n");
+ return;
+ }
+
+ /* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ log_buff_data = mgr->logging_buff_data;
+ trace_buff_data = mgr->tracing_buff_data;
+
+ if (!IS_ERR_OR_NULL(log_buff_data))
+ free_telemetry_buffers(gxp, log_buff_data);
+
+ if (!IS_ERR_OR_NULL(trace_buff_data))
+ free_telemetry_buffers(gxp, trace_buff_data);
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->logging_efd)) {
+ dev_warn(gxp->dev, "logging_efd was not released\n");
+ eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
+ gxp->core_telemetry_mgr->logging_efd = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->tracing_efd)) {
+ dev_warn(gxp->dev, "tracing_efd was not released\n");
+ eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
+ gxp->core_telemetry_mgr->tracing_efd = NULL;
+ }
+
+ mutex_destroy(&mgr->lock);
+ devm_kfree(gxp->dev, mgr);
+ gxp->core_telemetry_mgr = NULL;
+}
diff --git a/gxp-core-telemetry.h b/gxp-core-telemetry.h
new file mode 100644
index 0000000..0ceeb60
--- /dev/null
+++ b/gxp-core-telemetry.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GXP core telemetry support
+ *
+ * Copyright (C) 2021-2022 Google LLC
+ */
+
+#ifndef __GXP_CORE_TELEMETRY_H__
+#define __GXP_CORE_TELEMETRY_H__
+
+#include <linux/eventfd.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+
+#include "gxp-dma.h"
+#include "gxp-internal.h"
+#include "gxp.h"
+
+/* Default telemetry buffer size per core */
+#define CORE_TELEMETRY_DEFAULT_BUFFER_SIZE GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
+/**
+ * Maximum core telemetry buffer size that can be reported by the GXP_GET_SPECS
+ * ioctl. The ioctl reserves 8 bits for the buffer size, expressed in units of
+ * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
+ */
+#define CORE_TELEMETRY_MAX_BUFFER_SIZE (U8_MAX * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE)
+/* Secure telemetry buffer size per core */
+#define SECURE_CORE_TELEMETRY_BUFFER_SIZE GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE
+
+struct gxp_core_telemetry_work {
+ struct work_struct work;
+ struct gxp_dev *gxp;
+ uint core;
+};
+
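+/*
+ * Per-device core telemetry state. Each buffer_data instance tracks one
+ * coherent buffer per core plus the host status word shared with firmware,
+ * for either the logging or the tracing telemetry type.
+ */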
+struct gxp_core_telemetry_manager {
+ struct buffer_data {
+ u32 host_status;
+ struct gxp_coherent_buf buffers[GXP_NUM_CORES];
+ u32 size;
+ refcount_t ref_count;
+ bool is_enabled;
+ } *logging_buff_data_legacy, *tracing_buff_data_legacy,
+ *logging_buff_data, *tracing_buff_data;
+ /* Protects logging_buff_data and tracing_buff_data */
+ struct mutex lock;
+ struct gxp_core_telemetry_work notification_works[GXP_NUM_CORES];
+ wait_queue_head_t waitq;
+ struct eventfd_ctx *logging_efd;
+ struct eventfd_ctx *tracing_efd;
+};
+
+/**
+ * gxp_core_telemetry_init() - Initialize telemetry support
+ * @gxp: The GXP device to initialize core telemetry support for
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENOMEM - Insufficient memory is available to initialize support
+ */
+int gxp_core_telemetry_init(struct gxp_dev *gxp);
+
+/**
+ * gxp_core_telemetry_mmap_buffers() - Maps the preallocated telemetry
+ * buffers to the user-space vma.
+ * @gxp: The GXP device to create the buffers for.
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
+ * @vma: The vma from user-space which all cores' buffers will be mapped into.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENODEV - Core telemetry support has not been initialized. Must explicitly
+ * check this, since this function is called based on user-input.
+ * * -EINVAL - Either the vma size is not aligned or @type is not valid.
+ */
+int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
+
+/**
+ * gxp_core_telemetry_mmap_buffers_legacy() - Allocate a telemetry buffer for
+ * each core and map them to their
+ * core and the user-space vma
+ * @gxp: The GXP device to create the buffers for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @vma: The vma from user-space which all cores' buffers will be mapped into
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - Core telemetry support has not been initialized. Must explicitly
+ * check this, since this function is called based on user-input.
+ * * -EBUSY - The requested core telemetry @type is already in use
+ * * -EINVAL - Either the vma size is not aligned or @type is not valid
+ * * -ENOMEM - Insufficient memory is available to allocate and map the buffers
+ */
+int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
+
+/**
+ * gxp_core_telemetry_enable() - Enable logging or tracing for all DSP cores
+ * @gxp: The GXP device to enable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type);
+
+/**
+ * gxp_core_telemetry_disable() - Disable logging or tracing for all DSP cores
+ * @gxp: The GXP device to disable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+int gxp_core_telemetry_disable(struct gxp_dev *gxp, u8 type);
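
A minimal sketch of the enable/disable pairing, assuming logging buffers have already been mapped via one of the mmap helpers above; the wrapper function itself is illustrative:

/* Illustrative only: enable logging, let it run, then disable it. */
static int telemetry_logging_session_sketch(struct gxp_dev *gxp)
{
	int ret = gxp_core_telemetry_enable(gxp, GXP_TELEMETRY_TYPE_LOGGING);

	if (ret) /* e.g. -ENXIO if the buffers were never mapped */
		return ret;
	/* ... cores produce log entries, user space drains the buffers ... */
	return gxp_core_telemetry_disable(gxp, GXP_TELEMETRY_TYPE_LOGGING);
}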
+
+/**
+ * gxp_core_telemetry_register_eventfd() - Register an eventfd to be signaled
+ * when core telemetry notifications
+ * arrive while the specified @type of
+ * core telemetry is enabled
+ * @gxp: The GXP device to register the eventfd for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @fd: A file descriptor for an eventfd from user-space
+ *
+ * If another eventfd has already been registered for the given @type, the old
+ * eventfd will be unregistered and replaced.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EBADF - @fd is not a valid file descriptor (via `eventfd_ctx_fdget()`)
+ * * -EINVAL - Invalid @type or @fd is not an eventfd
+ */
+int gxp_core_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
+
+/**
+ * gxp_core_telemetry_unregister_eventfd() - Unregister and release a reference
+ * to a previously registered eventfd
+ * @gxp: The GXP device to unregister the eventfd for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ */
+int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type);
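
The two eventfd helpers are expected to be used as a register/unregister pair. A hedged sketch follows; the wrapper function is hypothetical, and @fd is an eventfd handed in from user space:

/* Illustrative only: wire up an eventfd for logging notifications. */
static int telemetry_efd_sketch(struct gxp_dev *gxp, int fd)
{
	int ret;

	ret = gxp_core_telemetry_register_eventfd(gxp,
						  GXP_TELEMETRY_TYPE_LOGGING,
						  fd);
	if (ret)
		return ret;
	/* ... user space polls the eventfd and reads the mapped buffers ... */
	return gxp_core_telemetry_unregister_eventfd(gxp,
						     GXP_TELEMETRY_TYPE_LOGGING);
}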
+
+/**
+ * gxp_core_telemetry_get_notification_handler() - Get the notification handler
+ * work for the specified core
+ * @gxp: The GXP device to obtain the handler for
+ * @core: The physical core number to obtain the handler for
+ *
+ * Return: A pointer to the work_struct for the @core's notification handler if
+ * successful. NULL if core telemetry has not been initialized or @core
+ * is invalid.
+ */
+struct work_struct *
+gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core);
+
+/**
+ * gxp_core_telemetry_status_notify() - Checks the telemetry status of the
+ * specified core and signals the eventfd.
+ * @gxp: The GXP device to check the telemetry status for
+ * @core: The physical core number whose telemetry status to check
+ *
+ */
+void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core);
+
+/**
+ * gxp_core_telemetry_exit() - Reverts gxp_core_telemetry_init() and releases the
+ *                             resources acquired by the core telemetry manager.
+ * @gxp: The GXP device to release core telemetry resources for
+ *
+ */
+void gxp_core_telemetry_exit(struct gxp_dev *gxp);
+
+#endif /* __GXP_CORE_TELEMETRY_H__ */
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index 1165a28..4df7add 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -2,7 +2,7 @@
/*
* GXP debug dump handler
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
#include <linux/bitops.h>
@@ -14,34 +14,43 @@
#include <linux/string.h>
#include <linux/workqueue.h>
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
-#include <linux/platform_data/sscoredump.h>
-#endif
+#include <gcip/gcip-pm.h>
+#include <gcip/gcip-alloc-helper.h>
+#include "gxp-client.h"
#include "gxp-debug-dump.h"
#include "gxp-dma.h"
#include "gxp-doorbell.h"
#include "gxp-firmware.h"
+#include "gxp-firmware-data.h"
+#include "gxp-firmware-loader.h"
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-mapping.h"
#include "gxp-pm.h"
#include "gxp-vd.h"
-#include "gxp-wakelock.h"
+
+#if HAS_COREDUMP
+#include <linux/platform_data/sscoredump.h>
+#endif
#define SSCD_MSG_LENGTH 64
-#define SYNC_BARRIER_BLOCK 0x00100000
-#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
+#define SYNC_BARRIER_BLOCK 0x00100000
+#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
#define DEBUG_DUMP_MEMORY_SIZE 0x400000 /* size in bytes */
+/*
+ * CORE_FIRMWARE_RW_STRIDE & CORE_FIRMWARE_RW_ADDR must match the values
+ * defined in the core firmware image config.
+ */
+#define CORE_FIRMWARE_RW_STRIDE 0x200000 /* 2 MB */
+#define CORE_FIRMWARE_RW_ADDR(x) (0xFA400000 + CORE_FIRMWARE_RW_STRIDE * (x))
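
For reference, the per-core RW window layout implied by these two macros works out as follows for the first four cores (this follows directly from the definitions above):

/*
 * CORE_FIRMWARE_RW_ADDR(0) = 0xFA400000
 * CORE_FIRMWARE_RW_ADDR(1) = 0xFA600000
 * CORE_FIRMWARE_RW_ADDR(2) = 0xFA800000
 * CORE_FIRMWARE_RW_ADDR(3) = 0xFAA00000
 * i.e. each core gets a 2 MB RW window spaced CORE_FIRMWARE_RW_STRIDE apart.
 */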
+
/* Enum indicating the debug dump request reason. */
-enum gxp_debug_dump_init_type {
- DEBUG_DUMP_FW_INIT,
- DEBUG_DUMP_KERNEL_INIT
-};
+enum gxp_debug_dump_init_type { DEBUG_DUMP_FW_INIT, DEBUG_DUMP_KERNEL_INIT };
enum gxp_common_segments_idx {
GXP_COMMON_REGISTERS_IDX,
@@ -49,7 +58,11 @@ enum gxp_common_segments_idx {
};
/* Whether or not the debug dump subsystem should be enabled. */
+#if IS_ENABLED(CONFIG_GXP_TEST)
+static int gxp_debug_dump_enable = 1;
+#else
static int gxp_debug_dump_enable;
+#endif
module_param_named(debug_dump_enable, gxp_debug_dump_enable, int, 0660);
static void gxp_debug_dump_cache_invalidate(struct gxp_dev *gxp)
@@ -81,9 +94,9 @@ static u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
return gxp_read_32(gxp, barrier_reg_offset);
}
-static void
-gxp_get_common_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
- struct gxp_common_registers *common_regs)
+static void gxp_get_common_registers(struct gxp_dev *gxp,
+ struct gxp_seg_header *seg_header,
+ struct gxp_common_registers *common_regs)
{
int i;
u32 addr;
@@ -145,7 +158,13 @@ static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
{
struct gxp_lpm_state_table_registers *state_table_regs;
int i, j;
- uint offset;
+ uint offset, lpm_psm_offset;
+
+#ifdef GXP_SEPARATE_LPM_OFFSET
+ lpm_psm_offset = 0;
+#else
+ lpm_psm_offset = GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm);
+#endif
/* Get State Table registers */
for (i = 0; i < PSM_STATE_TABLE_COUNT; i++) {
@@ -153,57 +172,56 @@ static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
/* Get Trans registers */
for (j = 0; j < PSM_TRANS_COUNT; j++) {
- offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j);
- state_table_regs->trans[j].next_state =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_NEXT_STATE_OFFSET);
+ offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j) +
+ lpm_psm_offset;
+ state_table_regs->trans[j].next_state = lpm_read_32(
+ gxp, offset + PSM_NEXT_STATE_OFFSET);
state_table_regs->trans[j].seq_addr =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_SEQ_ADDR_OFFSET);
+ lpm_read_32(gxp, offset + PSM_SEQ_ADDR_OFFSET);
state_table_regs->trans[j].timer_val =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TIMER_VAL_OFFSET);
+ lpm_read_32(gxp, offset + PSM_TIMER_VAL_OFFSET);
state_table_regs->trans[j].timer_en =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TIMER_EN_OFFSET);
- state_table_regs->trans[j].trigger_num =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TRIGGER_NUM_OFFSET);
- state_table_regs->trans[j].trigger_en =
- lpm_read_32_psm(gxp, psm, offset +
- PSM_TRIGGER_EN_OFFSET);
+ lpm_read_32(gxp, offset + PSM_TIMER_EN_OFFSET);
+ state_table_regs->trans[j].trigger_num = lpm_read_32(
+ gxp, offset + PSM_TRIGGER_NUM_OFFSET);
+ state_table_regs->trans[j].trigger_en = lpm_read_32(
+ gxp, offset + PSM_TRIGGER_EN_OFFSET);
}
- state_table_regs->enable_state =
- lpm_read_32_psm(gxp, psm, PSM_STATE_TABLE_BASE(i) +
- PSM_ENABLE_STATE_OFFSET);
+ state_table_regs->enable_state = lpm_read_32(
+ gxp, lpm_psm_offset + PSM_STATE_TABLE_BASE(i) +
+ PSM_ENABLE_STATE_OFFSET);
}
/* Get DMEM registers */
for (i = 0; i < PSM_DATA_COUNT; i++) {
- offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET;
- psm_regs->data[i] = lpm_read_32_psm(gxp, psm, offset);
+ offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET + lpm_psm_offset;
+ psm_regs->data[i] = lpm_read_32(gxp, offset);
}
- psm_regs->cfg = lpm_read_32_psm(gxp, psm, PSM_CFG_OFFSET);
- psm_regs->status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ psm_regs->cfg = lpm_read_32(gxp, lpm_psm_offset + PSM_CFG_OFFSET);
+ psm_regs->status = lpm_read_32(gxp, lpm_psm_offset + PSM_STATUS_OFFSET);
/* Get Debug CSR registers */
- psm_regs->debug_cfg = lpm_read_32_psm(gxp, psm, PSM_DEBUG_CFG_OFFSET);
- psm_regs->break_addr = lpm_read_32_psm(gxp, psm, PSM_BREAK_ADDR_OFFSET);
- psm_regs->gpin_lo_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_LO_RD_OFFSET);
- psm_regs->gpin_hi_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_HI_RD_OFFSET);
+ psm_regs->debug_cfg =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_DEBUG_CFG_OFFSET);
+ psm_regs->break_addr =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_BREAK_ADDR_OFFSET);
+ psm_regs->gpin_lo_rd =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPIN_LO_RD_OFFSET);
+ psm_regs->gpin_hi_rd =
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPIN_HI_RD_OFFSET);
psm_regs->gpout_lo_rd =
- lpm_read_32_psm(gxp, psm, PSM_GPOUT_LO_RD_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPOUT_LO_RD_OFFSET);
psm_regs->gpout_hi_rd =
- lpm_read_32_psm(gxp, psm, PSM_GPOUT_HI_RD_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_GPOUT_HI_RD_OFFSET);
psm_regs->debug_status =
- lpm_read_32_psm(gxp, psm, PSM_DEBUG_STATUS_OFFSET);
+ lpm_read_32(gxp, lpm_psm_offset + PSM_DEBUG_STATUS_OFFSET);
}
-static void
-gxp_get_lpm_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
- struct gxp_lpm_registers *lpm_regs)
+static void gxp_get_lpm_registers(struct gxp_dev *gxp,
+ struct gxp_seg_header *seg_header,
+ struct gxp_lpm_registers *lpm_regs)
{
int i;
uint offset;
@@ -266,15 +284,13 @@ static int gxp_get_common_dump(struct gxp_dev *gxp)
int ret;
/* Power on BLK_AUR to read the common registers */
- ret = gxp_wakelock_acquire(gxp);
+ ret = gcip_pm_get(gxp->power_mgr->pm);
if (ret) {
dev_err(gxp->dev,
"Failed to acquire wakelock for getting common dump\n");
return ret;
}
- gxp_pm_update_requested_power_states(gxp, AUR_OFF, true, AUR_UUD, false,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gxp_pm_update_requested_power_states(gxp, off_states, uud_states);
gxp_get_common_registers(gxp,
&common_seg_header[GXP_COMMON_REGISTERS_IDX],
@@ -282,10 +298,8 @@ static int gxp_get_common_dump(struct gxp_dev *gxp)
gxp_get_lpm_registers(gxp, &common_seg_header[GXP_LPM_REGISTERS_IDX],
&common_dump_data->lpm_regs);
- gxp_wakelock_release(gxp);
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, false, AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gcip_pm_put(gxp->power_mgr->pm);
+ gxp_pm_update_requested_power_states(gxp, uud_states, off_states);
dev_dbg(gxp->dev, "Segment Header for Common Segment\n");
dev_dbg(gxp->dev, "Name: %s, Size: 0x%0x bytes, Valid :%0x\n",
@@ -297,7 +311,7 @@ static int gxp_get_common_dump(struct gxp_dev *gxp)
return ret;
}
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if HAS_COREDUMP
static void gxp_send_to_sscd(struct gxp_dev *gxp, void *segs, int seg_cnt,
const char *info)
{
@@ -349,27 +363,21 @@ static int gxp_add_user_buffer_to_segments(struct gxp_dev *gxp,
* Caller must have locked `gxp->vd_semaphore` for reading.
*/
static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
struct gxp_core_header *core_header)
{
- struct gxp_virtual_device *vd;
struct gxp_user_buffer user_buf;
int i;
struct gxp_mapping *mapping;
- lockdep_assert_held(&gxp->vd_semaphore);
-
- /*
- * TODO (b/234172464): When implementing per-core debug dump locks,
- * down_read(&gxp->vd_semaphore) must be re-added before accessing
- * gxp->core_to_vd[], and up_read(&gxp->vd_semaphore) must be re-added
- * after.
- */
- vd = gxp->core_to_vd[core_header->core_id];
- if (!vd) {
- dev_err(gxp->dev, "Virtual device is not available for vunmap\n");
+ if (!vd || vd->state == GXP_VD_RELEASED) {
+ dev_err(gxp->dev,
+ "Virtual device is not available for vunmap\n");
return;
}
+ lockdep_assert_held(&vd->debug_dump_lock);
+
for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
user_buf = core_header->user_bufs[i];
if (user_buf.size == 0)
@@ -393,30 +401,23 @@ static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
* Caller must have locked `gxp->vd_semaphore` for reading.
*/
static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
struct gxp_core_header *core_header,
void *user_buf_vaddrs[])
{
- struct gxp_virtual_device *vd;
struct gxp_user_buffer *user_buf;
int i, cnt = 0;
dma_addr_t daddr;
struct gxp_mapping *mapping;
void *vaddr;
- lockdep_assert_held(&gxp->vd_semaphore);
-
- /*
- * TODO (b/234172464): When implementing per-core debug dump locks,
- * down_read(&gxp->vd_semaphore) must be re-added before accessing
- * gxp->core_to_vd[], and up_read(&gxp->vd_semaphore) must be re-added
- * after.
- */
- vd = gxp->core_to_vd[core_header->core_id];
- if (!vd) {
+ if (!vd || vd->state == GXP_VD_RELEASED) {
dev_err(gxp->dev, "Virtual device is not available for vmap\n");
goto out;
}
+ lockdep_assert_held(&vd->debug_dump_lock);
+
for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
user_buf = &core_header->user_bufs[i];
if (user_buf->size == 0)
@@ -441,7 +442,7 @@ static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
gxp_mapping_put(mapping);
if (IS_ERR(vaddr)) {
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
return 0;
}
@@ -453,7 +454,7 @@ static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
/* Check that the entire user buffer is mapped */
if ((user_buf_vaddrs[i] + user_buf->size) >
(vaddr + mapping->size)) {
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
return 0;
}
@@ -463,9 +464,64 @@ static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
out:
return cnt;
}
-#endif
-static void gxp_invalidate_segments(struct gxp_dev *gxp, uint32_t core_id)
+/**
+ * gxp_map_fw_rw_section() - Maps the fw rw section address and size to be
+ * sent to sscd module for taking the dump.
+ * @gxp: The GXP device.
+ * @vd: vd of the crashed client.
+ * @core_id: physical core_id of crashed core.
+ * @seg_idx: Pointer to an index that tracks the position in the
+ *           gxp->debug_dump_mgr->segs[] array.
+ *
+ * This function parses the ns_regions of the given vd to find
+ * fw_rw_section details.
+ *
+ * Return:
+ * * 0 - Successfully mapped fw_rw_section data.
+ * * -EOPNOTSUPP - Operation not supported for invalid image config.
+ * * -ENXIO - No IOVA found for the fw_rw_section.
+ */
+static int gxp_map_fw_rw_section(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint32_t core_id, int *seg_idx)
+{
+ size_t idx;
+ struct sg_table *sgt;
+ struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
+ dma_addr_t fw_rw_section_daddr = CORE_FIRMWARE_RW_ADDR(core_id);
+ const size_t n_reg = ARRAY_SIZE(vd->ns_regions);
+
+ if (!gxp_fw_data_use_per_vd_config(vd)) {
+ dev_err(gxp->dev, "Unsupported Image config version = %d.",
+ gxp->fw_loader_mgr->core_img_cfg.config_version);
+ return -EOPNOTSUPP;
+ }
+
+ for (idx = 0; idx < n_reg; idx++) {
+ sgt = vd->ns_regions[idx].sgt;
+ if (!sgt)
+ break;
+
+ if (fw_rw_section_daddr != vd->ns_regions[idx].daddr)
+ continue;
+
+ mgr->segs[core_id][*seg_idx].addr =
+ gcip_noncontiguous_sgt_to_mem(sgt);
+ mgr->segs[core_id][*seg_idx].size = gcip_ns_config_to_size(
+ gxp->fw_loader_mgr->core_img_cfg.ns_iommu_mappings[idx]);
+ *seg_idx += 1;
+ return 0;
+ }
+ dev_err(gxp->dev,
+ "fw_rw_section mapping for core %u at iova 0x%llx does not exist",
+ core_id, fw_rw_section_daddr);
+ return -ENXIO;
+}
+
+#endif /* HAS_COREDUMP */
+
+void gxp_debug_dump_invalidate_segments(struct gxp_dev *gxp, uint32_t core_id)
{
int i;
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
@@ -504,7 +560,9 @@ static void gxp_invalidate_segments(struct gxp_dev *gxp, uint32_t core_id)
* Caller must make sure that gxp->debug_dump_mgr->common_dump and
* gxp->debug_dump_mgr->core_dump are not NULL.
*/
-static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
+static int gxp_handle_debug_dump(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint32_t core_id)
{
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
struct gxp_core_dump *core_dump = mgr->core_dump;
@@ -512,7 +570,7 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
&core_dump->core_dump_header[core_id];
struct gxp_core_header *core_header = &core_dump_header->core_header;
int ret = 0;
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if HAS_COREDUMP
struct gxp_common_dump *common_dump = mgr->common_dump;
int i;
int seg_idx = 0;
@@ -520,7 +578,7 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
char sscd_msg[SSCD_MSG_LENGTH];
void *user_buf_vaddrs[GXP_NUM_BUFFER_MAPPINGS];
int user_buf_cnt;
-#endif
+#endif /* HAS_COREDUMP */
/* Core */
if (!core_header->dump_available) {
@@ -529,7 +587,7 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
goto out;
}
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if HAS_COREDUMP
/* Common */
data_addr = &common_dump->common_dump_data.common_regs;
for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++) {
@@ -553,9 +611,9 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
mgr->segs[core_id][seg_idx].size = sizeof(struct gxp_core_header);
seg_idx++;
- data_addr = &core_dump->dump_data[core_id *
- core_header->core_dump_size /
- sizeof(u32)];
+ data_addr =
+ &core_dump->dump_data[core_id * core_header->core_dump_size /
+ sizeof(u32)];
for (i = 0; i < GXP_NUM_CORE_SEGMENTS - 1; i++) {
if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE) {
@@ -578,16 +636,23 @@ static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
ret = -EFAULT;
goto out_efault;
}
+ /* fw ro section */
mgr->segs[core_id][seg_idx].addr = gxp->fwbufs[core_id].vaddr;
- mgr->segs[core_id][seg_idx].size = gxp->fwbufs[core_id].size;
+ mgr->segs[core_id][seg_idx].size = vd->fw_ro_size;
seg_idx++;
+ /* fw rw section */
+ ret = gxp_map_fw_rw_section(gxp, vd, core_id, &seg_idx);
+ if (ret)
+ goto out;
+
/* User Buffers */
- user_buf_cnt = gxp_user_buffers_vmap(gxp, core_header, user_buf_vaddrs);
+ user_buf_cnt =
+ gxp_user_buffers_vmap(gxp, vd, core_header, user_buf_vaddrs);
if (user_buf_cnt > 0) {
if (gxp_add_user_buffer_to_segments(gxp, core_header, core_id,
seg_idx, user_buf_vaddrs)) {
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
ret = -EFAULT;
goto out_efault;
}
@@ -605,19 +670,19 @@ out_efault:
gxp_send_to_sscd(gxp, mgr->segs[core_id],
seg_idx + user_buf_cnt, sscd_msg);
- gxp_user_buffers_vunmap(gxp, core_header);
+ gxp_user_buffers_vunmap(gxp, vd, core_header);
}
-#endif
+#endif /* HAS_COREDUMP */
out:
- gxp_invalidate_segments(gxp, core_id);
+ gxp_debug_dump_invalidate_segments(gxp, core_id);
return ret;
}
static int gxp_init_segments(struct gxp_dev *gxp)
{
-#if !IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if !HAS_COREDUMP
return 0;
#else
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
@@ -627,14 +692,16 @@ static int gxp_init_segments(struct gxp_dev *gxp)
return -ENOMEM;
return 0;
-#endif
+#endif /* HAS_COREDUMP */
}
/*
* Caller must have locked `gxp->debug_dump_mgr->debug_dump_lock` before calling
* `gxp_generate_coredump`.
*/
-static int gxp_generate_coredump(struct gxp_dev *gxp, uint32_t core_id)
+static int gxp_generate_coredump(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint32_t core_id)
{
int ret = 0;
@@ -650,7 +717,7 @@ static int gxp_generate_coredump(struct gxp_dev *gxp, uint32_t core_id)
if (ret)
goto out;
- ret = gxp_handle_debug_dump(gxp, core_id);
+ ret = gxp_handle_debug_dump(gxp, vd, core_id);
if (ret)
goto out;
@@ -660,42 +727,19 @@ out:
return ret;
}
-static void gxp_debug_dump_process_dump(struct work_struct *work)
+static void gxp_generate_debug_dump(struct gxp_dev *gxp, uint core_id,
+ struct gxp_virtual_device *vd)
{
- struct gxp_debug_dump_work *debug_dump_work =
- container_of(work, struct gxp_debug_dump_work, work);
-
- uint core_id = debug_dump_work->core_id;
- struct gxp_dev *gxp = debug_dump_work->gxp;
- u32 boot_mode;
- bool gxp_generate_coredump_called = false;
-
+ bool gxp_generate_coredump_called = true;
mutex_lock(&gxp->debug_dump_mgr->debug_dump_lock);
- /*
- * Lock the VD semaphore to ensure no suspend/resume/start/stop requests
- * can be made on core `core_id` while generating debug dump.
- * However, since VD semaphore is used by other VDs as well, it can
- * potentially block device creation and destruction for other cores.
- * TODO (b/234172464): Implement per-core debug dump locks and
- * lock/unlock vd_semaphore before/after accessing gxp->core_to_vd[].
- */
- down_read(&gxp->vd_semaphore);
-
- boot_mode = gxp_firmware_get_boot_mode(gxp, core_id);
-
- if (gxp_is_fw_running(gxp, core_id) &&
- (boot_mode == GXP_BOOT_MODE_STATUS_COLD_BOOT_COMPLETED ||
- boot_mode == GXP_BOOT_MODE_STATUS_RESUME_COMPLETED)) {
- gxp_generate_coredump_called = true;
- if (gxp_generate_coredump(gxp, core_id))
- dev_err(gxp->dev, "Failed to generate coredump\n");
+ if (gxp_generate_coredump(gxp, vd, core_id)) {
+ gxp_generate_coredump_called = false;
+ dev_err(gxp->dev, "Failed to generate the coredump.\n");
}
/* Invalidate segments to prepare for the next debug dump trigger */
- gxp_invalidate_segments(gxp, core_id);
-
- up_read(&gxp->vd_semaphore);
+ gxp_debug_dump_invalidate_segments(gxp, core_id);
/*
* This delay is needed to ensure there's sufficient time
@@ -709,6 +753,65 @@ static void gxp_debug_dump_process_dump(struct work_struct *work)
mutex_unlock(&gxp->debug_dump_mgr->debug_dump_lock);
}
+static void gxp_debug_dump_process_dump_direct_mode(struct work_struct *work)
+{
+ struct gxp_debug_dump_work *debug_dump_work =
+ container_of(work, struct gxp_debug_dump_work, work);
+ uint core_id = debug_dump_work->core_id;
+ struct gxp_dev *gxp = debug_dump_work->gxp;
+ struct gxp_virtual_device *vd = NULL;
+
+ down_read(&gxp->vd_semaphore);
+ if (gxp->core_to_vd[core_id]) {
+ vd = gxp_vd_get(gxp->core_to_vd[core_id]);
+ } else {
+ dev_err(gxp->dev, "debug dump failed for null vd on core %d.",
+ core_id);
+ up_read(&gxp->vd_semaphore);
+ return;
+ }
+ up_read(&gxp->vd_semaphore);
+
+ /*
+ * Hold @vd->debug_dump_lock instead of @gxp->vd_semaphore to prevent changing the state
+	 * of @vd while generating a debug dump. This avoids blocking other virtual
+	 * devices from proceeding with their jobs.
+ */
+ mutex_lock(&vd->debug_dump_lock);
+
+ gxp_generate_debug_dump(gxp, core_id, vd);
+
+ mutex_unlock(&vd->debug_dump_lock);
+ gxp_vd_put(vd);
+}
+
+int gxp_debug_dump_process_dump_mcu_mode(struct gxp_dev *gxp, uint core_list,
+ struct gxp_virtual_device *crashed_vd)
+{
+ uint core;
+ struct gxp_core_dump_header *core_dump_header;
+ struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
+
+ lockdep_assert_held(&crashed_vd->debug_dump_lock);
+
+ if (crashed_vd->state != GXP_VD_UNAVAILABLE) {
+ dev_dbg(gxp->dev, "Invalid vd state=%u for processing dumps.\n",
+ crashed_vd->state);
+ return -EINVAL;
+ }
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ core_dump_header = &mgr->core_dump->core_dump_header[core];
+ /* Check if dump has been generated by core firmware */
+ if (core_dump_header &&
+ core_dump_header->core_header.dump_available == 1)
+ gxp_generate_debug_dump(gxp, core, crashed_vd);
+ }
+ return 0;
+}
+
struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
uint core)
{
@@ -730,7 +833,7 @@ struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
{
struct gxp_debug_dump_manager *mgr;
- int core;
+ int core, ret;
/* Don't initialize the debug dump subsystem unless it's enabled. */
if (!gxp_debug_dump_enable)
@@ -742,12 +845,11 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
gxp->debug_dump_mgr = mgr;
mgr->gxp = gxp;
- mgr->buf.vaddr =
- gxp_dma_alloc_coherent(gxp, NULL, 0, DEBUG_DUMP_MEMORY_SIZE,
- &mgr->buf.daddr, GFP_KERNEL, 0);
- if (!mgr->buf.vaddr) {
+ ret = gxp_dma_alloc_coherent_buf(gxp, NULL, DEBUG_DUMP_MEMORY_SIZE,
+ GFP_KERNEL, 0, &mgr->buf);
+ if (ret) {
dev_err(gxp->dev, "Failed to allocate memory for debug dump\n");
- return -ENODEV;
+ return ret;
}
mgr->buf.size = DEBUG_DUMP_MEMORY_SIZE;
@@ -756,11 +858,11 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
gxp_init_segments(gxp);
for (core = 0; core < GXP_NUM_CORES; core++) {
- gxp_invalidate_segments(gxp, core);
+ gxp_debug_dump_invalidate_segments(gxp, core);
mgr->debug_dump_works[core].gxp = gxp;
mgr->debug_dump_works[core].core_id = core;
INIT_WORK(&mgr->debug_dump_works[core].work,
- gxp_debug_dump_process_dump);
+ gxp_debug_dump_process_dump_direct_mode);
}
/* No need for a DMA handle since the carveout is coherent */
@@ -782,8 +884,7 @@ void gxp_debug_dump_exit(struct gxp_dev *gxp)
}
kfree(gxp->debug_dump_mgr->common_dump);
- gxp_dma_free_coherent(gxp, NULL, 0, DEBUG_DUMP_MEMORY_SIZE,
- mgr->buf.vaddr, mgr->buf.daddr);
+ gxp_dma_free_coherent_buf(gxp, NULL, &mgr->buf);
mutex_destroy(&mgr->debug_dump_lock);
devm_kfree(mgr->gxp->dev, mgr);
diff --git a/gxp-debug-dump.h b/gxp-debug-dump.h
index 1b1fda0..aeb8229 100644
--- a/gxp-debug-dump.h
+++ b/gxp-debug-dump.h
@@ -2,8 +2,9 @@
/*
* GXP debug dump handler
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
+
#ifndef __GXP_DEBUG_DUMP_H__
#define __GXP_DEBUG_DUMP_H__
@@ -11,32 +12,36 @@
#include <linux/types.h>
#include <linux/workqueue.h>
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#include "gxp-dma.h"
+#include "gxp-internal.h"
+
+#define HAS_COREDUMP \
+ (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP))
+
+#if HAS_COREDUMP
#include <linux/platform_data/sscoredump.h>
#endif
-#include "gxp-internal.h"
-
#define GXP_NUM_COMMON_SEGMENTS 2
#define GXP_NUM_CORE_SEGMENTS 8
#define GXP_NUM_BUFFER_MAPPINGS 32
#define GXP_SEG_HEADER_NAME_LENGTH 32
-#define GXP_NUM_SEGMENTS_PER_CORE \
- (GXP_NUM_COMMON_SEGMENTS + GXP_NUM_CORE_SEGMENTS + \
+#define GXP_NUM_SEGMENTS_PER_CORE \
+ (GXP_NUM_COMMON_SEGMENTS + GXP_NUM_CORE_SEGMENTS + \
GXP_NUM_BUFFER_MAPPINGS + 1)
#define GXP_Q7_ICACHE_SIZE 131072 /* I-cache size in bytes */
#define GXP_Q7_ICACHE_LINESIZE 64 /* I-cache line size in bytes */
#define GXP_Q7_ICACHE_WAYS 4
-#define GXP_Q7_ICACHE_SETS ((GXP_Q7_ICACHE_SIZE / GXP_Q7_ICACHE_WAYS) / \
- GXP_Q7_ICACHE_LINESIZE)
+#define GXP_Q7_ICACHE_SETS \
+ ((GXP_Q7_ICACHE_SIZE / GXP_Q7_ICACHE_WAYS) / GXP_Q7_ICACHE_LINESIZE)
#define GXP_Q7_ICACHE_WORDS_PER_LINE (GXP_Q7_ICACHE_LINESIZE / sizeof(u32))
#define GXP_Q7_DCACHE_SIZE 65536 /* D-cache size in bytes */
-#define GXP_Q7_DCACHE_LINESIZE 64 /* D-cache line size in bytes */
+#define GXP_Q7_DCACHE_LINESIZE 64 /* D-cache line size in bytes */
#define GXP_Q7_DCACHE_WAYS 4
-#define GXP_Q7_DCACHE_SETS ((GXP_Q7_DCACHE_SIZE / GXP_Q7_DCACHE_WAYS) / \
- GXP_Q7_DCACHE_LINESIZE)
+#define GXP_Q7_DCACHE_SETS \
+ ((GXP_Q7_DCACHE_SIZE / GXP_Q7_DCACHE_WAYS) / GXP_Q7_DCACHE_LINESIZE)
#define GXP_Q7_DCACHE_WORDS_PER_LINE (GXP_Q7_DCACHE_LINESIZE / sizeof(u32))
#define GXP_Q7_NUM_AREGS 64
#define GXP_Q7_DCACHE_TAG_RAMS 2
@@ -172,15 +177,9 @@ struct gxp_debug_dump_work {
uint core_id;
};
-struct gxp_debug_dump_buffer {
- void *vaddr;
- dma_addr_t daddr;
- u32 size;
-};
-
struct gxp_debug_dump_manager {
struct gxp_dev *gxp;
- struct gxp_debug_dump_buffer buf;
+ struct gxp_coherent_buf buf; /* Buffer holding debug dump data */
struct gxp_debug_dump_work debug_dump_works[GXP_NUM_CORES];
struct gxp_core_dump *core_dump; /* start of the core dump */
struct gxp_common_dump *common_dump;
@@ -192,7 +191,7 @@ struct gxp_debug_dump_manager {
* time
*/
struct mutex debug_dump_lock;
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#if HAS_COREDUMP
struct sscd_segment segs[GXP_NUM_CORES][GXP_NUM_SEGMENTS_PER_CORE];
#endif
};
@@ -203,4 +202,33 @@ struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
uint core);
bool gxp_debug_dump_is_enabled(void);
+/**
+ * gxp_debug_dump_invalidate_segments() - Invalidate debug dump segments so that
+ *                                        the firmware can repopulate them on the
+ *                                        next debug dump trigger.
+ *
+ * This function is not thread-safe. The caller should take the necessary precautions.
+ *
+ * @gxp: The GXP device to obtain the handler for
+ * @core_id: Physical ID of the core whose dump segments need to be invalidated.
+ */
+void gxp_debug_dump_invalidate_segments(struct gxp_dev *gxp, uint32_t core_id);
+
+/**
+ * gxp_debug_dump_process_dump_mcu_mode() - Checks and process the debug dump
+ * for cores from core_list.
+ * @gxp: The GXP device to obtain the handler for
+ * @core_list: A bitfield enumerating the physical cores for which a crash was
+ *             reported by the firmware.
+ * @crashed_vd: vd that has crashed.
+ *
+ * The caller must hold @crashed_vd->debug_dump_lock.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EINVAL - If vd state is not GXP_VD_UNAVAILABLE.
+ */
+int gxp_debug_dump_process_dump_mcu_mode(struct gxp_dev *gxp, uint core_list,
+ struct gxp_virtual_device *crashed_vd);
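
A minimal sketch of the expected calling convention, assuming the caller (for example an MCU crash handler, not shown in this patch) already holds a reference to the crashed VD, which must be in the GXP_VD_UNAVAILABLE state:

/* Illustrative only: process dumps for cores 0 and 2 of a crashed VD. */
static void handle_vd_crash_sketch(struct gxp_dev *gxp,
				   struct gxp_virtual_device *crashed_vd)
{
	uint core_list = BIT(0) | BIT(2);

	mutex_lock(&crashed_vd->debug_dump_lock);
	if (gxp_debug_dump_process_dump_mcu_mode(gxp, core_list, crashed_vd))
		dev_warn(gxp->dev, "debug dump processing was skipped\n");
	mutex_unlock(&crashed_vd->debug_dump_lock);
}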
+
#endif /* __GXP_DEBUG_DUMP_H__ */
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index e1b199b..6dacde9 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -7,24 +7,31 @@
#include <linux/acpm_dvfs.h>
+#include <gcip/gcip-pm.h>
+
#include "gxp-client.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-debugfs.h"
+#include "gxp-dma.h"
#include "gxp-firmware-data.h"
+#include "gxp-firmware-loader.h"
#include "gxp-firmware.h"
#include "gxp-internal.h"
#include "gxp-notification.h"
#include "gxp-lpm.h"
#include "gxp-mailbox.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
-#include "gxp-wakelock.h"
#include "gxp.h"
+#if GXP_HAS_MCU
+#include "gxp-mcu-platform.h"
+#endif
+
static int gxp_debugfs_lpm_test(void *data, u64 val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
dev_info(gxp->dev, "%llu\n", val);
@@ -35,53 +42,104 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_lpm_test_fops, NULL, gxp_debugfs_lpm_test,
static int gxp_debugfs_mailbox(void *data, u64 val)
{
- int core;
- struct gxp_command cmd;
- struct gxp_response resp;
+ int core = 0, retval;
+ u16 status;
struct gxp_dev *gxp = (struct gxp_dev *)data;
+ struct gxp_mailbox *mbx;
+ struct gxp_client *client;
+ struct gxp_power_states power_states = {
+ .power = GXP_POWER_STATE_NOM,
+ .memory = MEMORY_POWER_STATE_UNDEFINED,
+ };
+ u16 cmd_code;
+ int ret;
- core = val / 1000;
- if (core >= GXP_NUM_CORES) {
- dev_notice(gxp->dev,
- "Mailbox for core %d doesn't exist.\n", core);
- return -EINVAL;
- }
+ mutex_lock(&gxp->debugfs_client_lock);
+ client = gxp->debugfs_client;
+
+#if GXP_HAS_MCU
+ if (gxp_is_direct_mode(gxp)) {
+#endif
+ core = val / 1000;
+ if (core >= GXP_NUM_CORES) {
+ dev_notice(gxp->dev,
+ "Mailbox for core %d doesn't exist.\n",
+ core);
+ ret = -EINVAL;
+ goto out;
+ }
- if (gxp->mailbox_mgr == NULL ||
- gxp->mailbox_mgr->mailboxes[core] == NULL) {
- dev_notice(gxp->dev,
- "Unable to send mailbox command -- mailbox %d not ready\n",
- core);
- return -EINVAL;
- }
+ if (gxp->mailbox_mgr->mailboxes[core] == NULL) {
+ dev_notice(
+ gxp->dev,
+ "Unable to send mailbox command -- mailbox %d not ready\n",
+ core);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Create a dummy client to access @client->gxp from the `execute_cmd` callback. */
+ if (!client)
+ client = gxp_client_create(gxp);
+ mbx = gxp->mailbox_mgr->mailboxes[core];
+ cmd_code = GXP_MBOX_CODE_DISPATCH;
+#if GXP_HAS_MCU
+ } else {
+ if (!client) {
+ dev_err(gxp->dev,
+ "You should load firmwares via gxp/firmware_run first\n");
+ ret = -EIO;
+ goto out;
+ }
- cmd.code = (u16) val;
- cmd.priority = 0;
- cmd.buffer_descriptor.address = 0;
- cmd.buffer_descriptor.size = 0;
- cmd.buffer_descriptor.flags = 0;
+ down_read(&gxp->debugfs_client->semaphore);
+ if (!gxp_client_has_available_vd(gxp->debugfs_client,
+ "GXP_MAILBOX_COMMAND")) {
+ ret = -ENODEV;
+ up_read(&gxp->debugfs_client->semaphore);
+ goto out;
+ }
+ up_read(&gxp->debugfs_client->semaphore);
- down_read(&gxp->vd_semaphore);
- gxp_mailbox_execute_cmd(gxp->mailbox_mgr->mailboxes[core], &cmd, &resp);
- up_read(&gxp->vd_semaphore);
+ mbx = to_mcu_dev(gxp)->mcu.uci.mbx;
+ if (!mbx) {
+ dev_err(gxp->dev, "UCI is not initialized.\n");
+ ret = -EIO;
+ goto out;
+ }
- dev_info(gxp->dev,
- "Mailbox Command Sent: cmd.code=%d, resp.status=%d, resp.retval=%d\n",
- cmd.code, resp.status, resp.retval);
- return 0;
+ cmd_code = CORE_COMMAND;
+ }
+#endif
+
+ retval = gxp->mailbox_mgr->execute_cmd(client, mbx, core, cmd_code, 0,
+ 0, 0, 0, 1, power_states, NULL,
+ &status);
+
+ dev_info(
+ gxp->dev,
+ "Mailbox Command Sent: core=%d, resp.status=%d, resp.retval=%d\n",
+ core, status, retval);
+ ret = 0;
+out:
+ if (client && client != gxp->debugfs_client)
+ gxp_client_destroy(client);
+ mutex_unlock(&gxp->debugfs_client_lock);
+ return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(gxp_mailbox_fops, NULL, gxp_debugfs_mailbox, "%llu\n");
static int gxp_firmware_run_set(void *data, u64 val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
struct gxp_client *client;
int ret = 0;
uint core;
+ bool acquired_block_wakelock;
- ret = gxp_firmware_request_if_needed(gxp);
+ ret = gxp_firmware_loader_load_if_needed(gxp);
if (ret) {
- dev_err(gxp->dev, "Unable to request dsp firmware files\n");
+ dev_err(gxp->dev, "Unable to load firmware files\n");
return ret;
}
@@ -89,7 +147,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
if (val) {
if (gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware already running!\n");
+ dev_err(gxp->dev, "Firmware is already running!\n");
ret = -EIO;
goto out;
}
@@ -109,13 +167,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
goto out;
}
}
-
- /*
- * Cleanup any bad state or corruption the device might've
- * caused
- */
- gxp_fw_data_destroy(gxp);
- gxp_fw_data_init(gxp);
+ up_write(&gxp->vd_semaphore);
client = gxp_client_create(gxp);
if (IS_ERR(client)) {
@@ -124,34 +176,36 @@ static int gxp_firmware_run_set(void *data, u64 val)
}
gxp->debugfs_client = client;
- gxp->debugfs_client->vd = gxp_vd_allocate(gxp, GXP_NUM_CORES);
- if (IS_ERR(gxp->debugfs_client->vd)) {
+ mutex_lock(&gxp->client_list_lock);
+ list_add(&client->list_entry, &gxp->client_list);
+ mutex_unlock(&gxp->client_list_lock);
+
+ down_write(&client->semaphore);
+
+ ret = gxp_client_allocate_virtual_device(client, GXP_NUM_CORES,
+ 0);
+ if (ret) {
dev_err(gxp->dev, "Failed to allocate VD\n");
- ret = PTR_ERR(gxp->debugfs_client->vd);
- goto err_wakelock;
+ goto err_destroy_client;
}
- ret = gxp_wakelock_acquire(gxp);
+ ret = gxp_client_acquire_block_wakelock(
+ client, &acquired_block_wakelock);
if (ret) {
dev_err(gxp->dev, "Failed to acquire BLOCK wakelock\n");
- goto err_wakelock;
+ goto err_destroy_client;
}
- gxp->debugfs_client->has_block_wakelock = true;
- gxp_pm_update_requested_power_states(gxp, AUR_OFF, true,
- AUR_UUD, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
- ret = gxp_vd_start(gxp->debugfs_client->vd);
- up_write(&gxp->vd_semaphore);
+ ret = gxp_client_acquire_vd_wakelock(client, uud_states);
if (ret) {
- dev_err(gxp->dev, "Failed to start VD\n");
- goto err_start;
+ dev_err(gxp->dev, "Failed to acquire VD wakelock\n");
+ goto err_release_block_wakelock;
}
- gxp->debugfs_client->has_vd_wakelock = true;
+
+ up_write(&client->semaphore);
} else {
if (!gxp->debugfs_client) {
- dev_err(gxp->dev, "Firmware not running!\n");
+ dev_err(gxp->dev, "Firmware is not running!\n");
ret = -EIO;
goto out;
}
@@ -160,12 +214,7 @@ static int gxp_firmware_run_set(void *data, u64 val)
* Cleaning up the client will stop the VD it owns and release
* the BLOCK wakelock it is holding.
*/
- gxp_client_destroy(gxp->debugfs_client);
- gxp->debugfs_client = NULL;
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, true,
- AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ goto out_destroy_client;
}
out:
@@ -173,12 +222,15 @@ out:
return ret;
-err_start:
- gxp_wakelock_release(gxp);
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, true, AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
-err_wakelock:
+err_release_block_wakelock:
+ gxp_client_release_block_wakelock(client);
+err_destroy_client:
+ up_write(&client->semaphore);
+out_destroy_client:
+ mutex_lock(&gxp->client_list_lock);
+ list_del(&gxp->debugfs_client->list_entry);
+ mutex_unlock(&gxp->client_list_lock);
+
 	/* Destroying a client cleans up any VDs or wakelocks it held. */
gxp_client_destroy(gxp->debugfs_client);
gxp->debugfs_client = NULL;
@@ -188,10 +240,10 @@ err_wakelock:
static int gxp_firmware_run_get(void *data, u64 *val)
{
- struct gxp_dev *gxp = (struct gxp_dev *) data;
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
down_read(&gxp->vd_semaphore);
- *val = gxp->firmware_running;
+ *val = gxp->firmware_mgr->firmware_running;
up_read(&gxp->vd_semaphore);
return 0;
@@ -216,18 +268,14 @@ static int gxp_wakelock_set(void *data, u64 val)
goto out;
}
- ret = gxp_wakelock_acquire(gxp);
+ ret = gcip_pm_get(gxp->power_mgr->pm);
if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire debugfs wakelock ret=%d\n",
- ret);
+ dev_err(gxp->dev, "gcip_pm_get failed ret=%d\n", ret);
goto out;
}
gxp->debugfs_wakelock_held = true;
- gxp_pm_update_requested_power_states(gxp, AUR_OFF, true,
- AUR_UUD, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gxp_pm_update_requested_power_states(gxp, off_states,
+ uud_states);
} else {
/* Wakelock Release */
if (!gxp->debugfs_wakelock_held) {
@@ -236,12 +284,10 @@ static int gxp_wakelock_set(void *data, u64 val)
goto out;
}
- gxp_wakelock_release(gxp);
+ gcip_pm_put(gxp->power_mgr->pm);
gxp->debugfs_wakelock_held = false;
- gxp_pm_update_requested_power_states(gxp, AUR_UUD, true,
- AUR_OFF, true,
- AUR_MEM_UNDEFINED,
- AUR_MEM_UNDEFINED);
+ gxp_pm_update_requested_power_states(gxp, uud_states,
+ off_states);
}
out:
@@ -321,29 +367,24 @@ static int gxp_log_buff_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
int i;
- u64 **buffers;
+ struct gxp_coherent_buf *buffers;
u64 *ptr;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_buff_data) {
- dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
- __func__);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ if (!gxp->core_telemetry_mgr->logging_buff_data_legacy) {
+ dev_err(gxp->dev, "Logging buffer has not been created");
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
+ buffers = gxp->core_telemetry_mgr->logging_buff_data_legacy->buffers;
for (i = 0; i < GXP_NUM_CORES; i++) {
- ptr = buffers[i];
+ ptr = buffers[i].vaddr;
*ptr = val;
}
- dev_dbg(gxp->dev,
- "%s: log buff first bytes: [0] = %llu, [1] = %llu, [2] = %llu, [3] = %llu (val=%llu)\n",
- __func__, *buffers[0], *buffers[1], *buffers[2], *buffers[3],
- val);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
}
@@ -351,25 +392,21 @@ static int gxp_log_buff_set(void *data, u64 val)
static int gxp_log_buff_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- u64 **buffers;
+ struct gxp_coherent_buf *buffers;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_buff_data) {
- dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
- __func__);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ if (!gxp->core_telemetry_mgr->logging_buff_data_legacy) {
+ dev_err(gxp->dev, "Logging buffer has not been created");
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
- dev_dbg(gxp->dev,
- "%s: log buff first bytes: [0] = %llu, [1] = %llu, [2] = %llu, [3] = %llu\n",
- __func__, *buffers[0], *buffers[1], *buffers[2], *buffers[3]);
+ buffers = gxp->core_telemetry_mgr->logging_buff_data_legacy->buffers;
- *val = *buffers[0];
+ *val = *(u64 *)(buffers[0].vaddr);
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return 0;
}
@@ -382,17 +419,17 @@ static int gxp_log_eventfd_signal_set(void *data, u64 val)
struct gxp_dev *gxp = (struct gxp_dev *)data;
int ret = 0;
- mutex_lock(&gxp->telemetry_mgr->lock);
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->telemetry_mgr->logging_efd) {
+ if (!gxp->core_telemetry_mgr->logging_efd) {
ret = -ENODEV;
goto out;
}
- ret = eventfd_signal(gxp->telemetry_mgr->logging_efd, 1);
+ ret = eventfd_signal(gxp->core_telemetry_mgr->logging_efd, 1);
out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
return ret;
}
@@ -400,37 +437,33 @@ out:
DEFINE_DEBUGFS_ATTRIBUTE(gxp_log_eventfd_signal_fops, NULL,
gxp_log_eventfd_signal_set, "%llu\n");
-/* TODO: Remove these mux entry once experiment is done */
static int gxp_cmu_mux1_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
if (val > 1) {
- dev_err(gxp->dev, "Incorrect val for cmu_mux1, only 0 and 1 allowed\n");
+ dev_err(gxp->dev,
+ "Incorrect val for cmu_mux1, only 0 and 1 allowed\n");
return -EINVAL;
}
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
-
- if (!addr) {
- dev_err(gxp->dev, "Cannot map CMU1 address\n");
- return -EIO;
- }
-
- writel(val << 4, addr + PLL_CON0_PLL_AUR);
- iounmap(addr);
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
return 0;
}
static int gxp_cmu_mux1_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
- *val = readl(addr + PLL_CON0_PLL_AUR);
- iounmap(addr);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
+ *val = readl(gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
return 0;
}
@@ -440,43 +473,49 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_cmu_mux1_fops, gxp_cmu_mux1_get, gxp_cmu_mux1_set,
static int gxp_cmu_mux2_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
if (val > 1) {
- dev_err(gxp->dev, "Incorrect val for cmu_mux2, only 0 and 1 allowed\n");
+ dev_err(gxp->dev,
+ "Incorrect val for cmu_mux2, only 0 and 1 allowed\n");
return -EINVAL;
}
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
-
- if (!addr) {
- dev_err(gxp->dev, "Cannot map CMU2 address\n");
- return -EIO;
- }
-
- writel(val << 4, addr + PLL_CON0_NOC_USER);
- iounmap(addr);
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_NOC_USER);
return 0;
}
static int gxp_cmu_mux2_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- void *addr;
- addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
- *val = readl(addr + 0x610);
- iounmap(addr);
+ if (IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
+ dev_err(gxp->dev, "CMU registers are not mapped");
+ return -ENODEV;
+ }
+ *val = readl(gxp->cmu.vaddr + PLL_CON0_NOC_USER);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(gxp_cmu_mux2_fops, gxp_cmu_mux2_get, gxp_cmu_mux2_set,
"%llu\n");
+void gxp_create_debugdir(struct gxp_dev *gxp)
+{
+ gxp->d_entry = debugfs_create_dir(GXP_NAME, NULL);
+ if (IS_ERR_OR_NULL(gxp->d_entry)) {
+		dev_warn(gxp->dev, "Create debugfs dir failed: %ld",
+ PTR_ERR_OR_ZERO(gxp->d_entry));
+ gxp->d_entry = NULL;
+ }
+}
+
void gxp_create_debugfs(struct gxp_dev *gxp)
{
- gxp->d_entry = debugfs_create_dir("gxp", NULL);
- if (IS_ERR_OR_NULL(gxp->d_entry))
+ if (!gxp->d_entry)
return;
mutex_init(&gxp->debugfs_client_lock);
@@ -503,8 +542,10 @@ void gxp_create_debugfs(struct gxp_dev *gxp)
&gxp_cmu_mux2_fops);
}
-void gxp_remove_debugfs(struct gxp_dev *gxp)
+void gxp_remove_debugdir(struct gxp_dev *gxp)
{
+ if (!gxp->d_entry)
+ return;
debugfs_remove_recursive(gxp->d_entry);
/*
diff --git a/gxp-debugfs.h b/gxp-debugfs.h
index 4b42546..6ea8688 100644
--- a/gxp-debugfs.h
+++ b/gxp-debugfs.h
@@ -9,7 +9,12 @@
#include "gxp-internal.h"
+/*
+ * Creates the GXP debugfs directory and assigns it to @gxp->d_entry.
+ * On failure, a warning is logged and @gxp->d_entry is set to NULL.
+ */
+void gxp_create_debugdir(struct gxp_dev *gxp);
void gxp_create_debugfs(struct gxp_dev *gxp);
-void gxp_remove_debugfs(struct gxp_dev *gxp);
+void gxp_remove_debugdir(struct gxp_dev *gxp);
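
The intended ordering of these three helpers, as a hedged probe/remove sketch (the wrapper function is illustrative only):

/* Illustrative only: expected lifecycle of the debugfs helpers. */
static void gxp_debugfs_lifecycle_sketch(struct gxp_dev *gxp)
{
	gxp_create_debugdir(gxp); /* early in probe: create the directory */
	gxp_create_debugfs(gxp);  /* later in probe: add entries (no-op if dir is NULL) */
	/* ... device lifetime ... */
	gxp_remove_debugdir(gxp); /* on driver removal: tear the directory down */
}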
#endif /* __GXP_DEBUGFS_H__ */
diff --git a/gxp-dma-fence.c b/gxp-dma-fence.c
new file mode 100644
index 0000000..4733081
--- /dev/null
+++ b/gxp-dma-fence.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GXP support for DMA fence.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/slab.h>
+
+#include <gcip/gcip-dma-fence.h>
+
+#include "gxp-dma-fence.h"
+#include "gxp-internal.h"
+#include "gxp-vd.h"
+#include "gxp.h"
+
+static const char *gxp_get_driver_name(struct dma_fence *fence)
+{
+ return GXP_NAME;
+}
+
+static void gxp_dma_fence_release(struct dma_fence *fence)
+{
+ struct gxp_dma_fence *gxp_fence = to_gxp_fence(fence);
+ struct gxp_virtual_device *vd = gxp_fence->vd;
+
+ mutex_lock(&vd->fence_list_lock);
+ list_del(&gxp_fence->fence_list);
+ mutex_unlock(&vd->fence_list_lock);
+ gxp_vd_put(vd);
+ gcip_dma_fence_exit(&gxp_fence->gfence);
+ kfree(gxp_fence);
+}
+
+static const struct dma_fence_ops gxp_dma_fence_ops = {
+ .get_driver_name = gxp_get_driver_name,
+ .get_timeline_name = gcip_dma_fence_get_timeline_name,
+ .wait = dma_fence_default_wait,
+ .enable_signaling = gcip_dma_fence_always_true,
+ .release = gxp_dma_fence_release,
+};
+
+static int gxp_dma_fence_after_init(struct gcip_dma_fence *gfence)
+{
+ struct gxp_dma_fence *gxp_fence =
+ container_of(gfence, struct gxp_dma_fence, gfence);
+ struct gxp_virtual_device *vd = gxp_fence->vd;
+
+ mutex_lock(&vd->fence_list_lock);
+ list_add_tail(&gxp_fence->fence_list, &vd->gxp_fence_list);
+ mutex_unlock(&vd->fence_list_lock);
+
+ return 0;
+}
+
+int gxp_dma_fence_create(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ struct gxp_create_sync_fence_data *datap)
+{
+ struct gcip_dma_fence_data data = {
+ .timeline_name = datap->timeline_name,
+ .ops = &gxp_dma_fence_ops,
+ .seqno = datap->seqno,
+ .after_init = gxp_dma_fence_after_init,
+ };
+ struct gxp_dma_fence *gxp_fence =
+ kzalloc(sizeof(*gxp_fence), GFP_KERNEL);
+ int ret;
+
+ if (!gxp_fence)
+ return -ENOMEM;
+
+ gxp_fence->vd = gxp_vd_get(vd);
+ ret = gcip_dma_fence_init(gxp->gfence_mgr, &gxp_fence->gfence, &data);
+ if (!ret)
+ datap->fence = data.fence;
+ /*
+	 * No need to kfree(gxp_fence) on error here; the kfree() is done in
+	 * gxp_dma_fence_release().
+ */
+
+ return ret;
+}
diff --git a/gxp-dma-fence.h b/gxp-dma-fence.h
new file mode 100644
index 0000000..38f8cf2
--- /dev/null
+++ b/gxp-dma-fence.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GXP support for DMA fence.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GXP_DMA_FENCE_H__
+#define __GXP_DMA_FENCE_H__
+
+#include <gcip/gcip-dma-fence.h>
+
+#include "gxp-vd.h"
+#include "gxp.h"
+
+/* Converts struct dma_fence to gxp_dma_fence. */
+#define to_gxp_fence(fence) \
+ container_of(to_gcip_fence(fence), struct gxp_dma_fence, gfence)
+
+struct gxp_dma_fence {
+ struct gcip_dma_fence gfence;
+ /* The owner of this DMA fence */
+ struct gxp_virtual_device *vd;
+ /* List of DMA fences owned by the same VD. */
+ struct list_head fence_list;
+};
+
+/*
+ * Creates a DMA fence associated with @vd.
+ *
+ * @datap->fence is set to the fence FD on success.
+ *
+ * Returns 0 on success. Otherwise a negative errno.
+ */
+int gxp_dma_fence_create(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ struct gxp_create_sync_fence_data *datap);
+
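A hedged sketch of how an ioctl path might drive this helper; the wrapper function, its __user argument handling, and the omitted includes (e.g. linux/uaccess.h) are assumptions for illustration, while gxp_dma_fence_create() and struct gxp_create_sync_fence_data come from this header and gxp.h:

/* Illustrative only: copy the request in, create the fence, report the FD. */
static int create_sync_fence_ioctl_sketch(struct gxp_dev *gxp,
					  struct gxp_virtual_device *vd,
					  struct gxp_create_sync_fence_data __user *argp)
{
	struct gxp_create_sync_fence_data data;
	int ret;

	if (copy_from_user(&data, argp, sizeof(data)))
		return -EFAULT;
	ret = gxp_dma_fence_create(gxp, vd, &data);
	if (ret)
		return ret;
	/* data.fence now holds the new sync-fence FD. */
	if (copy_to_user(argp, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}
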
+#endif /* __GXP_DMA_FENCE_H__ */
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 97322f5..ad1111b 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Google LLC
*/
+#include <linux/bits.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
@@ -14,16 +15,17 @@
#include "gxp-config.h"
#include "gxp-dma.h"
-#include "gxp-iova.h"
+#include "gxp-firmware.h" /* gxp_core_boot */
+#include "gxp-mailbox.h"
#include "gxp-mapping.h"
#include "gxp-pm.h"
-#include "gxp-vd.h"
+#include "gxp-ssmt.h"
+#include "gxp.h"
struct gxp_dma_iommu_manager {
struct gxp_dma_manager dma_mgr;
- struct iommu_domain *default_domain;
- void __iomem *idma_ssmt_base;
- void __iomem *inst_data_ssmt_base;
+ struct gxp_iommu_domain *default_domain;
+ struct gxp_ssmt ssmt;
};
/**
@@ -40,7 +42,13 @@ struct gxp_dma_iommu_manager {
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
- int prot = coherent ? IOMMU_CACHE : 0;
+ int prot = 0;
+
+ if (coherent) {
+#ifdef GXP_IS_DMA_COHERENT
+ prot = IOMMU_CACHE;
+#endif
+ }
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
@@ -56,81 +64,34 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
}
-/* SSMT handling */
-
-#define INST_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (0 << 3))
-#define DATA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4) | (1 << 3))
-#define IDMA_SID_FOR_CORE(_x_) ((1 << 6) | ((_x_) << 4))
-
-static inline void ssmt_set_vid_for_sid(void __iomem *ssmt, int vid, u8 sid)
+static int map_flags_to_iommu_prot(enum dma_data_direction dir,
+ unsigned long attrs, u32 gxp_dma_flags)
{
- /* NS_READ_STREAM_VID_<sid> */
- writel(vid, (ssmt) + 0x1000u + (0x4u * (sid)));
- /* NS_WRITE_STREAM_VID_<sid> */
- writel(vid, (ssmt) + 0x1200u + (0x4u * (sid)));
+ bool coherent = gxp_dma_flags & GXP_MAP_COHERENT ? 1 : 0;
+
+ return dma_info_to_prot(dir, coherent, attrs);
}
static int gxp_dma_ssmt_program(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct iommu_domain *domain, uint core_list)
{
-/* SSMT is not supported in unittests */
-#ifndef CONFIG_GXP_TEST
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- int core_vid;
-
- core_vid = iommu_aux_get_pasid(vd->core_domains[virt_core], gxp->dev);
- dev_dbg(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
- core_vid);
- ssmt_set_vid_for_sid(mgr->idma_ssmt_base, core_vid,
- IDMA_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
- INST_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
- DATA_SID_FOR_CORE(core));
-#endif
- return 0;
-}
-
-
-static inline int ssmt_init(struct gxp_dev *gxp,
- struct gxp_dma_iommu_manager *mgr)
-{
- struct platform_device *pdev =
- container_of(gxp->dev, struct platform_device, dev);
- struct resource *r;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ssmt_idma");
- if (!r) {
- dev_err(gxp->dev, "Failed to find IDMA SSMT register base\n");
- return -EINVAL;
+ int pasid;
+ uint core;
+
+ /* Program VID only when cores are managed by us. */
+ if (gxp_is_direct_mode(gxp) || gxp_core_boot(gxp)) {
+ pasid = iommu_aux_get_pasid(domain, gxp->dev);
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ if (BIT(core) & core_list) {
+ dev_dbg(gxp->dev, "Assign core%u to PASID %d\n",
+ core, pasid);
+ gxp_ssmt_set_core_vid(&mgr->ssmt, core, pasid);
+ }
+ } else {
+ gxp_ssmt_set_bypass(&mgr->ssmt);
}
-
- mgr->idma_ssmt_base = devm_ioremap_resource(gxp->dev, r);
- if (IS_ERR(mgr->idma_ssmt_base)) {
- dev_err(gxp->dev,
- "Failed to map IDMA SSMT register base (%ld)\n",
- PTR_ERR(mgr->idma_ssmt_base));
- return PTR_ERR(mgr->idma_ssmt_base);
- }
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "ssmt_inst_data");
- if (!r) {
- dev_err(gxp->dev,
- "Failed to find instruction/data SSMT register base\n");
- return -EINVAL;
- }
-
- mgr->inst_data_ssmt_base = devm_ioremap_resource(gxp->dev, r);
- if (IS_ERR(mgr->inst_data_ssmt_base)) {
- dev_err(gxp->dev,
- "Failed to map instruction/data SSMT register base (%ld)\n",
- PTR_ERR(mgr->inst_data_ssmt_base));
- return PTR_ERR(mgr->inst_data_ssmt_base);
- }
-
return 0;
}
@@ -170,8 +131,87 @@ static int sysmmu_fault_handler(struct iommu_fault *fault, void *token)
return -EAGAIN;
}
+#if GXP_HAS_LAP
+
+/* No need to map CSRs when local access path exists. */
+
+#define gxp_map_csrs(...) 0
+#define gxp_unmap_csrs(...)
+
+#else /* !GXP_HAS_LAP */
+
+#define SYNC_BARRIERS_SIZE 0x100000
+
+static int gxp_map_csrs(struct gxp_dev *gxp, struct iommu_domain *domain,
+ struct gxp_mapped_resource *regs)
+{
+ int ret = iommu_map(domain, GXP_IOVA_AURORA_TOP, gxp->regs.paddr,
+ gxp->regs.size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ return ret;
+ /*
+ * Firmware expects to access the sync barriers at a separate
+ * address, lower than the rest of the AURORA_TOP registers.
+ */
+ ret = iommu_map(domain, GXP_IOVA_SYNC_BARRIERS,
+ gxp->regs.paddr + GXP_IOVA_SYNC_BARRIERS,
+ SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gxp_unmap_csrs(struct gxp_dev *gxp, struct iommu_domain *domain,
+ struct gxp_mapped_resource *regs)
+{
+ iommu_unmap(domain, GXP_IOVA_SYNC_BARRIERS, SYNC_BARRIERS_SIZE);
+ iommu_unmap(domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
+}
+
+#endif /* GXP_HAS_LAP */
+
/* gxp-dma.h Interface */
+uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain)
+{
+ return iommu_aux_get_pasid(gdomain->domain, gxp->dev);
+}
+
+struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp)
+{
+ struct gxp_iommu_domain *gdomain = gxp->default_domain;
+
+ if (IS_ERR_OR_NULL(gdomain)) {
+ gdomain = devm_kzalloc(gxp->dev, sizeof(*gdomain), GFP_KERNEL);
+ if (!gdomain)
+ return ERR_PTR(-ENOMEM);
+ gdomain->domain = iommu_get_domain_for_dev(gxp->dev);
+ if (!gdomain->domain) {
+ devm_kfree(gxp->dev, gdomain);
+ return ERR_PTR(-ENOMEM);
+ }
+ gxp->default_domain = gdomain;
+ }
+
+ return gdomain;
+}
+
+int gxp_iommu_map(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+ return iommu_map(gdomain->domain, iova, paddr, size, prot);
+}
+
+void gxp_iommu_unmap(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ unsigned long iova, size_t size)
+{
+ iommu_unmap(gdomain->domain, iova, size);
+}
+
int gxp_dma_init(struct gxp_dev *gxp)
{
struct gxp_dma_iommu_manager *mgr;
@@ -188,20 +228,16 @@ int gxp_dma_init(struct gxp_dev *gxp)
if (!mgr)
return -ENOMEM;
-/* TODO(b/201505925): remove this and prepare a of_node in unittests */
-/* SSMT is not supported in unittests */
-#ifndef CONFIG_GXP_TEST
- ret = ssmt_init(gxp, mgr);
+ ret = gxp_ssmt_init(gxp, &mgr->ssmt);
if (ret) {
dev_err(gxp->dev, "Failed to find SSMT\n");
return ret;
}
-#endif
- mgr->default_domain = iommu_get_domain_for_dev(gxp->dev);
- if (!mgr->default_domain) {
+ mgr->default_domain = gxp_iommu_get_domain_for_dev(gxp);
+ if (IS_ERR(mgr->default_domain)) {
dev_err(gxp->dev, "Failed to find default IOMMU domain\n");
- return -EIO;
+ return PTR_ERR(mgr->default_domain);
}
if (iommu_register_device_fault_handler(gxp->dev, sysmmu_fault_handler,
@@ -216,8 +252,14 @@ int gxp_dma_init(struct gxp_dev *gxp)
goto err_unreg_fault_handler;
}
+#if IS_ENABLED(CONFIG_ANDROID)
/* Enable best fit algorithm to minimize fragmentation */
- iommu_dma_enable_best_fit_algo(gxp->dev);
+ ret = iommu_dma_enable_best_fit_algo(gxp->dev);
+ if (ret)
+ dev_warn(gxp->dev,
+ "Failed to enable best-fit IOVA allocator (%d)\n",
+ ret);
+#endif
gxp->dma_mgr = &(mgr->dma_mgr);
@@ -238,95 +280,85 @@ void gxp_dma_exit(struct gxp_dev *gxp)
"Failed to unregister SysMMU fault handler\n");
}
-#define SYNC_BARRIERS_SIZE 0x100000
-#define SYNC_BARRIERS_TOP_OFFSET 0x100000
-#define EXT_TPU_MBX_SIZE 0x2000
-
-/* Offset from mailbox base to the device interface that needs to be mapped */
-#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
+#define EXT_TPU_MBX_SIZE 0x2000
void gxp_dma_init_default_resources(struct gxp_dev *gxp)
{
unsigned int core;
+ int i;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
+ for (i = 0; i < GXP_NUM_MAILBOXES; i++)
+ gxp->mbx[i].daddr = GXP_IOVA_MAILBOX(i);
+ for (core = 0; core < GXP_NUM_CORES; core++)
gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
- }
- gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
}
int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct gxp_iommu_domain *gdomain,
+ uint core_list)
{
int ret;
- ret = iommu_aux_attach_device(vd->core_domains[virt_core], gxp->dev);
+ ret = iommu_aux_attach_device(gdomain->domain, gxp->dev);
if (ret)
goto out;
- gxp_dma_ssmt_program(gxp, vd, virt_core, core);
+ gxp_dma_ssmt_program(gxp, gdomain->domain, core_list);
out:
return ret;
}
void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core)
+ struct gxp_iommu_domain *gdomain)
{
- iommu_aux_detach_device(vd->core_domains[virt_core], gxp->dev);
+ iommu_aux_detach_device(gdomain->domain, gxp->dev);
}
int gxp_dma_map_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct gxp_iommu_domain *gdomain, uint core_list,
+ u8 slice_index)
{
int ret;
+ uint i;
+ struct iommu_domain *domain = gdomain->domain;
- ret = iommu_map(vd->core_domains[virt_core], gxp->regs.daddr,
- gxp->regs.paddr, gxp->regs.size,
- IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- /*
- * Firmware expects to access the sync barriers at a separate
- * address, lower than the rest of the AURORA_TOP registers.
- */
- ret = iommu_map(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
- gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
- SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- ret = iommu_map(vd->core_domains[virt_core], gxp->mbx[core].daddr,
- gxp->mbx[core].paddr + MAILBOX_DEVICE_INTERFACE_OFFSET,
- gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- /*
- * TODO(b/202213606): Map FW regions of all cores in a VD for
- * each other at VD creation.
- */
- ret = iommu_map(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
- gxp->fwbufs[0].paddr,
- gxp->fwbufs[0].size * GXP_NUM_CORES,
- IOMMU_READ | IOMMU_WRITE);
+ if (!gxp_is_direct_mode(gxp))
+ return 0;
+
+ ret = gxp_map_csrs(gxp, domain, &gxp->regs);
if (ret)
goto err;
- ret = iommu_map(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
- gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
- IOMMU_READ | IOMMU_WRITE);
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ ret = iommu_map(domain, gxp->mbx[i].daddr,
+ gxp->mbx[i].paddr +
+ MAILBOX_DEVICE_INTERFACE_OFFSET,
+ gxp->mbx[i].size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ }
+ /* TODO(b/265748027): directly remove this map */
+ if (gxp->fwdatabuf.daddr)
+ ret = iommu_map(domain, gxp->fwdatabuf.daddr,
+ gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
+ IOMMU_READ | IOMMU_WRITE);
if (ret)
goto err;
/* Only map the TPU mailboxes if they were found on probe */
if (gxp->tpu_dev.mbx_paddr) {
- ret = iommu_map(
- vd->core_domains[virt_core],
- GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
- gxp->tpu_dev.mbx_paddr +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ ret = iommu_map(
+ domain,
+ GXP_IOVA_EXT_TPU_MBX + i * EXT_TPU_MBX_SIZE,
+ gxp->tpu_dev.mbx_paddr + i * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ }
}
return ret;
@@ -336,42 +368,43 @@ err:
* Any resource that hadn't been mapped yet will cause `iommu_unmap()`
 * to return immediately, so it's safe to try to unmap everything.
*/
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
+ gxp_dma_unmap_core_resources(gxp, gdomain, core_list);
return ret;
}
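As a sketch of how the core_list bitfield above is typically built, the following hypothetical caller maps the fixed resources for physical cores 0 and 2 into one domain; the slice index is an arbitrary choice for illustration.

static int gxp_example_map_cores(struct gxp_dev *gxp,
				 struct gxp_iommu_domain *gdomain)
{
	/* Bitfield selecting physical cores 0 and 2. */
	uint core_list = BIT(0) | BIT(2);

	/* Slice 0 of the shared buffer, chosen arbitrarily for the example. */
	return gxp_dma_map_core_resources(gxp, gdomain, core_list,
					  /*slice_index=*/0);
}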
void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+ struct gxp_iommu_domain *gdomain,
+ uint core_list)
{
+ uint i;
+ struct iommu_domain *domain = gdomain->domain;
+
+ if (!gxp_is_direct_mode(gxp))
+ return;
+
/* Only unmap the TPU mailboxes if they were found on probe */
if (gxp->tpu_dev.mbx_paddr) {
- iommu_unmap(vd->core_domains[virt_core],
- GXP_IOVA_EXT_TPU_MBX +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ iommu_unmap(domain,
+ GXP_IOVA_EXT_TPU_MBX + i * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE);
+ }
}
- iommu_unmap(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
- gxp->fwdatabuf.size);
- /*
- * TODO(b/202213606): A core should only have access to the FW
- * of other cores if they're in the same VD, and have the FW
- * region unmapped on VD destruction.
- */
- iommu_unmap(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
- gxp->fwbufs[0].size * GXP_NUM_CORES);
- iommu_unmap(vd->core_domains[virt_core], gxp->mbx[core].daddr,
- gxp->mbx[core].size);
- iommu_unmap(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
- SYNC_BARRIERS_SIZE);
- iommu_unmap(vd->core_domains[virt_core], gxp->regs.daddr,
- gxp->regs.size);
+ if (gxp->fwdatabuf.daddr)
+ iommu_unmap(domain, gxp->fwdatabuf.daddr, gxp->fwdatabuf.size);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(BIT(i) & core_list))
+ continue;
+ iommu_unmap(domain, gxp->mbx[i].daddr, gxp->mbx[i].size);
+ }
+ gxp_unmap_csrs(gxp, domain, &gxp->regs);
}
-static inline struct sg_table *
-alloc_sgt_for_buffer(void *ptr, size_t size,
- struct iommu_domain *domain,
- dma_addr_t daddr)
+static inline struct sg_table *alloc_sgt_for_buffer(void *ptr, size_t size,
+ struct iommu_domain *domain,
+ dma_addr_t daddr)
{
struct sg_table *sgt;
ulong offset;
@@ -409,7 +442,7 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
*/
size_in_page = size > (PAGE_SIZE - offset_in_page(ptr)) ?
PAGE_SIZE - offset_in_page(ptr) :
- size;
+ size;
page = phys_to_page(iommu_iova_to_phys(domain, daddr));
sg_set_page(next, page, size_in_page, offset_in_page(ptr));
size -= size_in_page;
@@ -437,136 +470,114 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
return sgt;
}
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, uint core_list,
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info)
{
- uint orig_virt_core_list = virt_core_list;
+ uint orig_core_list = core_list;
u64 queue_iova;
- uint virt_core;
int core;
int ret;
int i = 0;
+ struct iommu_domain *domain = gdomain->domain;
- while (virt_core_list) {
+ while (core_list) {
phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;
- virt_core = ffs(virt_core_list) - 1;
- virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
- core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- ret = iommu_map(vd->core_domains[virt_core], queue_iova,
- cmdq_pa, mbx_info->cmdq_size, IOMMU_WRITE);
+ ret = iommu_map(domain, queue_iova, cmdq_pa,
+ mbx_info->cmdq_size, IOMMU_WRITE);
if (ret)
goto error;
- ret = iommu_map(vd->core_domains[virt_core],
- queue_iova + mbx_info->cmdq_size, respq_pa,
- mbx_info->respq_size, IOMMU_READ);
+ ret = iommu_map(domain, queue_iova + mbx_info->cmdq_size,
+ respq_pa, mbx_info->respq_size, IOMMU_READ);
if (ret) {
- iommu_unmap(vd->core_domains[virt_core], queue_iova,
- mbx_info->cmdq_size);
+ iommu_unmap(domain, queue_iova, mbx_info->cmdq_size);
goto error;
}
+ core_list &= ~BIT(core);
}
return 0;
error:
- virt_core_list ^= orig_virt_core_list;
- while (virt_core_list) {
- virt_core = ffs(virt_core_list) - 1;
- virt_core_list &= ~BIT(virt_core);
+ core_list ^= orig_core_list;
+ while (core_list) {
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- iommu_unmap(vd->core_domains[virt_core], queue_iova,
- mbx_info->cmdq_size);
- iommu_unmap(vd->core_domains[virt_core], queue_iova +
- mbx_info->cmdq_size, mbx_info->respq_size);
+ iommu_unmap(domain, queue_iova, mbx_info->cmdq_size);
+ iommu_unmap(domain, queue_iova + mbx_info->cmdq_size,
+ mbx_info->respq_size);
}
return ret;
}
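The map/unmap pair above walks core_list with ffs() and only clears a core's bit once both of its queues are mapped, which lets the error path unmap exactly the cores that succeeded. A stripped-down sketch of that walk, with the real mapping calls replaced by a debug print:

/* Illustrative only: visit each physical core whose bit is set. */
static void gxp_example_walk_core_list(struct gxp_dev *gxp, uint core_list)
{
	int core;

	while (core_list) {
		core = ffs(core_list) - 1;	/* lowest set bit */
		core_list &= ~BIT(core);	/* mark the core handled */
		dev_dbg(gxp->dev, "visiting physical core %d\n", core);
	}
}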
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
+ struct gxp_iommu_domain *gdomain,
struct gxp_tpu_mbx_desc mbx_desc)
{
- uint virt_core_list = mbx_desc.virt_core_list;
uint core_list = mbx_desc.phys_core_list;
u64 queue_iova;
int core;
- uint virt_core;
+ struct iommu_domain *domain = gdomain->domain;
- while (virt_core_list) {
- virt_core = ffs(virt_core_list) - 1;
- virt_core_list &= ~BIT(virt_core);
+ while (core_list) {
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- iommu_unmap(vd->core_domains[virt_core], queue_iova,
- mbx_desc.cmdq_size);
- iommu_unmap(vd->core_domains[virt_core], queue_iova +
- mbx_desc.cmdq_size, mbx_desc.respq_size);
+ iommu_unmap(domain, queue_iova, mbx_desc.cmdq_size);
+ iommu_unmap(domain, queue_iova + mbx_desc.cmdq_size,
+ mbx_desc.respq_size);
}
}
-#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
+#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
-int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle,
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_coherent_buf *buf,
+ struct gxp_iommu_domain *gdomain,
uint gxp_dma_flags)
{
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
struct sg_table *sgt;
- int virt_core;
ssize_t size_mapped;
+ int ret = 0;
+ size_t size;
+ struct iommu_domain *domain = gdomain->domain;
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
- sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, dma_handle);
+ size = buf->size;
+ sgt = alloc_sgt_for_buffer(buf->vaddr, buf->size,
+ mgr->default_domain->domain, buf->dma_addr);
if (IS_ERR(sgt)) {
dev_err(gxp->dev,
"Failed to allocate sgt for coherent buffer\n");
- return -ENOMEM;
- }
-
- /* Create identical mappings in the specified cores' domains */
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- /*
- * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
- * `ssize_t` to encode errors that earlier versions throw out.
- * Explicitly cast here for backwards compatibility.
- */
- size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
- dma_handle, sgt->sgl,
- sgt->orig_nents,
- IOMMU_READ | IOMMU_WRITE);
- if (size_mapped != size)
- goto err;
+ return PTR_ERR(sgt);
}
- sg_free_table(sgt);
- kfree(sgt);
- return 0;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], dma_handle, size);
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped = (ssize_t)iommu_map_sg(domain, buf->dma_addr, sgt->sgl,
+ sgt->orig_nents,
+ IOMMU_READ | IOMMU_WRITE);
+ if (size_mapped != size)
+ ret = size_mapped < 0 ? -EINVAL : (int)size_mapped;
sg_free_table(sgt);
kfree(sgt);
- return -EINVAL;
+ return ret;
}
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags)
+int gxp_dma_alloc_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, size_t size,
+ gfp_t flag, uint gxp_dma_flags,
+ struct gxp_coherent_buf *buffer)
{
void *buf;
dma_addr_t daddr;
@@ -578,226 +589,55 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
if (!buf) {
dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
- return NULL;
+ return -ENOMEM;
}
- if (vd != NULL) {
- ret = gxp_dma_map_allocated_coherent_buffer(gxp, buf, vd,
- virt_core_list,
- size, daddr,
- gxp_dma_flags);
+
+ buffer->vaddr = buf;
+ buffer->size = size;
+ buffer->dma_addr = daddr;
+
+ if (gdomain != NULL) {
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, buffer, gdomain, gxp_dma_flags);
if (ret) {
+ buffer->vaddr = NULL;
+ buffer->size = 0;
dma_free_coherent(gxp->dev, size, buf, daddr);
- return NULL;
+ return ret;
}
}
- if (dma_handle)
- *dma_handle = daddr;
+ buffer->dsp_addr = daddr;
- return buf;
+ return 0;
}
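A minimal allocation/free sketch built on the reworked gxp_coherent_buf flow; the PAGE_SIZE allocation and the memset are illustrative, and passing a NULL domain would skip the per-domain mapping entirely.

static int gxp_example_coherent(struct gxp_dev *gxp,
				struct gxp_iommu_domain *gdomain)
{
	struct gxp_coherent_buf buf;
	int ret;

	ret = gxp_dma_alloc_coherent_buf(gxp, gdomain, PAGE_SIZE, GFP_KERNEL,
					 /*gxp_dma_flags=*/0, &buf);
	if (ret)
		return ret;

	/* buf.vaddr is the kernel VA; buf.dsp_addr is the device-visible IOVA. */
	memset(buf.vaddr, 0, buf.size);

	gxp_dma_free_coherent_buf(gxp, gdomain, &buf);
	return 0;
}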
void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle)
-{
- int virt_core;
-
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_handle, size))
- dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
- }
-}
-
-void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
-{
- if (vd != NULL)
- gxp_dma_unmap_allocated_coherent_buffer(gxp, vd, virt_core_list,
- size, dma_handle);
- dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
-}
-
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, void *cpu_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- dma_addr_t daddr;
- phys_addr_t paddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
-
- daddr = dma_map_single_attrs(gxp->dev, cpu_addr, size, direction,
- attrs);
- if (dma_mapping_error(gxp->dev, daddr))
- return DMA_MAPPING_ERROR;
-
- paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
- prot))
- goto err;
- }
-
- return daddr;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
- dma_unmap_single_attrs(gxp->dev, daddr, size, direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- return DMA_MAPPING_ERROR;
-}
-
-void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- int virt_core;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
- dev_warn(gxp->dev, "Failed to unmap single\n");
- }
-
- dma_unmap_single_attrs(gxp->dev, dma_addr, size, direction, attrs);
-}
-
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- dma_addr_t daddr;
- phys_addr_t paddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
-
- daddr = dma_map_page_attrs(gxp->dev, page, offset, size, direction,
- attrs);
- if (dma_mapping_error(gxp->dev, daddr))
- return DMA_MAPPING_ERROR;
-
- paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
- prot))
- goto err;
- }
-
- return daddr;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
- dma_unmap_page_attrs(gxp->dev, daddr, size, direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- return DMA_MAPPING_ERROR;
-}
-
-void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- int virt_core;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
- dev_warn(gxp->dev, "Failed to unmap page\n");
- }
-
- dma_unmap_page_attrs(gxp->dev, dma_addr, size, direction, attrs);
-}
-
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf)
{
- dma_addr_t daddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
-
- daddr = dma_map_resource(gxp->dev, phys_addr, size, direction, attrs);
- if (dma_mapping_error(gxp->dev, daddr))
- return DMA_MAPPING_ERROR;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (iommu_map(vd->core_domains[virt_core], daddr, phys_addr,
- size, prot))
- goto err;
- }
-
- return daddr;
-
-err:
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
- dma_unmap_resource(gxp->dev, daddr, size, direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- return DMA_MAPPING_ERROR;
+ if (buf->size != iommu_unmap(gdomain->domain, buf->dma_addr, buf->size))
+ dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
}
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+void gxp_dma_free_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf)
{
- int virt_core;
-
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (size !=
- iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
- dev_warn(gxp->dev, "Failed to unmap resource\n");
- }
-
- dma_unmap_resource(gxp->dev, dma_addr, size, direction, attrs);
+ if (gdomain != NULL)
+ gxp_dma_unmap_allocated_coherent_buffer(gxp, gdomain, buf);
+ dma_free_coherent(gxp->dev, buf->size, buf->vaddr, buf->dma_addr);
}
-int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- int virt_core_list, struct scatterlist *sg, int nents,
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs,
uint gxp_dma_flags)
{
int nents_mapped;
dma_addr_t daddr;
- int prot = dma_info_to_prot(direction, 0, attrs);
- int virt_core;
+ int prot = map_flags_to_iommu_prot(direction, attrs, gxp_dma_flags);
ssize_t size_mapped;
- /* Variables needed to cleanup if an error occurs */
- struct scatterlist *s;
- int i;
- size_t size = 0;
nents_mapped = dma_map_sg_attrs(gxp->dev, sg, nents, direction, attrs);
if (!nents_mapped)
@@ -805,71 +645,73 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
daddr = sg_dma_address(sg);
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- /*
- * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
- * `ssize_t` to encode errors that earlier versions throw out.
- * Explicitly cast here for backwards compatibility.
- */
- size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
- daddr, sg, nents, prot);
- if (size_mapped <= 0)
- goto err;
- }
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped =
+ (ssize_t)iommu_map_sg(gdomain->domain, daddr, sg, nents, prot);
+ if (size_mapped <= 0)
+ goto err;
return nents_mapped;
err:
- for_each_sg(sg, s, nents, i) {
- size += sg_dma_len(s);
- }
-
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], daddr, size);
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
return 0;
}
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs)
{
struct scatterlist *s;
int i;
size_t size = 0;
- int virt_core;
- for_each_sg(sg, s, nents, i) {
+ for_each_sg (sg, s, nents, i)
size += sg_dma_len(s);
- }
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (!iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sg),
- size))
- dev_warn(gxp->dev, "Failed to unmap sg\n");
- }
+ if (!iommu_unmap(gdomain->domain, sg_dma_address(sg), size))
+ dev_warn(gxp->dev, "Failed to unmap sg\n");
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
}
-void gxp_dma_sync_single_for_cpu(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ dma_addr_t iova, struct sg_table *sgt, int prot)
{
- /* Syncing is not domain specific. Just call through to DMA API */
- dma_sync_single_for_cpu(gxp->dev, dma_handle, size, direction);
+ ssize_t size_mapped;
+
+ size_mapped = (ssize_t)iommu_map_sg(gdomain->domain, iova, sgt->sgl,
+ sgt->orig_nents, prot);
+ if (size_mapped <= 0) {
+ dev_err(gxp->dev, "map IOVA %pad to SG table failed: %d", &iova,
+ (int)size_mapped);
+ if (size_mapped == 0)
+ return -EINVAL;
+ return size_mapped;
+ }
+ dma_sync_sg_for_device(gxp->dev, sgt->sgl, sgt->orig_nents,
+ DMA_BIDIRECTIONAL);
+
+ return 0;
}
-void gxp_dma_sync_single_for_device(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+void gxp_dma_unmap_iova_sgt(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, dma_addr_t iova,
+ struct sg_table *sgt)
{
- /* Syncing is not domain specific. Just call through to DMA API */
- dma_sync_single_for_device(gxp->dev, dma_handle, size, direction);
+ struct scatterlist *s;
+ int i;
+ size_t size = 0;
+
+ for_each_sg (sgt->sgl, s, sgt->orig_nents, i)
+ size += s->length;
+
+ if (!iommu_unmap(gdomain->domain, iova, size))
+ dev_warn(gxp->dev, "Failed to unmap sgt");
}
void gxp_dma_sync_sg_for_cpu(struct gxp_dev *gxp, struct scatterlist *sg,
@@ -886,20 +728,16 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
}
-struct sg_table *gxp_dma_map_dmabuf_attachment(
- struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
{
struct sg_table *sgt;
int prot = dma_info_to_prot(direction, /*coherent=*/0, /*attrs=*/0);
ssize_t size_mapped;
- int virt_core;
int ret;
- /* Variables needed to cleanup if an error occurs */
- struct scatterlist *s;
- int i;
- size_t size = 0;
/* Map the attachment into the default domain */
sgt = dma_buf_map_attachment(attachment, direction);
@@ -910,49 +748,34 @@ struct sg_table *gxp_dma_map_dmabuf_attachment(
return sgt;
}
- /* Map the sgt into the aux domain of all specified cores */
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped =
+ (ssize_t)iommu_map_sg(gdomain->domain, sg_dma_address(sgt->sgl),
+ sgt->sgl, sgt->orig_nents, prot);
+ if (size_mapped <= 0) {
+ dev_err(gxp->dev, "Failed to map dma-buf: %ld\n", size_mapped);
/*
- * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
- * `ssize_t` to encode errors that earlier versions throw out.
- * Explicitly cast here for backwards compatibility.
+ * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
+ * any failure. Return a generic IO error in this case.
*/
- size_mapped =
- (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
- sg_dma_address(sgt->sgl),
- sgt->sgl, sgt->orig_nents, prot);
- if (size_mapped <= 0) {
- dev_err(gxp->dev,
- "Failed to map dma-buf to virtual core %d (ret=%ld)\n",
- virt_core, size_mapped);
- /*
- * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
- * any failure. Return a generic IO error in this case.
- */
- ret = size_mapped == 0 ? -EIO : (int)size_mapped;
- goto err;
- }
+ ret = size_mapped == 0 ? -EIO : (int)size_mapped;
+ goto err;
}
return sgt;
err:
- for_each_sg(sgt->sgl, s, sgt->nents, i)
- size += sg_dma_len(s);
-
- for (virt_core -= 1; virt_core >= 0; virt_core--)
- iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sgt->sgl), size);
dma_buf_unmap_attachment(attachment, sgt, direction);
return ERR_PTR(ret);
-
}
void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
@@ -960,23 +783,13 @@ void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
struct scatterlist *s;
int i;
size_t size = 0;
- int virt_core;
/* Find the size of the mapping in IOVA-space */
- for_each_sg(sgt->sgl, s, sgt->nents, i)
+ for_each_sg (sgt->sgl, s, sgt->nents, i)
size += sg_dma_len(s);
- /* Unmap the dma-buf from the aux domain of all specified cores */
- for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
- if (!(virt_core_list & BIT(virt_core)))
- continue;
- if (!iommu_unmap(vd->core_domains[virt_core],
- sg_dma_address(sgt->sgl), size))
- dev_warn(
- gxp->dev,
- "Failed to unmap dma-buf from virtual core %d\n",
- virt_core);
- }
+ if (!iommu_unmap(gdomain->domain, sg_dma_address(sgt->sgl), size))
+ dev_warn(gxp->dev, "Failed to unmap dma-buf\n");
/* Unmap the attachment from the default domain */
dma_buf_unmap_attachment(attachment, sgt, direction);
diff --git a/gxp-dma.h b/gxp-dma.h
index cf05e57..eb131fd 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -10,13 +10,33 @@
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
#include <linux/types.h>
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
#include <soc/google/tpu-ext.h>
#endif
#include "gxp-internal.h"
+struct gxp_iommu_domain {
+ struct iommu_domain *domain;
+ uint ctx_id;
+};
+
+struct gxp_coherent_buf {
+ void *vaddr; /* kernel VA, no allocation if NULL */
+ /* TODO(b/249030390): Use standard DMA-IOMMU APIs returned address */
+ dma_addr_t dma_addr; /* DMA handle obtained from DMA-IOMMU APIs. */
+ /*
+ * IOVA to be accessed by the device. Equal to @dma_addr when there is
+ * no self-managed IOMMU.
+ */
+ dma_addr_t dsp_addr;
+ u64 phys_addr; /* physical address, if available */
+ size_t size;
+};
+
struct gxp_dma_manager {
struct rb_root mapping_tree;
};
@@ -33,6 +53,24 @@ struct gxp_dma_manager {
#endif
/**
+ * gxp_iommu_map() - Create mappings in iommu
+ * @gxp: The GXP device
+ * @gdomain: The IOMMU domain to create mappings in.
+ *
+ * Return: 0 on success or negative value indicating error
+ */
+int gxp_iommu_map(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ unsigned long iova, phys_addr_t paddr, size_t size, int prot);
+
+/**
+ * gxp_iommu_unmap() - Reverts mappings created by gxp_iommu_map()
+ * @gxp: The GXP device
+ * @gdomain: The IOMMU domain to revert mappings in.
+ */
+void gxp_iommu_unmap(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ unsigned long iova, size_t size);
+
+/**
* gxp_dma_init() - Initialize the GXP DMA subsystem
* @gxp: The GXP device to initialize DMA for
*
@@ -51,35 +89,27 @@ int gxp_dma_init(struct gxp_dev *gxp);
void gxp_dma_exit(struct gxp_dev *gxp);
/**
- * gxp_dma_domain_attach_device() - Attach the page table of a virtual core to
- * the device and perform any necessary initialization.
+ * gxp_dma_domain_attach_device() - Attach the page table to the device and
+ * perform necessary initialization.
* @gxp: The GXP device to attach
- * @vd: The virtual device including the virtual core
- * @virt_core: The virtual core the page table belongs to
- * @core: The physical core is bound with the virtual core
+ * @gdomain: The IOMMU domain to be attached.
+ * @core_list: The physical cores to attach.
*
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * The caller must ensure a BLOCK wakelock is held for the IOMMU attach.
*/
int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core);
+ struct gxp_iommu_domain *gdomain,
+ uint core_list);
/**
- * gxp_dma_domain_detach_device() - Detach the page table of a virtual core from
- * the device.
+ * gxp_dma_domain_detach_device() - Detach the page table from the device.
* @gxp: The GXP device to detach
- * @vd: The virtual device including the virtual core
- * @virt_core: The virtual core the page table belongs to
- *
- * The client the @vd belongs to must hold a BLOCK wakelock for the iommu
- * detaching
+ * @gdomain: The IOMMU domain to be detached
*
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * The caller must ensure a BLOCK wakelock is held for the IOMMU detach.
*/
void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core);
+ struct gxp_iommu_domain *gdomain);
/**
* gxp_dma_init_default_resources() - Set the various buffers/registers with
@@ -93,11 +123,11 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp);
/**
* gxp_dma_map_core_resources() - Map the various buffers/registers with
- * fixed IOVAs on certain virtual core
+ * fixed IOVAs on the IOMMU domain.
* @gxp: The GXP device to set up the mappings for
- * @vd: The virtual device including the virtual core the IOVA are mapped for
- * @virt_core: The virtual core the IOVAs are mapped for
- * @core: The corresponding physical core of the @virt_core
+ * @gdomain: The IOMMU domain to be mapped on
+ * @core_list: The physical cores that may use the domain
+ * @slice_index: The index of the shared-buffer slice to be mapped
*
* GXP firmware expects several buffers and registers to be mapped to fixed
* locations in their IOVA space. This function initializes all those mappings
@@ -107,324 +137,167 @@ void gxp_dma_init_default_resources(struct gxp_dev *gxp);
* fields of every `struct gxp_mapped_resource` inside of @gxp have been
* initialized.
*
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- *
* Return:
* * 0 - Mappings created successfully
* * -EIO - Failed to create one or more of the mappings
*/
int gxp_dma_map_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core);
+ struct gxp_iommu_domain *gdomain, uint core_list,
+ u8 slice_index);
/**
* gxp_dma_unmap_core_resources() - Unmap the IOVAs mapped by
- * gxp_dma_map_resources
+ * gxp_dma_map_core_resources()
* @gxp: The GXP device that was passed to gxp_dma_map_core_resources()
- * @vd: The virtual device including the virtual core the IOVAs were mapped for
- * @virt_core: The virtual core the IOVAs were mapped for
- * @core: The physical cores the IOVAs were mapped for
+ * @gdomain: The IOMMU domain to be unmapped
+ * @core_list: The physical cores the IOVAs were mapped for
*
* GXP firmware expects several buffers and registers to be mapped to fixed
* locations in their IOVA space. This function releases all those mappings.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core);
+ struct gxp_iommu_domain *gdomain,
+ uint core_list);
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
+#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && \
+ !IS_ENABLED(CONFIG_GXP_GEM5)
/**
* gxp_dma_map_tpu_buffer() - Map the tpu mbx queue buffers with fixed IOVAs
* @gxp: The GXP device to set up the mappings for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @gdomain: The IOMMU domain to be mapped on
* @core_list: A bitfield enumerating the physical cores the mapping is for
* @mbx_info: Structure holding TPU-DSP mailbox queue buffer information
*
* Return:
* * 0 - Mappings created successfully
* * -EIO - Failed to create the mappings
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, uint core_list,
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info);
/**
* gxp_dma_unmap_tpu_buffer() - Unmap IOVAs mapped by gxp_dma_map_tpu_buffer()
* @gxp: The GXP device that was passed to gxp_dma_map_tpu_buffer()
- * @vd: The virtual device including the virtual cores the mapping was for
- * @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes. The
- * list of virtual cores to unmap is in this descriptor.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * @gdomain: The IOMMU domain the mappings were mapped on
+ * @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes.
*/
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
+ struct gxp_iommu_domain *gdomain,
struct gxp_tpu_mbx_desc mbx_desc);
-#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
+#endif // (CONFIG_GXP_TEST || CONFIG_ANDROID) && !CONFIG_GXP_GEM5
/**
* gxp_dma_map_allocated_coherent_buffer() - Map a coherent buffer
* @gxp: The GXP device to map the allocated buffer for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @size: The size of the allocated buffer, in bytes
- * @dma_handle: The allocated device IOVA
+ * @buf: The coherent buffer
+ * @gdomain: The IOMMU domain to be mapped on
* @gxp_dma_flags: The type of mapping to create; currently unused
*
- * Return: Kernel virtual address of the mapped buffer
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * Return: 0 on success, otherwise an error code
*/
-int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle,
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_coherent_buf *buf,
+ struct gxp_iommu_domain *gdomain,
uint gxp_dma_flags);
/**
* gxp_dma_unmap_allocated_coherent_buffer() - Unmap a coherent buffer
* @gxp: The GXP device the buffer was allocated and mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @size: The size of the buffer, in bytes
- * @dma_handle: The device IOVA
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * @gdomain: The IOMMU domain the buffer was mapped on
+ * @buf: The coherent buffer
*/
void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t dma_handle);
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf);
/**
 * gxp_dma_alloc_coherent_buf() - Allocate and map a coherent buffer for a GXP core
* @gxp: The GXP device to map the allocated buffer for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @gdomain: The IOMMU domain the buffer is to be mapped on
* @size: The size of the buffer to be allocated, in bytes
- * @dma_handle: Reference to a variable to be set to the allocated IOVA
* @flag: The type of memory to allocate (see kmalloc)
* @gxp_dma_flags: The type of mapping to create; Currently unused
+ * @buffer: The coherent buffer
*
- * Return: Kernel virtual address of the allocated/mapped buffer
+ * Return: 0 on success, otherwise an error code
*
- * If the passed @vd is a null pointer, this function will only allocate a
- * buffer but not map it to any particular core.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * If the passed @gdomain is a null pointer, this function will only allocate
+ * a buffer but not map it to any domain.
+ * Note: The allocated buffer's size may be larger than the requested size.
*/
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags);
+int gxp_dma_alloc_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, size_t size,
+ gfp_t flag, uint gxp_dma_flags,
+ struct gxp_coherent_buf *buffer);
/**
 * gxp_dma_free_coherent_buf() - Unmap and free a coherent buffer
* @gxp: The GXP device the buffer was allocated and mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @size: The size of the buffer, in bytes, passed to `gxp_dma_alloc_coherent()`
- * @cpu_addr: The kernel virtual address returned by `gxp_dma_alloc_coherent()`
- * @dma_handle: The device IOVA, set by `gxp_dma_alloc_coherent()`
+ * @gdomain: The IOMMU domain the mapping was mapped to
+ * @buf: The coherent buffer
*
* If the buffer is mapped via `gxp_dma_map_allocated_coherent_buffer`, the
* caller must call `gxp_dma_unmap_allocated_coherent_buffer` to unmap before
* freeing the buffer.
*
- * If the passed @vd is a null pointer, this function will only free the buffer
- * but not do any unmapping.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-
-/**
- * gxp_dma_map_single() - Create a mapping for a kernel buffer
- * @gxp: The GXP device to map the buffer for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @cpu_addr: The kernel virtual address of the buffer to map
- * @size: The size of the buffer to map, in bytes
- * @direction: DMA direction
- * @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
- *
- * Return: The IOVA the buffer was mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, void *cpu_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags);
-/**
- * gxp_dma_unmap_single() - Unmap a kernel buffer
- * @gxp: The GXP device the buffer was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @dma_addr: The device IOVA, returned by `gxp_dma_map_single()`
- * @size: The size of the mapping, which was passed to `gxp_dma_map_single()`
- * @direction: DMA direction; same as passed to `gxp_dma_map_single()`
- * @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs);
-
-/**
- * gxp_dma_map_page() - Create a mapping for a physical page of memory
- * @gxp: The GXP device to map the page for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @page: The `struct page` of the physical page to create a mapping for
- * @offset: The offset into @page to begin the mapping at
- * @size: The number of bytes in @page to map
- * @direction: DMA direction
- * @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
- *
- * Return: The IOVA the page was mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags);
-/**
- * gxp_dma_unmap_page() - Unmap a physical page of memory
- * @gxp: The GXP device the page was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @dma_addr: The device IOVA, returned by `gxp_dma_map_page()`
- * @size: The size of the mapping, which was passed to `gxp_dma_map_page()`
- * @direction: DMA direction; Same as passed to `gxp_dma_map_page()`
- * @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction, unsigned long attrs);
-
-/**
- * gxp_dma_map_resource() - Create a mapping for an MMIO resource
- * @gxp: The GXP device to map the resource for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
- * @phys_addr: The physical address of the MMIO resource to map
- * @size: The size of the MMIO region to map, in bytes
- * @direction: DMA direction
- * @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
- *
- * Return: The IOVA the MMIO resource was mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
- */
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags);
-/**
- * gxp_dma_unmap_resource() - Unmap an MMIO resource
- * @gxp: The GXP device the MMIO resource was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
- * @dma_addr: The device IOVA, returned by `gxp_dma_map_resource()`
- * @size: The size of the mapping, which was passed to `gxp_dma_map_resource()`
- * @direction: DMA direction; Same as passed to `gxp_dma_map_resource()`
- * @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
+ * If the passed @gdomain is a null pointer, this function will only free the
+ * buffer and will not do any unmapping.
*/
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs);
+void gxp_dma_free_coherent_buf(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct gxp_coherent_buf *buf);
/**
* gxp_dma_map_sg() - Create a mapping for a scatter-gather list
* @gxp: The GXP device to map the scatter-gather list for
- * @vd: The virtual device including the virtual cores the mapping is for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @gdomain: The IOMMU domain to be mapped
* @sg: The scatter-gather list of the buffer to be mapped
* @nents: The number of entries in @sg
* @direction: DMA direction
* @attrs: The same set of flags used by the base DMA API
- * @gxp_dma_flags: The type of mapping to create; Currently unused
+ * @gxp_dma_flags: The type of mapping to create
*
* Return: The number of scatter-gather entries mapped to
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- int virt_core_list, struct scatterlist *sg, int nents,
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs,
uint gxp_dma_flags);
/**
* gxp_dma_unmap_sg() - Unmap a scatter-gather list
* @gxp: The GXP device the scatter-gather list was mapped for
- * @vd: The virtual device including the virtual cores the mapping was for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
+ * @gdomain: The IOMMU domain the mapping was created on
* @sg: The scatter-gather list to unmap; The same one passed to
* `gxp_dma_map_sg()`
* @nents: The number of entries in @sg; Same value passed to `gxp_dma_map_sg()`
* @direction: DMA direction; Same as passed to `gxp_dma_map_sg()`
* @attrs: The same set of flags used by the base DMA API
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core_list, struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs);
/**
- * gxp_dma_sync_single_for_cpu() - Sync buffer for reading by the CPU
- * @gxp: The GXP device the mapping was created for
- * @dma_handle: The device IOVA, obtained from one of the `gxp_dma_map_*` APIs
- * @size: The size of the mapped region to sync
- * @direction: DMA direction
+ * gxp_dma_map_iova_sgt() - Create a mapping for a scatter-gather list at a specific IOVA
+ * @gxp: The GXP device to map the scatter-gather list for
+ * @gdomain: The IOMMU domain to create the mapping on
+ * @iova: The IOVA to map the scatter-gather list at
+ * @sgt: The scatter-gather table of the buffer to be mapped
+ * @prot: The protection bits to be passed to the IOMMU API
+ *
+ * Return: 0 on success. Negative errno otherwise.
*/
-void gxp_dma_sync_single_for_cpu(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
+int gxp_dma_map_iova_sgt(struct gxp_dev *gxp, struct gxp_iommu_domain *gdomain,
+ dma_addr_t iova, struct sg_table *sgt, int prot);
/**
- * gxp_dma_sync_single_for_device() - Sync buffer for reading by the device
- * @gxp: The GXP device the mapping was created for
- * @dma_handle: The device IOVA, obtained from one of the `gxp_dma_map_*` APIs
- * @size: The size of the mapped region to sync
- * @direction: DMA direction
+ * gxp_dma_unmap_iova_sgt() - Revert gxp_dma_map_iova_sgt()
+ * @gxp: The GXP device the scatter-gather list was mapped for
+ * @gdomain: The IOMMU domain the mapping was created on
+ * @iova: The IOVA to be unmapped.
+ * @sgt: The scatter-gather list to unmap; The same one passed to
+ * `gxp_dma_map_iova_sgt()`
*/
-void gxp_dma_sync_single_for_device(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
+void gxp_dma_unmap_iova_sgt(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain, dma_addr_t iova,
+ struct sg_table *sgt);
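A sketch of the fixed-IOVA pair declared above, assuming the caller already owns an sg_table for the backing pages; the IOVA constant is a placeholder, not a real reservation.

static int gxp_example_map_fixed_iova(struct gxp_dev *gxp,
				      struct gxp_iommu_domain *gdomain,
				      struct sg_table *sgt)
{
	const dma_addr_t iova = 0x10000000;	/* placeholder device address */
	int ret;

	ret = gxp_dma_map_iova_sgt(gxp, gdomain, iova, sgt,
				   IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... firmware accesses the buffer at @iova ... */

	gxp_dma_unmap_iova_sgt(gxp, gdomain, iova, sgt);
	return 0;
}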
/**
* gxp_dma_sync_sg_for_cpu() - Sync sg list for reading by the CPU
@@ -448,42 +321,57 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
/**
* gxp_dma_map_dmabuf_attachment() - Create a mapping for a dma-buf
* @gxp: The GXP device to map the dma-buf for
- * @vd: The virtual device including the virtual cores the dma-buf is for
- * @virt_core_list: A bitfield enumerating the virtual cores the dma-buf is for
+ * @gdomain: The IOMMU domain the dma-buf is to be mapped on
* @attachment: An attachment, representing the dma-buf, obtained from
* `dma_buf_attach()`
* @direction: DMA direction
*
* Return: A scatter-gather table describing the mapping of the dma-buf
* into the default IOMMU domain. Returns ERR_PTR on failure.
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
-struct sg_table *gxp_dma_map_dmabuf_attachment(
- struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction);
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction);
/**
* gxp_dma_unmap_dmabuf_attachment() - Unmap a dma-buf
* @gxp: The GXP device the dma-buf was mapped for
- * @vd: The virtual device including the virtual cores the dma-buf is for
- * @virt_core_list: A bitfield enumerating the virtual cores the dma-buf was for
+ * @gdomain: The IOMMU domain the buffer was mapped on
* @attachment: The attachment, representing the dma-buf, that was passed to
* `gxp_dma_map_dmabuf_attachment()` to create the mapping
* @sgt: The scatter-gather table returned by `gxp_dma_map_dmabuf_attachment()`
* when mapping this dma-buf
* @direction: DMA direction
- *
- * The caller must make sure @vd will not be released for the duration of the
- * call.
*/
void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list,
+ struct gxp_iommu_domain *gdomain,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction);
+/**
+ * gxp_iommu_get_domain_for_dev() - Get the default IOMMU domain
+ * @gxp: The GXP device to get the default domain for
+ *
+ * Return: The gxp_iommu_domain wrapping the device's default IOMMU domain.
+ */
+struct gxp_iommu_domain *gxp_iommu_get_domain_for_dev(struct gxp_dev *gxp);
+
+/**
+ * gxp_iommu_aux_get_pasid() - Get PASID corresponding to gdomain
+ * @gxp: The GXP device attached to IOMMU
+ * @gdomain: The IOMMU domain to get the PASID for
+ *
+ * Return: PASID of the passed domain
+ */
+uint gxp_iommu_aux_get_pasid(struct gxp_dev *gxp,
+ struct gxp_iommu_domain *gdomain);
+
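A hedged sketch of how the PASID accessor above might be used once a domain has been aux-attached; where the PASID ends up being programmed is hardware-specific and intentionally not shown.

static int gxp_example_report_pasid(struct gxp_dev *gxp,
				    struct gxp_iommu_domain *gdomain,
				    uint core_list)
{
	uint pasid;
	int ret;

	ret = gxp_dma_domain_attach_device(gxp, gdomain, core_list);
	if (ret)
		return ret;

	pasid = gxp_iommu_aux_get_pasid(gxp, gdomain);
	dev_dbg(gxp->dev, "domain attached with PASID %u\n", pasid);

	gxp_dma_domain_detach_device(gxp, gdomain);
	return 0;
}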
+/**
+ * gxp_iommu_setup_shareability() - Set shareability to enable IO-Coherency.
+ * @gxp: The GXP device to set shareability for
+ */
+void gxp_iommu_setup_shareability(struct gxp_dev *gxp);
#endif /* __GXP_DMA_H__ */
diff --git a/gxp-dmabuf.c b/gxp-dmabuf.c
index 789efeb..92c419d 100644
--- a/gxp-dmabuf.c
+++ b/gxp-dmabuf.c
@@ -8,9 +8,11 @@
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/version.h>
#include "gxp-dma.h"
#include "gxp-dmabuf.h"
+#include "gxp-vd.h"
struct gxp_dmabuf_mapping {
struct gxp_mapping mapping;
@@ -34,13 +36,12 @@ static void destroy_dmabuf_mapping(struct gxp_mapping *mapping)
{
struct gxp_dmabuf_mapping *dmabuf_mapping;
struct gxp_dev *gxp = mapping->gxp;
- struct gxp_virtual_device *vd = mapping->vd;
/* Unmap and detach the dma-buf */
dmabuf_mapping =
container_of(mapping, struct gxp_dmabuf_mapping, mapping);
- gxp_dma_unmap_dmabuf_attachment(gxp, vd, mapping->virt_core_list,
+ gxp_dma_unmap_dmabuf_attachment(gxp, mapping->domain,
dmabuf_mapping->attachment,
dmabuf_mapping->sgt, mapping->dir);
dma_buf_detach(dmabuf_mapping->dmabuf, dmabuf_mapping->attachment);
@@ -50,9 +51,8 @@ static void destroy_dmabuf_mapping(struct gxp_mapping *mapping)
}
struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, int fd, u32 flags,
- enum dma_data_direction dir)
+ struct gxp_iommu_domain *domain, int fd,
+ u32 flags, enum dma_data_direction dir)
{
struct dma_buf *dmabuf;
struct dma_buf_attachment *attachment;
@@ -78,7 +78,7 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
goto err_attach;
}
- sgt = gxp_dma_map_dmabuf_attachment(gxp, vd, virt_core_list, attachment, dir);
+ sgt = gxp_dma_map_dmabuf_attachment(gxp, domain, attachment, dir);
if (IS_ERR(sgt)) {
dev_err(gxp->dev,
"Failed to map dma-buf attachment (ret=%ld)\n",
@@ -98,10 +98,10 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
dmabuf_mapping->mapping.destructor = destroy_dmabuf_mapping;
dmabuf_mapping->mapping.host_address = 0;
dmabuf_mapping->mapping.gxp = gxp;
- dmabuf_mapping->mapping.virt_core_list = virt_core_list;
- dmabuf_mapping->mapping.vd = vd;
+ dmabuf_mapping->mapping.domain = domain;
dmabuf_mapping->mapping.device_address = sg_dma_address(sgt->sgl);
dmabuf_mapping->mapping.dir = dir;
+ dmabuf_mapping->mapping.size = dmabuf->size;
dmabuf_mapping->dmabuf = dmabuf;
dmabuf_mapping->attachment = attachment;
dmabuf_mapping->sgt = sgt;
@@ -109,10 +109,14 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
return &dmabuf_mapping->mapping;
err_alloc_mapping:
- gxp_dma_unmap_dmabuf_attachment(gxp, vd, virt_core_list, attachment, sgt, dir);
+ gxp_dma_unmap_dmabuf_attachment(gxp, domain, attachment, sgt, dir);
err_map_attachment:
dma_buf_detach(dmabuf, attachment);
err_attach:
dma_buf_put(dmabuf);
return ERR_PTR(ret);
}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
+MODULE_IMPORT_NS(DMA_BUF);
+#endif
diff --git a/gxp-dmabuf.h b/gxp-dmabuf.h
index 5803841..8e1e056 100644
--- a/gxp-dmabuf.h
+++ b/gxp-dmabuf.h
@@ -7,7 +7,7 @@
#ifndef __GXP_DMABUF_H__
#define __GXP_DMABUF_H__
-#include <linux/dma-direction.h>
+#include <linux/iommu.h>
#include <linux/types.h>
#include "gxp-internal.h"
@@ -16,8 +16,7 @@
/**
* gxp_dmabuf_map() - Map a dma-buf for access by the specified virtual device
* @gxp: The GXP device to map the dma-buf for
- * @vd: The virtual device includes the virtual cores the dma-buf is mapped for
- * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @domain: The iommu domain the dma-buf is mapped for
* @fd: A file descriptor for the dma-buf to be mapped
* @flags: The type of mapping to create; Currently unused
* @direction: DMA direction
@@ -28,8 +27,7 @@
* mapping of the dma-buf. Returns ERR_PTR on failure.
*/
struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, int fd, u32 flags,
- enum dma_data_direction dir);
+ struct gxp_iommu_domain *domain, int fd,
+ u32 flags, enum dma_data_direction dir);
#endif /* __GXP_DMABUF_H__ */
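A sketch of mapping a dma-buf through the reworked interface, assuming a valid dma-buf fd from user space; the flags value and DMA direction are arbitrary, and the returned gxp_mapping is released through its destructor elsewhere in the driver.

static struct gxp_mapping *
gxp_example_map_dmabuf(struct gxp_dev *gxp, struct gxp_iommu_domain *domain,
		       int fd)
{
	/* flags are currently unused; DMA_BIDIRECTIONAL chosen for illustration. */
	return gxp_dmabuf_map(gxp, domain, fd, /*flags=*/0, DMA_BIDIRECTIONAL);
}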
diff --git a/gxp-domain-pool.c b/gxp-domain-pool.c
index 53a5b38..a0f9ead 100644
--- a/gxp-domain-pool.c
+++ b/gxp-domain-pool.c
@@ -5,100 +5,75 @@
* Copyright (C) 2022 Google LLC
*/
-#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/slab.h>
+#include <gcip/gcip-domain-pool.h>
+
+#include "gxp-dma.h"
#include "gxp-domain-pool.h"
-#include "gxp-internal.h"
-int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
+int gxp_domain_pool_init(struct gxp_dev *gxp, struct gcip_domain_pool *pool,
unsigned int size)
{
- unsigned int i;
- struct iommu_domain *domain;
-
- pool->size = size;
- pool->gxp = gxp;
-
- if (!size)
- return 0;
+ int ret = gcip_domain_pool_init(gxp->dev, pool, size);
+ __maybe_unused int i;
- dev_dbg(pool->gxp->dev, "Initializing domain pool with %u domains\n", size);
+ if (ret)
+ return ret;
- ida_init(&pool->idp);
- pool->array = vzalloc(sizeof(*pool->array) * size);
- if (!pool->array) {
- dev_err(gxp->dev, "Failed to allocate memory for domain pool array\n");
- return -ENOMEM;
- }
+#if IS_ENABLED(CONFIG_GXP_GEM5)
for (i = 0; i < size; i++) {
- domain = iommu_domain_alloc(pool->gxp->dev->bus);
- if (!domain) {
- dev_err(pool->gxp->dev,
- "Failed to allocate iommu domain %d of %u\n",
- i + 1, size);
+ struct iommu_domain *domain = pool->array[i];
+
+ /*
+ * Gem5 uses arm-smmu-v3, which requires the domain to be finalized before
+ * iommu_map() can be used. Attach the device with iommu_aux_attach_device()
+ * to finalize the allocated domain, then detach it right away.
+ */
+ ret = iommu_aux_attach_device(domain, gxp->dev);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to attach device to iommu domain %d of %u, ret=%d\n",
+ i + 1, size, ret);
gxp_domain_pool_destroy(pool);
- return -ENOMEM;
+ return ret;
}
- pool->array[i] = domain;
+
+ iommu_aux_detach_device(domain, gxp->dev);
}
+#endif /* CONFIG_GXP_GEM5 */
+
return 0;
}
-struct iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool)
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gcip_domain_pool *pool)
{
- int id;
-
- if (!pool->size)
- return iommu_domain_alloc(pool->gxp->dev->bus);
+ struct iommu_domain *domain = gcip_domain_pool_alloc(pool);
+ struct gxp_iommu_domain *gdomain;
- id = ida_alloc_max(&pool->idp, pool->size - 1, GFP_KERNEL);
+ if (!domain)
+ return NULL;
- if (id < 0) {
- dev_err(pool->gxp->dev,
- "No more domains available from pool of size %u\n",
- pool->size);
+ gdomain = kmalloc(sizeof(*gdomain), GFP_KERNEL);
+ if (!gdomain) {
+ gcip_domain_pool_free(pool, domain);
return NULL;
}
- dev_dbg(pool->gxp->dev, "Allocated domain from pool with id = %d\n", id);
+ gdomain->domain = domain;
- return pool->array[id];
+ return gdomain;
}
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct iommu_domain *domain)
+void gxp_domain_pool_free(struct gcip_domain_pool *pool,
+ struct gxp_iommu_domain *gdomain)
{
- int id;
-
- if (!pool->size) {
- iommu_domain_free(domain);
- return;
- }
- for (id = 0; id < pool->size; id++) {
- if (pool->array[id] == domain) {
- dev_dbg(pool->gxp->dev, "Released domain from pool with id = %d\n", id);
- ida_free(&pool->idp, id);
- return;
- }
- }
- dev_err(pool->gxp->dev, "%s: domain not found in pool", __func__);
+ gcip_domain_pool_free(pool, gdomain->domain);
+ kfree(gdomain);
}
-void gxp_domain_pool_destroy(struct gxp_domain_pool *pool)
+void gxp_domain_pool_destroy(struct gcip_domain_pool *pool)
{
- int i;
-
- if (!pool->size)
- return;
-
- dev_dbg(pool->gxp->dev, "Destroying domain pool with %u domains\n", pool->size);
-
- for (i = 0; i < pool->size; i++) {
- if (pool->array[i])
- iommu_domain_free(pool->array[i]);
- }
-
- ida_destroy(&pool->idp);
- vfree(pool->array);
+ gcip_domain_pool_destroy(pool);
}
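
Illustrative sketch, not part of the patch: a hypothetical caller of the reworked pool API above. gxp_domain_pool_alloc() now returns a kmalloc'd struct gxp_iommu_domain that wraps the gcip-allocated iommu_domain, and gxp_domain_pool_free() releases both.

/* The pool is assumed to have been set up with gxp_domain_pool_init(). */
static int example_use_pooled_domain(struct gxp_dev *gxp,
				     struct gcip_domain_pool *pool)
{
	struct gxp_iommu_domain *gdomain;

	gdomain = gxp_domain_pool_alloc(pool);
	if (!gdomain)
		return -ENOMEM;

	/* ... program mappings through gdomain->domain via the IOMMU API ... */

	gxp_domain_pool_free(pool, gdomain);
	return 0;
}
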
diff --git a/gxp-domain-pool.h b/gxp-domain-pool.h
index ee95155..ad2d38a 100644
--- a/gxp-domain-pool.h
+++ b/gxp-domain-pool.h
@@ -8,22 +8,9 @@
#ifndef __GXP_DOMAIN_POOL_H__
#define __GXP_DOMAIN_POOL_H__
-#include <linux/idr.h>
-#include <linux/iommu.h>
-
-#include "gxp-internal.h"
-
-struct gxp_domain_pool {
- struct ida idp; /* ID allocator to keep track of used domains. */
- /*
- * Size of the pool. Can be set to 0, in which case the implementation will fall back to
- * dynamic domain allocation using the IOMMU API directly.
- */
- unsigned int size;
- struct iommu_domain **array; /* Array holding the pointers to pre-allocated domains. */
- struct gxp_dev *gxp; /* The gxp device used for logging warnings/errors. */
-};
+#include <gcip/gcip-domain-pool.h>
+#include "gxp-dma.h"
/*
* Initializes a domain pool.
@@ -35,19 +22,19 @@ struct gxp_domain_pool {
*
* returns 0 on success or negative error value.
*/
-int gxp_domain_pool_init(struct gxp_dev *gxp, struct gxp_domain_pool *pool,
+int gxp_domain_pool_init(struct gxp_dev *gxp, struct gcip_domain_pool *pool,
unsigned int size);
/*
* Allocates a domain from the pool
* returns NULL on error.
*/
-struct iommu_domain *gxp_domain_pool_alloc(struct gxp_domain_pool *pool);
+struct gxp_iommu_domain *gxp_domain_pool_alloc(struct gcip_domain_pool *pool);
/* Releases a domain from the pool. */
-void gxp_domain_pool_free(struct gxp_domain_pool *pool, struct iommu_domain *domain);
+void gxp_domain_pool_free(struct gcip_domain_pool *pool,
+ struct gxp_iommu_domain *gdomain);
/* Cleans up all resources used by the domain pool. */
-void gxp_domain_pool_destroy(struct gxp_domain_pool *pool);
-
+void gxp_domain_pool_destroy(struct gcip_domain_pool *pool);
#endif /* __GXP_DOMAIN_POOL_H__ */
diff --git a/gxp-doorbell.c b/gxp-doorbell.c
index 0fc6389..491fb5b 100644
--- a/gxp-doorbell.c
+++ b/gxp-doorbell.c
@@ -19,9 +19,9 @@ void gxp_doorbell_enable_for_core(struct gxp_dev *gxp, u32 doorbell_num,
u32 val;
/* Enable DOORBELL_NUM on requested core */
- val = gxp_read_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0);
+ val = gxp_read_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core));
val |= BIT(doorbell_num);
- gxp_write_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0, val);
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core), val);
}
void gxp_doorbell_set(struct gxp_dev *gxp, u32 doorbell_num)
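
Illustrative sketch, not part of the patch: how the two doorbell helpers touched here are typically paired. Using CORE_WAKEUP_DOORBELL() (referenced later in gxp-firmware.c) as the doorbell ID is an assumption made only for this example.

/* Hypothetical helper: unmask, then ring, a core's wakeup doorbell. */
static void example_ring_wakeup_doorbell(struct gxp_dev *gxp, uint phys_core)
{
	u32 doorbell_num = CORE_WAKEUP_DOORBELL(phys_core);

	/* Read-modify-write of GXP_CORE_REG_COMMON_INT_MASK_0, as above. */
	gxp_doorbell_enable_for_core(gxp, doorbell_num, phys_core);
	gxp_doorbell_set(gxp, doorbell_num);
}
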
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index d1def41..841e80e 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -5,757 +5,275 @@
* Copyright (C) 2021 Google LLC
*/
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
-#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include "gxp-config.h"
#include "gxp-debug-dump.h"
#include "gxp-firmware-data.h"
+#include "gxp-firmware.h" /* gxp_core_boot */
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
-#include "gxp-range-alloc.h"
+#include "gxp-vd.h"
#include "gxp.h"
-/*
- * The minimum alignment order (power of 2) of allocations in the firmware data
- * region.
- */
-#define FW_DATA_STORAGE_ORDER 3
-
/* A byte pattern to pre-populate the FW region with */
-#define FW_DATA_DEBUG_PATTERN 0x66
-
-/* IDs for dedicated doorbells used by some system components */
-#define DOORBELL_ID_CORE_WAKEUP(__core__) (0 + __core__)
-
-/* IDs for dedicated sync barriers used by some system components */
-#define SYNC_BARRIER_ID_UART 1
+#define FW_DATA_DEBUG_PATTERN 0x66
/* Default application parameters */
-#define DEFAULT_APP_ID 1
-#define DEFAULT_APP_USER_MEM_SIZE (120 * 1024)
-#define DEFAULT_APP_USER_MEM_ALIGNMENT 8
-#define DEFAULT_APP_THREAD_COUNT 2
-#define DEFAULT_APP_TCM_PER_BANK (100 * 1024)
-#define DEFAULT_APP_USER_DOORBELL_COUNT 2
-#define DEFAULT_APP_USER_BARRIER_COUNT 2
-
-/* Core-to-core mailbox communication constants */
-#define CORE_TO_CORE_MBX_CMD_COUNT 10
-#define CORE_TO_CORE_MBX_RSP_COUNT 10
-
-/* A block allocator managing and partitioning a memory region for device use */
-struct fw_memory_allocator {
- struct gen_pool *pool;
- struct gxp_dev *gxp;
- void *base_host_addr;
- uint32_t base_device_addr;
-};
-
-/* A memory region allocated for device use */
-struct fw_memory {
- void *host_addr;
- uint32_t device_addr;
- size_t sz;
-};
+#define DEFAULT_APP_ID 1
/*
* Holds information about system-wide HW and memory resources given to the FWs
* of GXP devices.
*/
struct gxp_fw_data_manager {
- /* Host-side pointers for book keeping */
- void *fw_data_virt;
- struct gxp_system_descriptor *system_desc;
-
- /* Doorbells allocator and reserved doorbell IDs */
- struct range_alloc *doorbell_allocator;
- int core_wakeup_doorbells[GXP_NUM_CORES];
- int semaphore_doorbells[GXP_NUM_CORES];
-
- /* Sync barriers allocator and reserved sync barrier IDs */
- struct range_alloc *sync_barrier_allocator;
- int uart_sync_barrier;
- int timer_regions_barrier;
- int watchdog_region_barrier;
- int uart_region_barrier;
- int doorbell_regions_barrier;
- int sync_barrier_regions_barrier;
- int semaphores_regions_barrier;
-
- /* System-wide device memory resources */
- struct fw_memory_allocator *allocator;
- struct fw_memory sys_desc_mem;
- struct fw_memory wdog_mem;
- struct fw_memory telemetry_mem;
- struct fw_memory debug_dump_mem;
-};
-
-/* A container holding information for a single GXP application. */
-struct app_metadata {
- struct gxp_fw_data_manager *mgr;
- uint application_id;
- uint core_count;
- uint core_list; /* bitmap of cores allocated to this app */
-
- /* Per-app doorbell IDs */
- int user_doorbells_count;
- int *user_doorbells;
-
- /* Per-app sync barrier IDs */
- int user_barriers_count;
- int *user_barriers;
-
- /* Per-app memory regions */
- struct fw_memory user_mem;
- struct fw_memory doorbells_mem;
- struct fw_memory sync_barriers_mem;
- struct fw_memory semaphores_mem;
- struct fw_memory cores_mem;
- struct fw_memory core_cmd_queues_mem[GXP_NUM_CORES];
- struct fw_memory core_rsp_queues_mem[GXP_NUM_CORES];
- struct fw_memory app_mem;
-};
-
-static struct fw_memory_allocator *mem_alloc_create(struct gxp_dev *gxp,
- void *host_base,
- uint32_t device_base,
- size_t size)
-{
- struct fw_memory_allocator *allocator;
- int ret = 0;
-
- allocator = kzalloc(sizeof(*allocator), GFP_KERNEL);
- if (!allocator)
- return ERR_PTR(-ENOMEM);
-
+ /* Cached core telemetry descriptors. */
+ struct gxp_core_telemetry_descriptor core_telemetry_desc;
/*
- * Use a genpool to allocate and free chunks of the virtual address
- * space reserved for FW data. The genpool doesn't use the passed
- * addresses internally to access any data, thus it is safe to use it to
- * manage memory that the host may not be able to access directly.
- * The allocator also records the host-side address so that the code
- * here can access and populate data in this region.
+ * A host-view of the System configuration descriptor. This same desc
+ * is provided to all VDs and all cores. This is the R/O section.
*/
- allocator->gxp = gxp;
- allocator->pool = gen_pool_create(FW_DATA_STORAGE_ORDER, /*nid=*/-1);
- if (!allocator->pool) {
- dev_err(gxp->dev, "Failed to create memory pool\n");
- kfree(allocator);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = gen_pool_add(allocator->pool, device_base, size, /*nid=*/-1);
- if (ret) {
- dev_err(gxp->dev, "Failed to add memory to pool (ret = %d)\n",
- ret);
- gen_pool_destroy(allocator->pool);
- kfree(allocator);
- return ERR_PTR(ret);
- }
- allocator->base_host_addr = host_base;
- allocator->base_device_addr = device_base;
-
- return allocator;
-}
-
-static int mem_alloc_allocate(struct fw_memory_allocator *allocator,
- struct fw_memory *mem, size_t size,
- uint8_t alignment)
-{
- struct genpool_data_align data = { .align = alignment };
- uint32_t dev_addr;
-
- dev_addr = gen_pool_alloc_algo(allocator->pool, size,
- gen_pool_first_fit_align, &data);
- if (!dev_addr)
- return -ENOMEM;
-
- mem->host_addr = allocator->base_host_addr +
- (dev_addr - allocator->base_device_addr);
- mem->device_addr = dev_addr;
- mem->sz = size;
-
- return 0;
-}
-
-static void mem_alloc_free(struct fw_memory_allocator *allocator,
- struct fw_memory *mem)
-{
- gen_pool_free(allocator->pool, mem->device_addr, mem->sz);
-}
-
-static void mem_alloc_destroy(struct fw_memory_allocator *allocator)
-{
- WARN_ON(gen_pool_avail(allocator->pool) !=
- gen_pool_size(allocator->pool));
- gen_pool_destroy(allocator->pool);
- kfree(allocator);
-}
+ struct gxp_system_descriptor_ro *sys_desc_ro;
+ /*
+ * A host-view of the System configuration descriptor. This same desc
+ * is provided to all VDs and all cores. This is the R/W section.
+ */
+ struct gxp_system_descriptor_rw *sys_desc_rw;
+};
-static struct fw_memory init_doorbells(struct app_metadata *app)
+/*
+ * Assumes @sys_cfg contains gxp_system_descriptor_ro in its first page and
+ * gxp_system_descriptor_rw in its second page.
+ */
+static void set_system_cfg_region(struct gxp_dev *gxp, void *sys_cfg)
{
- struct gxp_doorbells_descriptor *db_region;
- struct fw_memory mem;
- uint32_t mem_size;
- uint32_t doorbell_count;
+ struct gxp_system_descriptor_ro *des_ro = sys_cfg;
+ struct gxp_system_descriptor_rw *des_rw = sys_cfg + PAGE_SIZE;
+ struct gxp_core_telemetry_descriptor *descriptor =
+ &gxp->data_mgr->core_telemetry_desc;
+ struct telemetry_descriptor_ro *tel_ro;
+ struct telemetry_descriptor_rw *tel_rw;
+ struct core_telemetry_descriptor *tel_des;
int i;
- doorbell_count = app->user_doorbells_count;
- mem_size = sizeof(*db_region) +
- doorbell_count * sizeof(db_region->doorbells[0]);
-
- mem_alloc_allocate(app->mgr->allocator, &mem, mem_size,
- __alignof__(struct gxp_doorbells_descriptor));
-
- db_region = mem.host_addr;
- db_region->application_id = app->application_id;
- db_region->protection_barrier = app->mgr->doorbell_regions_barrier;
- db_region->num_items = doorbell_count;
- for (i = 0; i < doorbell_count; i++) {
- db_region->doorbells[i].users_count = 0;
- db_region->doorbells[i].hw_doorbell_idx =
- app->user_doorbells[i];
+ if (gxp->debug_dump_mgr)
+ des_ro->debug_dump_dev_addr = gxp->debug_dump_mgr->buf.dsp_addr;
+ else
+ des_ro->debug_dump_dev_addr = 0;
+
+#define COPY_FIELDS(des, ro, rw) \
+ do { \
+ ro->host_status = des->host_status; \
+ ro->buffer_addr = des->buffer_addr; \
+ ro->buffer_size = des->buffer_size; \
+ rw->device_status = des->device_status; \
+ rw->data_available = des->watermark_level; \
+ } while (0)
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ tel_ro = &des_ro->telemetry_desc.per_core_loggers[i];
+ tel_rw = &des_rw->telemetry_desc.per_core_loggers[i];
+ tel_des = &descriptor->per_core_loggers[i];
+ COPY_FIELDS(tel_des, tel_ro, tel_rw);
+ tel_ro = &des_ro->telemetry_desc.per_core_tracers[i];
+ tel_rw = &des_rw->telemetry_desc.per_core_tracers[i];
+ tel_des = &descriptor->per_core_tracers[i];
+ COPY_FIELDS(tel_des, tel_ro, tel_rw);
}
+#undef COPY_FIELDS
- return mem;
+ /* Update the global descriptors. */
+ gxp->data_mgr->sys_desc_ro = des_ro;
+ gxp->data_mgr->sys_desc_rw = des_rw;
}
-static struct fw_memory init_sync_barriers(struct app_metadata *app)
+static void _gxp_fw_data_populate_vd_cfg(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
{
- struct gxp_sync_barriers_descriptor *sb_region;
- struct fw_memory mem;
- uint32_t mem_size;
- uint32_t barrier_count;
+ struct gxp_host_control_region *core_cfg;
+ struct gxp_job_descriptor job;
+ struct gxp_vd_descriptor *vd_desc;
int i;
- barrier_count = app->user_barriers_count;
- mem_size = sizeof(*sb_region) +
- barrier_count * sizeof(sb_region->barriers[0]);
-
- mem_alloc_allocate(app->mgr->allocator, &mem, mem_size,
- __alignof__(struct gxp_sync_barriers_descriptor));
-
- sb_region = mem.host_addr;
- sb_region->application_id = app->application_id;
- sb_region->protection_barrier = app->mgr->sync_barrier_regions_barrier;
- sb_region->num_items = barrier_count;
- for (i = 0; i < barrier_count; i++) {
- sb_region->barriers[i].users_count = 0;
- sb_region->barriers[i].hw_barrier_idx = app->user_barriers[i];
+ if (!gxp_core_boot(gxp)) {
+ dev_info(gxp->dev, "Skip setting VD and core CFG");
+ return;
}
-
- return mem;
-}
-
-static struct fw_memory init_watchdog(struct gxp_fw_data_manager *mgr)
-{
- struct gxp_watchdog_descriptor *wd_region;
- struct fw_memory mem;
-
- mem_alloc_allocate(mgr->allocator, &mem, sizeof(*wd_region),
- __alignof__(struct gxp_watchdog_descriptor));
-
- wd_region = mem.host_addr;
- wd_region->protection_barrier = mgr->watchdog_region_barrier;
- wd_region->target_value = 0;
- wd_region->participating_cores = 0;
- wd_region->responded_cores = 0;
- wd_region->tripped = 0;
-
- return mem;
-}
-
-static struct fw_memory init_telemetry(struct gxp_fw_data_manager *mgr)
-{
- struct gxp_telemetry_descriptor *tel_region;
- struct fw_memory mem;
-
- mem_alloc_allocate(mgr->allocator, &mem, sizeof(*tel_region),
- __alignof__(struct gxp_telemetry_descriptor));
-
- tel_region = mem.host_addr;
-
- /*
- * Telemetry is disabled for now.
- * Subsuequent calls to the FW data module can be used to populate or
- * depopulate the descriptor pointers on demand.
- */
- memset(tel_region, 0x00, sizeof(*tel_region));
-
- return mem;
-}
-
-static struct fw_memory init_debug_dump(struct gxp_dev *gxp)
-{
- struct fw_memory mem;
-
- if (gxp->debug_dump_mgr) {
- mem.host_addr = gxp->debug_dump_mgr->buf.vaddr;
- mem.device_addr = gxp->debug_dump_mgr->buf.daddr;
- mem.sz = gxp->debug_dump_mgr->buf.size;
- } else {
- mem.host_addr = 0;
- mem.device_addr = 0;
- mem.sz = 0;
+ if (!vd->vd_cfg.vaddr || !vd->core_cfg.vaddr) {
+ dev_warn(
+ gxp->dev,
+ "Missing VD and core CFG in image config, firmware is not bootable\n");
+ return;
}
-
- return mem;
-}
-
-static struct fw_memory init_app_user_memory(struct app_metadata *app,
- int memory_size)
-{
- struct fw_memory mem;
-
- mem_alloc_allocate(app->mgr->allocator, &mem, memory_size,
- DEFAULT_APP_USER_MEM_ALIGNMENT);
-
- return mem;
-}
-
-static struct fw_memory init_app_semaphores(struct app_metadata *app)
-{
- struct gxp_semaphores_descriptor *sm_region;
- struct fw_memory mem;
- uint32_t mem_size;
- uint32_t semaphore_count;
- int core;
- int i;
-
- semaphore_count = NUM_SYSTEM_SEMAPHORES;
- mem_size = sizeof(*sm_region) +
- semaphore_count * sizeof(sm_region->semaphores[0]);
-
- mem_alloc_allocate(app->mgr->allocator, &mem, mem_size,
- __alignof__(struct gxp_semaphores_descriptor));
-
- sm_region = mem.host_addr;
- sm_region->application_id = app->application_id;
- sm_region->protection_barrier = app->mgr->semaphores_regions_barrier;
-
- core = 0;
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if (app->core_list & BIT(i))
- sm_region->wakeup_doorbells[core++] =
- app->mgr->semaphore_doorbells[i];
- sm_region->woken_pending_semaphores[i] = 0;
+ /* Set up VD config region. */
+ vd_desc = vd->vd_cfg.vaddr;
+ vd_desc->application_id = DEFAULT_APP_ID;
+ vd_desc->vd_is_initialized = 0;
+ /* Set up core config region. */
+ job.workers_count = vd->num_cores;
+ for (i = 0; i < ARRAY_SIZE(job.worker_to_fw); i++) {
+ /*
+ * Kernel-initiated workloads always act like the entire VD is
+ * one giant N-core job where N is the number of cores allocated
+ * to that VD.
+ * The MCU, on the other hand, can have multiple jobs dispatched
+ * to the same VD at the same time.
+ */
+ if (i < job.workers_count)
+ job.worker_to_fw[i] = i;
+ else
+ job.worker_to_fw[i] = -1;
}
-
- sm_region->num_items = semaphore_count;
- for (i = 0; i < semaphore_count; i++) {
- sm_region->semaphores[i].users_count = 0;
- sm_region->semaphores[i].count = 0;
- sm_region->semaphores[i].waiters = 0;
+ /* Give each VD a unique HW resources slot. */
+ job.hardware_resources_slot = gxp_vd_hw_slot_id(vd);
+ /* Assign the same job descriptor to all cores in this VD */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ core_cfg = vd->core_cfg.vaddr +
+ vd->core_cfg.size / GXP_NUM_CORES * i;
+ core_cfg->job_descriptor = job;
}
-
- return mem;
}
-static struct fw_memory init_app_cores(struct app_metadata *app)
+static struct core_telemetry_descriptor *
+gxp_fw_data_get_core_telemetry_descriptor(struct gxp_dev *gxp, u8 type)
{
- struct gxp_cores_descriptor *cd_region;
- struct gxp_queue_info *q_info;
- struct fw_memory mem;
- uint32_t mem_size;
- int semaphore_id;
- int core_count;
- int i;
- const int cmd_queue_items = CORE_TO_CORE_MBX_CMD_COUNT;
- const int resp_queue_items = CORE_TO_CORE_MBX_RSP_COUNT;
-
- /* Core info structures. */
- core_count = app->core_count;
- mem_size =
- sizeof(*cd_region) + core_count * sizeof(cd_region->cores[0]);
-
- mem_alloc_allocate(app->mgr->allocator, &mem, mem_size,
- __alignof__(struct gxp_cores_descriptor));
-
- cd_region = mem.host_addr;
- cd_region->num_items = core_count;
-
- /* Command and response queues. */
- semaphore_id = 0;
- for (i = 0; i < core_count; i++) {
- /* Allocate per-core command queue storage. */
- mem_size = cmd_queue_items *
- sizeof(struct gxp_core_to_core_command);
- mem_alloc_allocate(
- app->mgr->allocator, &app->core_cmd_queues_mem[i],
- mem_size, __alignof__(struct gxp_core_to_core_command));
-
- /* Update per-core command queue info. */
- q_info = &cd_region->cores[i].incoming_commands_queue;
- q_info->header.storage =
- app->core_cmd_queues_mem[i].device_addr;
- q_info->header.head_idx = 0;
- q_info->header.tail_idx = 0;
- q_info->header.element_size =
- sizeof(struct gxp_core_to_core_command);
- q_info->header.elements_count = cmd_queue_items;
- q_info->access_sem_id = semaphore_id++;
- q_info->posted_slots_sem_id = semaphore_id++;
- q_info->free_slots_sem_id = semaphore_id++;
-
- /* Allocate per-core response queue storage. */
- mem_size = resp_queue_items *
- sizeof(struct gxp_core_to_core_response);
- mem_alloc_allocate(
- app->mgr->allocator, &app->core_rsp_queues_mem[i],
- mem_size,
- __alignof__(struct gxp_core_to_core_response));
-
- /* Update per-core response queue info. */
- q_info = &cd_region->cores[i].incoming_responses_queue;
- q_info->header.storage =
- app->core_rsp_queues_mem[i].device_addr;
- q_info->header.head_idx = 0;
- q_info->header.tail_idx = 0;
- q_info->header.element_size =
- sizeof(struct gxp_core_to_core_response);
- q_info->header.elements_count = resp_queue_items;
- q_info->access_sem_id = semaphore_id++;
- q_info->posted_slots_sem_id = semaphore_id++;
- q_info->free_slots_sem_id = semaphore_id++;
- }
-
- return mem;
-}
+ struct gxp_core_telemetry_descriptor *descriptor =
+ &gxp->data_mgr->core_telemetry_desc;
-static struct fw_memory init_application(struct app_metadata *app)
-{
- struct gxp_application_descriptor *app_region;
- struct fw_memory mem;
- const int user_mem_size = DEFAULT_APP_USER_MEM_SIZE;
-
- /* App's system memory. */
- app->user_mem = init_app_user_memory(app, user_mem_size);
-
- /* App's doorbells region. */
- app->doorbells_mem = init_doorbells(app);
-
- /* App's sync barriers region. */
- app->sync_barriers_mem = init_sync_barriers(app);
-
- /* App's semaphores region. */
- app->semaphores_mem = init_app_semaphores(app);
-
- /* App's cores info and core-to-core queues. */
- app->cores_mem = init_app_cores(app);
-
- /* App's descriptor. */
- mem_alloc_allocate(app->mgr->allocator, &mem, sizeof(*app_region),
- __alignof__(struct gxp_application_descriptor));
- app_region = mem.host_addr;
- app_region->application_id = app->application_id;
- app_region->core_count = app->core_count;
- app_region->cores_mask = app->core_list;
- app_region->threads_count = DEFAULT_APP_THREAD_COUNT;
- app_region->tcm_memory_per_bank = DEFAULT_APP_TCM_PER_BANK;
- app_region->system_memory_size = user_mem_size;
- app_region->system_memory_addr = app->user_mem.device_addr;
- app_region->doorbells_dev_addr = app->doorbells_mem.device_addr;
- app_region->sync_barriers_dev_addr = app->sync_barriers_mem.device_addr;
- app_region->semaphores_dev_addr = app->semaphores_mem.device_addr;
- app_region->cores_info_dev_addr = app->cores_mem.device_addr;
-
- return mem;
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ return descriptor->per_core_loggers;
+ else if (type == GXP_TELEMETRY_TYPE_TRACING)
+ return descriptor->per_core_tracers;
+ else
+ return ERR_PTR(-EINVAL);
}
int gxp_fw_data_init(struct gxp_dev *gxp)
{
struct gxp_fw_data_manager *mgr;
- int res;
- int i;
+ void *virt;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
- gxp->data_mgr = mgr;
- /*
- * TODO (b/200169232) Using memremap until devm_memremap is added to
- * the GKI ABI
- */
- mgr->fw_data_virt = memremap(gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
- MEMREMAP_WC);
+ virt = memremap(gxp->fwdatabuf.paddr, gxp->fwdatabuf.size, MEMREMAP_WC);
- if (IS_ERR_OR_NULL(mgr->fw_data_virt)) {
+ if (IS_ERR_OR_NULL(virt)) {
dev_err(gxp->dev, "Failed to map fw data region\n");
- res = -ENODEV;
- goto err;
- }
-
- /* Instantiate the doorbells allocator with all doorbells */
- mgr->doorbell_allocator =
- range_alloc_create(/*start=*/0, DOORBELL_COUNT);
- if (IS_ERR(mgr->doorbell_allocator)) {
- dev_err(gxp->dev, "Failed to create doorbells allocator\n");
- res = PTR_ERR(mgr->doorbell_allocator);
- mgr->doorbell_allocator = NULL;
- goto err;
- }
-
- /* Instantiate the sync barriers allocator with all sync barriers */
- mgr->sync_barrier_allocator =
- range_alloc_create(/*start=*/0, SYNC_BARRIER_COUNT);
- if (IS_ERR(mgr->sync_barrier_allocator)) {
- dev_err(gxp->dev, "Failed to create sync barriers allocator\n");
- res = PTR_ERR(mgr->sync_barrier_allocator);
- mgr->sync_barrier_allocator = NULL;
- goto err;
- }
-
- /* Allocate doorbells */
-
- /* Pinned: Cores wakeup doorbell */
- for (i = 0; i < GXP_NUM_CORES; i++) {
- mgr->core_wakeup_doorbells[i] = DOORBELL_ID_CORE_WAKEUP(i);
- res = range_alloc_get(mgr->doorbell_allocator,
- mgr->core_wakeup_doorbells[i]);
- if (res)
- goto err;
- }
-
- /* Semaphores operation doorbells */
- for (i = 0; i < GXP_NUM_CORES; i++) {
- range_alloc_get_any(mgr->doorbell_allocator,
- &mgr->semaphore_doorbells[i]);
- }
-
- /* Allocate sync barriers */
-
- /* Pinned: UART sync barrier */
- mgr->uart_sync_barrier = SYNC_BARRIER_ID_UART;
- mgr->uart_region_barrier = SYNC_BARRIER_ID_UART;
- res = range_alloc_get(mgr->sync_barrier_allocator,
- mgr->uart_sync_barrier);
- if (res)
- goto err;
-
- /* Doorbell regions for all apps */
- res = range_alloc_get_any(mgr->sync_barrier_allocator,
- &mgr->doorbell_regions_barrier);
- if (res)
- goto err;
-
- /* Sync barrier regions for all apps */
- res = range_alloc_get_any(mgr->sync_barrier_allocator,
- &mgr->sync_barrier_regions_barrier);
- if (res)
- goto err;
-
- /* Timer regions for all apps */
- res = range_alloc_get_any(mgr->sync_barrier_allocator,
- &mgr->timer_regions_barrier);
- if (res)
- goto err;
-
- /* Watchdog regions for all apps */
- res = range_alloc_get_any(mgr->sync_barrier_allocator,
- &mgr->watchdog_region_barrier);
- if (res)
- goto err;
-
- /* Semaphore regions for all apps */
- res = range_alloc_get_any(mgr->sync_barrier_allocator,
- &mgr->semaphores_regions_barrier);
- if (res)
- goto err;
-
- /* Shared firmware data memory region */
- mgr->allocator = mem_alloc_create(gxp, mgr->fw_data_virt,
- gxp->fwdatabuf.daddr,
- gxp->fwdatabuf.size);
- if (IS_ERR(mgr->allocator)) {
- dev_err(gxp->dev,
- "Failed to create the FW data memory allocator\n");
- res = PTR_ERR(mgr->allocator);
- mgr->allocator = NULL;
- goto err;
+ return -ENODEV;
}
+ gxp->fwdatabuf.vaddr = virt;
/* Populate the region with a pre-defined pattern. */
- memset(mgr->fw_data_virt, FW_DATA_DEBUG_PATTERN, gxp->fwdatabuf.size);
-
- /* Allocate the root system descriptor from the region */
- mem_alloc_allocate(mgr->allocator, &mgr->sys_desc_mem,
- sizeof(struct gxp_system_descriptor),
- __alignof__(struct gxp_system_descriptor));
- mgr->system_desc = mgr->sys_desc_mem.host_addr;
-
- /* Allocate the watchdog descriptor from the region */
- mgr->wdog_mem = init_watchdog(mgr);
- mgr->system_desc->watchdog_dev_addr = mgr->wdog_mem.device_addr;
-
- /* Allocate the descriptor for device-side telemetry */
- mgr->telemetry_mem = init_telemetry(mgr);
- mgr->system_desc->telemetry_dev_addr = mgr->telemetry_mem.device_addr;
-
- /* Set the debug dump region parameters if available */
- mgr->debug_dump_mem = init_debug_dump(gxp);
- mgr->system_desc->debug_dump_dev_addr = mgr->debug_dump_mem.device_addr;
-
- return res;
-
-err:
- range_alloc_destroy(mgr->sync_barrier_allocator);
- range_alloc_destroy(mgr->doorbell_allocator);
- devm_kfree(gxp->dev, mgr);
- return res;
-}
-
-void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list)
-{
- struct gxp_fw_data_manager *mgr = gxp->data_mgr;
- struct app_metadata *app;
- int i;
-
- app = kzalloc(sizeof(struct app_metadata), GFP_KERNEL);
- if (!app)
- return ERR_PTR(-ENOMEM);
-
- /* Create resource and memory allocations for new app */
- app->mgr = mgr;
- app->application_id = DEFAULT_APP_ID;
- app->core_count = hweight_long(core_list);
- app->core_list = core_list;
-
- /* User doorbells */
- app->user_doorbells_count = DEFAULT_APP_USER_DOORBELL_COUNT;
- app->user_doorbells =
- kcalloc(app->user_doorbells_count, sizeof(int), GFP_KERNEL);
- for (i = 0; i < app->user_doorbells_count; i++) {
- range_alloc_get_any(mgr->doorbell_allocator,
- &app->user_doorbells[i]);
- }
-
- /* User sync barrier */
- app->user_barriers_count = DEFAULT_APP_USER_BARRIER_COUNT;
- app->user_barriers =
- kcalloc(app->user_barriers_count, sizeof(int), GFP_KERNEL);
- for (i = 0; i < app->user_barriers_count; i++) {
- range_alloc_get_any(mgr->sync_barrier_allocator,
- &app->user_barriers[i]);
- }
-
- /* Application region. */
- app->app_mem = init_application(app);
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if (core_list & BIT(i)) {
- mgr->system_desc->app_descriptor_dev_addr[i] =
- app->app_mem.device_addr;
- }
- }
+ memset(virt, FW_DATA_DEBUG_PATTERN, gxp->fwdatabuf.size);
+ gxp->data_mgr = mgr;
- return app;
+ return 0;
}
-void gxp_fw_data_destroy_app(struct gxp_dev *gxp, void *application)
+void gxp_fw_data_destroy(struct gxp_dev *gxp)
{
- struct app_metadata *app = application;
struct gxp_fw_data_manager *mgr = gxp->data_mgr;
- int i;
- for (i = 0; i < app->user_doorbells_count; i++)
- range_alloc_put(mgr->doorbell_allocator,
- app->user_doorbells[i]);
- kfree(app->user_doorbells);
-
- for (i = 0; i < app->user_barriers_count; i++)
- range_alloc_put(mgr->sync_barrier_allocator,
- app->user_barriers[i]);
- kfree(app->user_barriers);
-
- mem_alloc_free(mgr->allocator, &app->user_mem);
- mem_alloc_free(mgr->allocator, &app->doorbells_mem);
- mem_alloc_free(mgr->allocator, &app->sync_barriers_mem);
- mem_alloc_free(mgr->allocator, &app->semaphores_mem);
- mem_alloc_free(mgr->allocator, &app->cores_mem);
- for (i = 0; i < app->core_count; i++) {
- mem_alloc_free(mgr->allocator, &app->core_cmd_queues_mem[i]);
- mem_alloc_free(mgr->allocator, &app->core_rsp_queues_mem[i]);
- }
- mem_alloc_free(mgr->allocator, &app->app_mem);
+ if (gxp->fwdatabuf.vaddr)
+ memunmap(gxp->fwdatabuf.vaddr);
- kfree(app);
+ devm_kfree(gxp->dev, mgr);
+ gxp->data_mgr = NULL;
}
-void gxp_fw_data_destroy(struct gxp_dev *gxp)
+void gxp_fw_data_populate_vd_cfg(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
{
- struct gxp_fw_data_manager *mgr = gxp->data_mgr;
-
- if (!mgr)
- return;
-
- mem_alloc_free(mgr->allocator, &mgr->telemetry_mem);
- mem_alloc_free(mgr->allocator, &mgr->wdog_mem);
- mem_alloc_free(mgr->allocator, &mgr->sys_desc_mem);
- mem_alloc_destroy(mgr->allocator);
-
- range_alloc_destroy(mgr->sync_barrier_allocator);
- range_alloc_destroy(mgr->doorbell_allocator);
-
- /* TODO (b/200169232) Remove this once we're using devm_memremap */
- if (mgr->fw_data_virt) {
- memunmap(mgr->fw_data_virt);
- mgr->fw_data_virt = NULL;
- }
-
- if (gxp->data_mgr) {
- devm_kfree(gxp->dev, gxp->data_mgr);
- gxp->data_mgr = NULL;
- }
+ if (gxp_fw_data_use_per_vd_config(vd))
+ _gxp_fw_data_populate_vd_cfg(gxp, vd);
}
-int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 host_status,
- dma_addr_t *buffer_addrs,
- u32 per_buffer_size)
+int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 host_status,
+ struct gxp_coherent_buf *buffers,
+ u32 per_buffer_size)
{
- struct gxp_telemetry_descriptor *descriptor =
- gxp->data_mgr->telemetry_mem.host_addr;
- struct telemetry_descriptor *core_descriptors;
+ struct core_telemetry_descriptor *core_descriptors;
uint core;
+ bool enable;
- if (type == GXP_TELEMETRY_TYPE_LOGGING)
- core_descriptors = descriptor->per_core_loggers;
- else if (type == GXP_TELEMETRY_TYPE_TRACING)
- core_descriptors = descriptor->per_core_tracers;
- else
- return -EINVAL;
+ core_descriptors = gxp_fw_data_get_core_telemetry_descriptor(gxp, type);
+ if (IS_ERR(core_descriptors))
+ return PTR_ERR(core_descriptors);
- /* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (buffer_addrs[core] > U32_MAX)
- return -EINVAL;
- }
+ enable = (host_status & GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED);
+
+ if (enable) {
+ /* Validate that the provided IOVAs are addressable (i.e. 32-bit) */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (buffers && buffers[core].dsp_addr > U32_MAX &&
+ buffers[core].size == per_buffer_size)
+ return -EINVAL;
+ }
- for (core = 0; core < GXP_NUM_CORES; core++) {
- core_descriptors[core].host_status = host_status;
- core_descriptors[core].buffer_addr = (u32)buffer_addrs[core];
- core_descriptors[core].buffer_size = per_buffer_size;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ core_descriptors[core].host_status = host_status;
+ core_descriptors[core].buffer_addr =
+ (u32)buffers[core].dsp_addr;
+ core_descriptors[core].buffer_size = per_buffer_size;
+ }
+ } else {
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ core_descriptors[core].host_status = host_status;
+ core_descriptors[core].buffer_addr = 0;
+ core_descriptors[core].buffer_size = 0;
+ }
}
return 0;
}
-u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
- u8 type)
+u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type)
{
- struct gxp_telemetry_descriptor *descriptor =
- gxp->data_mgr->telemetry_mem.host_addr;
+ struct gxp_system_descriptor_rw *des_rw = gxp->data_mgr->sys_desc_rw;
if (core >= GXP_NUM_CORES)
return 0;
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- return descriptor->per_core_loggers[core].device_status;
+ return des_rw->telemetry_desc.per_core_loggers[core]
+ .device_status;
case GXP_TELEMETRY_TYPE_TRACING:
- return descriptor->per_core_tracers[core].device_status;
+ return des_rw->telemetry_desc.per_core_tracers[core]
+ .device_status;
default:
return 0;
}
}
+
+struct gxp_mapped_resource gxp_fw_data_resource(struct gxp_dev *gxp)
+{
+ /*
+	 * In direct mode, the config regions are programmed by the host (us); in
+	 * MCU mode, they are programmed by the MCU.
+ */
+ if (gxp_is_direct_mode(gxp)) {
+ struct gxp_mapped_resource tmp = gxp->fwdatabuf;
+
+		/* Leave the first half for gxp_fw_data_init() to use. */
+ tmp.vaddr += tmp.size / 2;
+ tmp.paddr += tmp.size / 2;
+ return tmp;
+ } else {
+ return gxp->shared_buf;
+ }
+}
+
+void *gxp_fw_data_system_cfg(struct gxp_dev *gxp)
+{
+ /* Use the end of the shared region for system cfg. */
+ return gxp_fw_data_resource(gxp).vaddr + GXP_SHARED_BUFFER_SIZE -
+ GXP_FW_DATA_SYSCFG_SIZE;
+}
+
+void gxp_fw_data_populate_system_config(struct gxp_dev *gxp)
+{
+ set_system_cfg_region(gxp, gxp_fw_data_system_cfg(gxp));
+}
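
Illustrative sketch, not part of the patch: the call order the reworked API above implies for a direct-mode device. The surrounding probe flow is an assumption; only the two calls themselves come from this file.

static int example_fw_data_bringup(struct gxp_dev *gxp)
{
	int ret;

	/* Maps gxp->fwdatabuf and fills it with FW_DATA_DEBUG_PATTERN. */
	ret = gxp_fw_data_init(gxp);
	if (ret)
		return ret;

	/*
	 * Writes the R/O and R/W system descriptors at the end of the region
	 * returned by gxp_fw_data_resource(); per the header documentation,
	 * this must run only after fwdatabuf/shared_buf have been set up.
	 */
	gxp_fw_data_populate_system_config(gxp);
	return 0;
}
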
diff --git a/gxp-firmware-data.h b/gxp-firmware-data.h
index e9851ed..89bf9e4 100644
--- a/gxp-firmware-data.h
+++ b/gxp-firmware-data.h
@@ -9,7 +9,18 @@
#ifndef __GXP_FIRMWARE_DATA_H__
#define __GXP_FIRMWARE_DATA_H__
+#include <linux/sizes.h>
+
+#include "gxp-dma.h"
#include "gxp-internal.h"
+#include "gxp-vd.h"
+
+#define GXP_FW_DATA_SYSCFG_SIZE SZ_8K
+
+enum gxp_fw_data_protocol {
+ /* Use the per-VD configuration region. */
+ FW_DATA_PROTOCOL_PER_VD_CONFIG = 2,
+};
/**
* gxp_fw_data_init() - Initializes the FW data manager submodule.
@@ -24,72 +35,97 @@
int gxp_fw_data_init(struct gxp_dev *gxp);
/**
- * gxp_fw_data_create_app() - Allocates HW and memory resources needed to create
- * a GXP device application (1:1 with a GXP driver
- * virtual device) used by the specified physical
- * cores.
- * @gxp: The parent GXP device
- * @core_list: A bitmap of the physical cores used in this application
- *
- * Return:
- * ptr - A pointer of the newly created application handle, an error pointer
- * (PTR_ERR) otherwise.
- * -ENOMEM - Insufficient memory to create the application
- */
-void *gxp_fw_data_create_app(struct gxp_dev *gxp, uint core_list);
-
-/**
- * gxp_fw_data_destroy_app() - Deallocates the HW and memory resources used by
- * the specified application.
+ * gxp_fw_data_destroy() - Destroys the FW data manager submodule and free all
+ * its resources.
* @gxp: The parent GXP device
- * @application: The handle to the application to deallocate
*/
-void gxp_fw_data_destroy_app(struct gxp_dev *gxp, void *application);
+void gxp_fw_data_destroy(struct gxp_dev *gxp);
/**
- * gxp_fw_data_destroy() - Destroys the FW data manager submodule and free all
- * its resources.
+ * gxp_fw_data_populate_vd_cfg() - Populates the VD's per-core and per-VD config
+ *                                 regions with their resource settings.
* @gxp: The parent GXP device
+ * @vd: The virtual device to be populated for
*/
-void gxp_fw_data_destroy(struct gxp_dev *gxp);
+void gxp_fw_data_populate_vd_cfg(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd);
/**
- * gxp_fw_data_set_telemetry_descriptors() - Set new logging or tracing buffers
- * for firmware to write to.
+ * gxp_fw_data_set_core_telemetry_descriptors() - Set new logging or tracing
+ * buffers for firmware to write
+ * to.
* @gxp: The GXP device to set buffer descriptors for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- * @host_status: Bitfield describing the host's telemetry status. See the
+ * @host_status: Bitfield describing the host's core telemetry status. See the
* bit definitions in gxp-host-device-structs.h.
- * @buffer_addrs: An array containing the IOVA each physical core can access
- * its logging or tracing buffer at
+ * @buffers: An array of coherent buffers for logging and tracing
* @per_buffer_size: The size of each core's logging or tracing buffer in bytes
*
* `gxp_fw_data_init()` must have been called before this function.
*
- * Caller must hold gxp->telemetry_mgr's lock.
+ * Caller must hold gxp->core_telemetry_mgr's lock.
*
* Return:
* 0 - Success
* -EINVAL - Invalid @type provided or @buffer_addrs are not addressable by @gxp
*/
-int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
- u32 host_status,
- dma_addr_t *buffer_addrs,
- u32 per_buffer_size);
+int gxp_fw_data_set_core_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 host_status,
+ struct gxp_coherent_buf *buffers,
+ u32 per_buffer_size);
/**
- * gxp_fw_data_get_telemetry_device_status() - Returns a bitfield describing a
- * core's telemetry status.
- * @gxp: The GXP device to get device telemetry status for
- * @core: The core in @gxp to get the device telemetry status for
+ * gxp_fw_data_get_core_telemetry_device_status() - Returns a bitfield
+ * describing a core's
+ * telemetry status.
+ * @gxp: The GXP device to get core telemetry status for
+ * @core: The core in @gxp to get the core telemetry status for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
*
- * Caller must hold gxp->telemetry_mgr's lock.
+ * Caller must hold gxp->core_telemetry_mgr's lock.
*
* Return: The bitfield describing @core's telemetry status. If @core or @type
* are invalid, the result will always be 0.
*/
-u32 gxp_fw_data_get_telemetry_device_status(struct gxp_dev *gxp, uint core,
- u8 type);
+u32 gxp_fw_data_get_core_telemetry_device_status(struct gxp_dev *gxp, uint core,
+ u8 type);
+
+static inline bool gxp_fw_data_use_per_vd_config(struct gxp_virtual_device *vd)
+{
+ return vd->config_version >= FW_DATA_PROTOCOL_PER_VD_CONFIG;
+}
+
+/**
+ * gxp_fw_data_resource() - Returns the resource of data region for host<->core
+ * communication.
+ * @gxp: The GXP device
+ *
+ * This function requires either @gxp->fwdatabuf or @gxp->shared_buf to be
+ * initialized, so it cannot be called at device probe time.
+ *
+ * Return: The resource.
+ */
+struct gxp_mapped_resource gxp_fw_data_resource(struct gxp_dev *gxp);
+
+/**
+ * gxp_fw_data_system_cfg() - Returns the pointer to the system config region.
+ * @gxp: The GXP device
+ *
+ * This function requires either @gxp->fwdatabuf or @gxp->shared_buf to be
+ * initialized, so it cannot be called at device probe time.
+ *
+ * Return: The pointer. This function never fails.
+ */
+void *gxp_fw_data_system_cfg(struct gxp_dev *gxp);
+
+/**
+ * gxp_fw_data_populate_system_config() - Populate settings onto firmware system
+ * config region.
+ * @gxp: The GXP device
+ *
+ * This function is expected to be called after "after_probe" in the probe
+ * procedure since it uses gxp_fw_data_system_cfg().
+ */
+void gxp_fw_data_populate_system_config(struct gxp_dev *gxp);
#endif /* __GXP_FIRMWARE_DATA_H__ */
diff --git a/gxp-firmware-loader.c b/gxp-firmware-loader.c
new file mode 100644
index 0000000..5f64bd4
--- /dev/null
+++ b/gxp-firmware-loader.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GXP firmware loading management.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <gcip/gcip-common-image-header.h>
+#include <gcip/gcip-image-config.h>
+
+#include "gxp-config.h"
+#include "gxp-firmware-loader.h"
+#include "gxp-firmware.h"
+#include "gxp-internal.h"
+
+#if GXP_HAS_MCU
+#include <linux/gsa/gsa_dsp.h>
+
+#include "gxp-mcu-firmware.h"
+#endif
+
+#if GXP_HAS_MCU
+static int gxp_firmware_loader_gsa_auth(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+ int ret;
+ uint core;
+ dma_addr_t headers_dma_addr;
+ void *header_vaddr;
+ const u8 *data;
+ struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
+
+ if (!mcu_fw->is_secure) {
+ dev_warn(
+ gxp->dev,
+ "No need to do firmware authentication with non-secure privilege\n");
+ return 0;
+ }
+ if (!gxp->gsa_dev) {
+ dev_warn(
+ gxp->dev,
+ "No GSA device available, skipping firmware authentication\n");
+ return 0;
+ }
+ /* Authenticate MCU firmware */
+ header_vaddr = dma_alloc_coherent(gxp->gsa_dev, GCIP_FW_HEADER_SIZE,
+ &headers_dma_addr, GFP_KERNEL);
+ if (!header_vaddr) {
+ dev_err(gxp->dev,
+ "Failed to allocate coherent memory for header\n");
+ return -ENOMEM;
+ }
+ memcpy(header_vaddr, mgr->mcu_firmware->data, GCIP_FW_HEADER_SIZE);
+ ret = gsa_load_dsp_fw_image(gxp->gsa_dev, headers_dma_addr,
+ mcu_fw->image_buf.paddr);
+ if (ret) {
+ dev_err(gxp->dev, "MCU fw GSA authentication fails");
+ goto err_load_mcu_fw;
+ }
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ data = mgr->core_firmware[core]->data;
+ /* Authenticate core firmware */
+ memcpy(header_vaddr, data, GCIP_FW_HEADER_SIZE);
+ ret = gsa_load_dsp_fw_image(gxp->gsa_dev, headers_dma_addr,
+ gxp->fwbufs[core].paddr);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Core %u firmware authentication fails", core);
+ goto err_load_core_fw;
+ }
+ }
+ dma_free_coherent(gxp->gsa_dev, GCIP_FW_HEADER_SIZE, header_vaddr,
+ headers_dma_addr);
+ return 0;
+err_load_core_fw:
+ gsa_unload_dsp_fw_image(gxp->gsa_dev);
+err_load_mcu_fw:
+ dma_free_coherent(gxp->gsa_dev, GCIP_FW_HEADER_SIZE, header_vaddr,
+ headers_dma_addr);
+ return ret;
+}
+
+static void gxp_firmware_loader_gsa_unload(struct gxp_dev *gxp)
+{
+ struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
+
+ if (mcu_fw->is_secure)
+ gsa_unload_dsp_fw_image(gxp->gsa_dev);
+}
+#endif /* GXP_HAS_MCU */
+
+int gxp_firmware_loader_init(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+ gxp->fw_loader_mgr = mgr;
+ mutex_init(&mgr->lock);
+ return 0;
+}
+
+void gxp_firmware_loader_destroy(struct gxp_dev *gxp)
+{
+ gxp_firmware_loader_unload(gxp);
+}
+
+void gxp_firmware_loader_set_core_fw_name(struct gxp_dev *gxp,
+ const char *fw_name)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+
+ mutex_lock(&mgr->lock);
+ mgr->core_firmware_name = kstrdup(fw_name, GFP_KERNEL);
+ mutex_unlock(&mgr->lock);
+}
+
+char *gxp_firmware_loader_get_core_fw_name(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+ char *name;
+
+ mutex_lock(&mgr->lock);
+ if (mgr->core_firmware_name)
+ name = kstrdup(mgr->core_firmware_name, GFP_KERNEL);
+ else
+ name = kstrdup(DSP_FIRMWARE_DEFAULT_PREFIX, GFP_KERNEL);
+ mutex_unlock(&mgr->lock);
+ return name;
+}
+
+/*
+ * Fetches and records image config of the first core firmware.
+ */
+static void gxp_firmware_loader_get_core_image_config(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+ struct gcip_common_image_header *hdr =
+ (struct gcip_common_image_header *)mgr->core_firmware[0]->data;
+ struct gcip_image_config *cfg;
+
+ if (unlikely(mgr->core_firmware[0]->size < GCIP_FW_HEADER_SIZE))
+ return;
+ cfg = get_image_config_from_hdr(hdr);
+ if (cfg)
+ mgr->core_img_cfg = *cfg;
+ else
+ dev_warn(gxp->dev,
+ "Core 0 Firmware doesn't have a valid image config");
+}
+
+/*
+ * Call this function once mgr->core_firmware has been populated.
+ * It caches the core 0 image config and sets mgr->is_loaded to true.
+ */
+static void gxp_firmware_loader_has_loaded(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+
+ lockdep_assert_held(&mgr->lock);
+ gxp_firmware_loader_get_core_image_config(gxp);
+ mgr->is_loaded = true;
+}
+
+static void gxp_firmware_loader_unload_core_firmware(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+ uint core;
+
+ lockdep_assert_held(&mgr->lock);
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (mgr->core_firmware[core]) {
+ release_firmware(mgr->core_firmware[core]);
+ mgr->core_firmware[core] = NULL;
+ }
+ }
+ kfree(mgr->core_firmware_name);
+ mgr->core_firmware_name = NULL;
+}
+
+#if GXP_HAS_MCU
+static void gxp_firmware_loader_unload_mcu_firmware(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+
+ lockdep_assert_held(&mgr->lock);
+ if (!gxp_is_direct_mode(gxp)) {
+ if (mgr->mcu_firmware) {
+ gxp_mcu_firmware_unload(gxp, mgr->mcu_firmware);
+ release_firmware(mgr->mcu_firmware);
+ mgr->mcu_firmware = NULL;
+ }
+ kfree(mgr->mcu_firmware_name);
+ mgr->mcu_firmware_name = NULL;
+ }
+}
+#endif /* GXP_HAS_MCU */
+
+static int gxp_firmware_loader_load_locked(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+ int ret;
+
+ lockdep_assert_held(&mgr->lock);
+ ret = gxp_firmware_load_core_firmware(gxp, mgr->core_firmware_name,
+ mgr->core_firmware);
+ if (ret)
+ return ret;
+
+#if GXP_HAS_MCU
+ if (!gxp_is_direct_mode(gxp)) {
+ ret = gxp_mcu_firmware_load(gxp, mgr->mcu_firmware_name,
+ &mgr->mcu_firmware);
+ if (ret)
+ goto err_unload_core;
+
+ ret = gxp_firmware_loader_gsa_auth(gxp);
+ if (ret)
+ goto err_unload_mcu;
+ }
+#endif
+ ret = gxp_firmware_rearrange_elf(gxp, mgr->core_firmware);
+ if (ret)
+ goto err_unload;
+ gxp_firmware_loader_has_loaded(gxp);
+ return 0;
+
+err_unload:
+#if GXP_HAS_MCU
+ if (!gxp_is_direct_mode(gxp))
+ gxp_firmware_loader_gsa_unload(gxp);
+err_unload_mcu:
+ if (!gxp_is_direct_mode(gxp))
+ gxp_firmware_loader_unload_mcu_firmware(gxp);
+err_unload_core:
+#endif
+ gxp_firmware_loader_unload_core_firmware(gxp);
+ return ret;
+}
+
+int gxp_firmware_loader_load_if_needed(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+ if (mgr->is_loaded)
+ goto out;
+ ret = gxp_firmware_loader_load_locked(gxp);
+out:
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+
+void gxp_firmware_loader_unload(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+
+ mutex_lock(&mgr->lock);
+ if (mgr->is_loaded) {
+#if GXP_HAS_MCU
+ gxp_firmware_loader_gsa_unload(gxp);
+ gxp_firmware_loader_unload_mcu_firmware(gxp);
+#endif
+ gxp_firmware_loader_unload_core_firmware(gxp);
+ }
+ mgr->is_loaded = false;
+ mutex_unlock(&mgr->lock);
+}
+
+#if GXP_HAS_MCU
+void gxp_firmware_loader_set_mcu_fw_name(struct gxp_dev *gxp,
+ const char *fw_name)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+
+ mutex_lock(&mgr->lock);
+ mgr->mcu_firmware_name = kstrdup(fw_name, GFP_KERNEL);
+ mutex_unlock(&mgr->lock);
+}
+
+char *gxp_firmware_loader_get_mcu_fw_name(struct gxp_dev *gxp)
+{
+ struct gxp_firmware_loader_manager *mgr = gxp->fw_loader_mgr;
+ char *name;
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mcu_firmware_name)
+ name = kstrdup(mgr->mcu_firmware_name, GFP_KERNEL);
+ else
+ name = kstrdup(GXP_DEFAULT_MCU_FIRMWARE, GFP_KERNEL);
+ mutex_unlock(&mgr->lock);
+ return name;
+}
+#endif /* GXP_HAS_MCU */
diff --git a/gxp-firmware-loader.h b/gxp-firmware-loader.h
new file mode 100644
index 0000000..d081af2
--- /dev/null
+++ b/gxp-firmware-loader.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GXP firmware loading management.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#ifndef __GXP_FIRMWARE_LOADER_H_
+#define __GXP_FIRMWARE_LOADER_H_
+
+#include <gcip/gcip-image-config.h>
+
+#include "gxp-config.h"
+#include "gxp-internal.h"
+
+struct gxp_firmware_loader_manager {
+ const struct firmware *core_firmware[GXP_NUM_CORES];
+ char *core_firmware_name;
+ /*
+	 * Cached core 0 firmware image config, for easier fetching of config entries.
+	 * This is a copy, not a pointer into the firmware buffer, because we want to
+	 * forcibly change the privilege level to NS.
+	 * Only valid while the firmware is loaded.
+ */
+ struct gcip_image_config core_img_cfg;
+#if GXP_HAS_MCU
+ const struct firmware *mcu_firmware;
+ char *mcu_firmware_name;
+#endif
+ bool is_loaded;
+ /* Protects above fields */
+ struct mutex lock;
+};
+
+/*
+ * Initializes the firmware loader subsystem.
+ */
+int gxp_firmware_loader_init(struct gxp_dev *gxp);
+
+/*
+ * Tears down the firmware loader subsystem.
+ */
+void gxp_firmware_loader_destroy(struct gxp_dev *gxp);
+
+/*
+ * Requests and loads all firmware images, but only if they are not already loaded.
+ *
+ * Returns 0 on success, a negative errno on failure.
+ */
+int gxp_firmware_loader_load_if_needed(struct gxp_dev *gxp);
+
+/*
+ * Unloads firmware.
+ */
+void gxp_firmware_loader_unload(struct gxp_dev *gxp);
+
+/*
+ * Returns a copy of the core firmware name prefix; the caller must release it
+ * with kfree().
+ */
+char *gxp_firmware_loader_get_core_fw_name(struct gxp_dev *gxp);
+
+/*
+ * Sets the core firmware name prefix to be requested in
+ * `gxp_firmware_loader_load_if_needed()`.
+ * It is safe for the caller to release @fw_name after this function returns.
+ */
+void gxp_firmware_loader_set_core_fw_name(struct gxp_dev *gxp,
+ const char *fw_name);
+/*
+ * Returns a copy of the MCU firmware name; the caller must release it
+ * with kfree().
+ */
+char *gxp_firmware_loader_get_mcu_fw_name(struct gxp_dev *gxp);
+
+/*
+ * Sets the MCU firmware name to be requested in
+ * `gxp_firmware_loader_load_if_needed()`.
+ * It is safe for the caller to release @fw_name after this function returns.
+ */
+void gxp_firmware_loader_set_mcu_fw_name(struct gxp_dev *gxp,
+ const char *fw_name);
+
+#endif /* __GXP_FIRMWARE_LOADER_H_ */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index eb31f23..d532fdf 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -16,9 +16,17 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <gcip/gcip-alloc-helper.h>
+#include <gcip/gcip-common-image-header.h>
+#include <gcip/gcip-image-config.h>
+
#include "gxp-bpm.h"
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-doorbell.h"
+#include "gxp-firmware-data.h"
+#include "gxp-firmware-loader.h"
#include "gxp-firmware.h"
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
@@ -26,14 +34,13 @@
#include "gxp-mailbox.h"
#include "gxp-notification.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
-/* Files need to be copied to /lib/firmware */
-#define DSP_FIRMWARE_DEFAULT_PREFIX "gxp_fw_core"
+#if IS_ENABLED(CONFIG_GXP_TEST)
+#include "unittests/factory/fake-gxp-firmware.h"
+#endif
-#define FW_HEADER_SIZE (0x1000)
-#define FW_IMAGE_TYPE_OFFSET (0x400)
+#define FW_HEADER_SIZE GCIP_FW_HEADER_SIZE
static int gxp_dsp_fw_auth_disable;
module_param_named(dsp_fw_auth_disable, gxp_dsp_fw_auth_disable, int, 0660);
@@ -75,8 +82,10 @@ request_dsp_firmware(struct gxp_dev *gxp, char *name_prefix,
return ret;
err:
- for (core -= 1; core >= 0; core--)
+ for (core -= 1; core >= 0; core--) {
release_firmware(out_firmwares[core]);
+ out_firmwares[core] = NULL;
+ }
kfree(name_buf);
return ret;
}
@@ -96,16 +105,17 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
(ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
(ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
(ehdr->e_ident[EI_MAG3] != ELFMAG3)) {
- dev_err(gxp->dev, "Cannot load FW! Invalid ELF format.\n");
- return -EINVAL;
+ dev_info(gxp->dev, "Firmware is not an ELF, treated as raw binary.");
+ return 0;
}
/* go through the available ELF segments */
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- u64 da = phdr->p_paddr;
- u32 memsz = phdr->p_memsz;
- u32 filesz = phdr->p_filesz;
- u32 offset = phdr->p_offset;
+ const u64 da = phdr->p_paddr;
+ const u32 memsz = phdr->p_memsz;
+ const u32 filesz = phdr->p_filesz;
+ const u32 offset = phdr->p_offset;
+ const u32 p_flags = phdr->p_flags;
void *ptr;
if (phdr->p_type != PT_LOAD)
@@ -117,30 +127,31 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
if (!memsz)
continue;
- if (!((da >= (u32)buffer->daddr) &&
- ((da + memsz) <= ((u32)buffer->daddr +
- (u32)buffer->size)))) {
+ if (!(da >= buffer->daddr &&
+ da + memsz <= buffer->daddr + buffer->size)) {
/*
* Some BSS data may be referenced from TCM, and can be
* skipped while loading
*/
- dev_err(gxp->dev, "Segment out of bounds: da 0x%llx mem 0x%x. Skipping...\n",
+ dev_err(gxp->dev,
+ "Segment out of bounds: da %#llx mem %#x. Skipping...",
da, memsz);
continue;
}
- dev_notice(gxp->dev, "phdr: type %d da 0x%llx memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
+ dev_info(gxp->dev,
+ "phdr: da %#llx memsz %#x filesz %#x perm %d", da,
+ memsz, filesz, p_flags);
if (filesz > memsz) {
- dev_err(gxp->dev, "Bad phdr filesz 0x%x memsz 0x%x\n",
+ dev_err(gxp->dev, "Bad phdr filesz %#x memsz %#x",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > size) {
- dev_err(gxp->dev, "Truncated fw: need 0x%x avail 0x%zx\n",
+ dev_err(gxp->dev, "Truncated fw: need %#x avail %#zx",
offset + filesz, size);
ret = -EINVAL;
break;
@@ -149,8 +160,8 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
/* grab the kernel address for this device address */
ptr = buffer->vaddr + (da - buffer->daddr);
if (!ptr) {
- dev_err(gxp->dev, "Bad phdr: da 0x%llx mem 0x%x\n",
- da, memsz);
+ dev_err(gxp->dev, "Bad phdr: da %#llx mem %#x", da,
+ memsz);
ret = -EINVAL;
break;
}
@@ -194,6 +205,9 @@ gxp_firmware_authenticate(struct gxp_dev *gxp,
return 0;
}
+ if (!gxp_is_direct_mode(gxp))
+ return 0;
+
for (core = 0; core < GXP_NUM_CORES; core++) {
data = firmwares[core]->data;
size = firmwares[core]->size;
@@ -258,83 +272,98 @@ error:
return ret;
}
-/* Forward declaration for usage inside gxp_firmware_load(..). */
-static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
-
-static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core, bool verbose)
+static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core,
+ uint phys_core, bool verbose)
{
u32 reset_vec;
- reset_vec = gxp_read_32_core(gxp, core,
- GXP_REG_ALT_RESET_VECTOR);
+ reset_vec = gxp_read_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(phys_core));
if (verbose)
dev_notice(gxp->dev,
- "Current Aurora reset vector for core %u: 0x%x\n",
- core, reset_vec);
- gxp_write_32_core(gxp, core, GXP_REG_ALT_RESET_VECTOR,
- gxp->fwbufs[core].daddr);
+ "Current Aurora reset vector for core %u: %#x\n",
+ phys_core, reset_vec);
+ gxp_write_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(phys_core),
+ gxp->fwbufs[core].daddr);
if (verbose)
dev_notice(gxp->dev,
- "New Aurora reset vector for core %u: 0x%llx\n",
- core, gxp->fwbufs[core].daddr);
+ "New Aurora reset vector for core %u: %#llx\n",
+ phys_core, gxp->fwbufs[core].daddr);
}
-static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
+static void *get_scratchpad_base(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
{
- u32 offset;
- void __iomem *core_scratchpad_base;
- int ret;
-
- if (!gxp->firmwares[core])
- return -ENODEV;
-
- /* Load firmware to System RAM */
- ret = elf_load_segments(gxp,
- gxp->firmwares[core]->data + FW_HEADER_SIZE,
- gxp->firmwares[core]->size - FW_HEADER_SIZE,
- &gxp->fwbufs[core]);
- if (ret) {
- dev_err(gxp->dev, "Unable to load elf file\n");
- goto out_firmware_unload;
- }
+ void *mem;
+ size_t rw_size;
- memset(gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF, 0,
- AURORA_SCRATCHPAD_LEN);
+ if (vd && gxp_fw_data_use_per_vd_config(vd))
+ return vd->core_cfg.vaddr +
+ (vd->core_cfg.size / GXP_NUM_CORES) * core;
- core_scratchpad_base = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;
- offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
- writel(0, core_scratchpad_base + offset);
- offset = SCRATCHPAD_MSG_OFFSET(MSG_TOP_ACCESS_OK);
- writel(0, core_scratchpad_base + offset);
+ if (!vd || !vd->rwdata_sgt[core])
+ return gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;
- /* TODO(b/188970444): Cleanup logging of addresses */
- dev_notice(gxp->dev,
- "ELF loaded at virtual: %pK and physical: 0x%llx\n",
- gxp->fwbufs[core].vaddr, gxp->fwbufs[core].paddr);
+ /* Return the last AURORA_SCRATCHPAD_LEN of rwdata_sgt. */
+ mem = gcip_noncontiguous_sgt_to_mem(vd->rwdata_sgt[core]);
+ rw_size = gxp->fwbufs[core].size - vd->fw_ro_size;
+ return mem + rw_size - AURORA_SCRATCHPAD_LEN;
+}
- /* Configure bus performance monitors */
- gxp_bpm_configure(gxp, core, INST_BPM_OFFSET, BPM_EVENT_READ_XFER);
- gxp_bpm_configure(gxp, core, DATA_BPM_OFFSET, BPM_EVENT_WRITE_XFER);
+/* TODO(b/265562894): remove scratchpad region support */
+static void flush_scratchpad_region(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
+{
+ if (!vd || gxp_fw_data_use_per_vd_config(vd) || !vd->rwdata_sgt[core])
+ return;
+ dma_sync_sg_for_device(gxp->dev, vd->rwdata_sgt[core]->sgl,
+ vd->rwdata_sgt[core]->orig_nents,
+ DMA_BIDIRECTIONAL);
+}
- return 0;
+static void invalidate_scratchpad_region(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core)
+{
+ if (!vd || gxp_fw_data_use_per_vd_config(vd) || !vd->rwdata_sgt[core])
+ return;
+ dma_sync_sg_for_cpu(gxp->dev, vd->rwdata_sgt[core]->sgl,
+ vd->rwdata_sgt[core]->orig_nents,
+ DMA_BIDIRECTIONAL);
+}
-out_firmware_unload:
- gxp_firmware_unload(gxp, core);
- return ret;
+static void reset_core_config_region(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
+{
+ struct gxp_host_control_region *core_cfg;
+
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ core_cfg->core_alive_magic = 0;
+ core_cfg->top_access_ok = 0;
+ core_cfg->boot_status = GXP_BOOT_STATUS_NONE;
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_COLD_BOOT);
+ } else {
+ memset(core_cfg, 0, AURORA_SCRATCHPAD_LEN);
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_REQUEST_COLD_BOOT);
+ }
}
-static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
+static int gxp_firmware_handshake(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ uint phys_core)
{
- u32 offset;
- u32 expected_top_value;
- void __iomem *core_scratchpad_base;
+ u32 __maybe_unused expected_top_value;
+	/* Prevent the read loop below from being optimized out. */
+ volatile struct gxp_host_control_region *core_cfg;
int ctr;
/* Wait for core to come up */
- dev_notice(gxp->dev, "Waiting for core %u to power up...\n", core);
+ dev_notice(gxp->dev, "Waiting for core %u to power up...\n", phys_core);
ctr = 1000;
while (ctr) {
- if (gxp_lpm_is_powered(gxp, core))
+ if (gxp_lpm_is_powered(gxp, CORE_TO_PSM(phys_core)))
break;
udelay(1 * GXP_TIME_DELAY_FACTOR);
ctr--;
@@ -348,9 +377,9 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
/* Wait for 500ms. Then check if Q7 core is alive */
dev_notice(gxp->dev, "Waiting for core %u to respond...\n",
- core);
+ phys_core);
- core_scratchpad_base = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;
+ core_cfg = get_scratchpad_base(gxp, vd, core);
/*
* Currently, the hello_world FW writes a magic number
@@ -358,19 +387,29 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
* space as an alive message
*/
ctr = 5000;
- offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
+#if IS_ENABLED(CONFIG_GXP_TEST)
+ fake_gxp_firmware_flush_work_all();
+	/*
+	 * Since the fake firmware works have been flushed, there is no need to busy-wait for
+	 * the firmware's response. Setting @ctr to 1 runs the while loop below only once, for
+	 * code coverage.
+	 */
+ ctr = 1;
+#endif
usleep_range(50 * GXP_TIME_DELAY_FACTOR, 60 * GXP_TIME_DELAY_FACTOR);
while (ctr--) {
- if (readl(core_scratchpad_base + offset) == Q7_ALIVE_MAGIC)
+ invalidate_scratchpad_region(gxp, vd, core);
+ if (core_cfg->core_alive_magic == Q7_ALIVE_MAGIC)
break;
usleep_range(1 * GXP_TIME_DELAY_FACTOR,
10 * GXP_TIME_DELAY_FACTOR);
}
- if (readl(core_scratchpad_base + offset) != Q7_ALIVE_MAGIC) {
- dev_err(gxp->dev, "Core %u did not respond!\n", core);
+ invalidate_scratchpad_region(gxp, vd, core);
+ if (core_cfg->core_alive_magic != Q7_ALIVE_MAGIC) {
+ dev_err(gxp->dev, "Core %u did not respond!\n", phys_core);
return -EIO;
}
- dev_notice(gxp->dev, "Core %u is alive!\n", core);
+ dev_notice(gxp->dev, "Core %u is alive!\n", phys_core);
#if !IS_ENABLED(CONFIG_GXP_GEM5)
/*
@@ -384,33 +423,89 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
* handshakes in Gem5.
*/
ctr = 1000;
- offset = SCRATCHPAD_MSG_OFFSET(MSG_TOP_ACCESS_OK);
- expected_top_value = BIT(CORE_WAKEUP_DOORBELL(core));
+ expected_top_value = BIT(CORE_WAKEUP_DOORBELL(phys_core));
while (ctr--) {
- if (readl(core_scratchpad_base + offset) == expected_top_value)
+ invalidate_scratchpad_region(gxp, vd, core);
+ if (core_cfg->top_access_ok == expected_top_value)
break;
udelay(1 * GXP_TIME_DELAY_FACTOR);
}
- if (readl(core_scratchpad_base + offset) != expected_top_value) {
- dev_err(gxp->dev, "TOP access from core %u failed!\n", core);
+ if (core_cfg->top_access_ok != expected_top_value) {
+ dev_err(gxp->dev, "TOP access from core %u failed!\n", phys_core);
return -EIO;
}
- dev_notice(gxp->dev, "TOP access from core %u successful!\n", core);
+ dev_notice(gxp->dev, "TOP access from core %u successful!\n", phys_core);
#endif
/* Stop bus performance monitors */
- gxp_bpm_stop(gxp, core);
+ gxp_bpm_stop(gxp, phys_core);
dev_notice(gxp->dev, "Core%u Instruction read transactions: 0x%x\n",
- core, gxp_bpm_read_counter(gxp, core, INST_BPM_OFFSET));
- dev_notice(gxp->dev, "Core%u Data write transactions: 0x%x\n", core,
- gxp_bpm_read_counter(gxp, core, DATA_BPM_OFFSET));
+ core, gxp_bpm_read_counter(gxp, phys_core, INST_BPM_OFFSET));
+ dev_notice(gxp->dev, "Core%u Data write transactions: 0x%x\n",
+ phys_core,
+ gxp_bpm_read_counter(gxp, phys_core, DATA_BPM_OFFSET));
+
+ return 0;
+}
+
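+/*
+ * Copies each core's firmware image (minus the common header) into its System
+ * RAM carveout after validating the image size. On failure, any carveouts
+ * already written are zeroed out again.
+ */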
+static int
+gxp_firmware_load_into_memories(struct gxp_dev *gxp,
+ const struct firmware *firmwares[GXP_NUM_CORES])
+{
+ int core;
+ int ret;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ /* Load firmware to System RAM */
+ if (FW_HEADER_SIZE > firmwares[core]->size) {
+ dev_err(gxp->dev,
+				"Invalid core %u firmware image size (%d > %zu)\n",
+ core, FW_HEADER_SIZE, firmwares[core]->size);
+ ret = -EINVAL;
+ goto error;
+ }
+ if ((firmwares[core]->size - FW_HEADER_SIZE) >
+ gxp->fwbufs[core].size) {
+ dev_err(gxp->dev,
+ "Core %u firmware image does not fit (%zu > %llu)\n",
+ core, firmwares[core]->size - FW_HEADER_SIZE,
+ gxp->fwbufs[core].size);
+ ret = -EINVAL;
+ goto error;
+ }
+ memcpy_toio(gxp->fwbufs[core].vaddr,
+ firmwares[core]->data + FW_HEADER_SIZE,
+ firmwares[core]->size - FW_HEADER_SIZE);
+ }
return 0;
+error:
+	/* Zero out the firmware buffers if any core's image size was invalid. */
+ for (core -= 1; core >= 0; core--)
+ memset_io(gxp->fwbufs[core].vaddr, 0, gxp->fwbufs[core].size);
+ return ret;
}
-static void gxp_firmware_unload(struct gxp_dev *gxp, uint core)
+int gxp_firmware_rearrange_elf(struct gxp_dev *gxp,
+ const struct firmware *firmwares[GXP_NUM_CORES])
{
- /* NO-OP for now. */
+ int ret = 0;
+ uint core;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ /* Re-arrange ELF firmware in System RAM */
+ ret = elf_load_segments(gxp,
+ firmwares[core]->data + FW_HEADER_SIZE,
+ firmwares[core]->size - FW_HEADER_SIZE,
+ &gxp->fwbufs[core]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to parse ELF firmware on core %u\n",
+ core);
+ return ret;
+ }
+ }
+ return ret;
}
/* Helper function to parse name written to sysfs "load_dsp_firmware" node */
@@ -438,15 +533,10 @@ static ssize_t load_dsp_firmware_show(struct device *dev,
{
struct gxp_dev *gxp = dev_get_drvdata(dev);
ssize_t ret;
+ char *firmware_name = gxp_firmware_loader_get_core_fw_name(gxp);
- mutex_lock(&gxp->dsp_firmware_lock);
-
- ret = scnprintf(buf, PAGE_SIZE, "%s\n",
- gxp->firmware_name ? gxp->firmware_name :
- DSP_FIRMWARE_DEFAULT_PREFIX);
-
- mutex_unlock(&gxp->dsp_firmware_lock);
-
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", firmware_name);
+ kfree(firmware_name);
return ret;
}
@@ -455,10 +545,9 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
const char *buf, size_t count)
{
struct gxp_dev *gxp = dev_get_drvdata(dev);
- const struct firmware *firmwares[GXP_NUM_CORES];
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
char *name_buf = NULL;
int ret;
- int core;
/*
* Lock the VD semaphore to ensure no core is executing the firmware
@@ -466,10 +555,10 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
*/
down_read(&gxp->vd_semaphore);
- if (gxp->firmware_running) {
+ if (mgr->firmware_running) {
dev_warn(dev, "Cannot update firmware when any core is running\n");
ret = -EBUSY;
- goto out;
+ goto err_out;
}
name_buf = fw_name_from_attr_buf(buf);
@@ -477,45 +566,32 @@ static ssize_t load_dsp_firmware_store(struct device *dev,
dev_err(gxp->dev, "Invalid firmware prefix requested: %s\n",
buf);
ret = PTR_ERR(name_buf);
- goto out;
+ goto err_out;
}
- mutex_lock(&gxp->dsp_firmware_lock);
-
dev_notice(gxp->dev, "Requesting firmware be reloaded: %s\n", name_buf);
- ret = request_dsp_firmware(gxp, name_buf, firmwares);
+	/*
+	 * A race condition is possible here: another thread could open a gxp
+	 * device and load the firmware between the unload/load calls below.
+	 * Since this interface is only for developer debugging, we do not
+	 * insist on preventing that race.
+	 */
+ gxp_firmware_loader_unload(gxp);
+ gxp_firmware_loader_set_core_fw_name(gxp, name_buf);
+ ret = gxp_firmware_loader_load_if_needed(gxp);
if (ret) {
- dev_err(gxp->dev,
- "Failed to request firmwares with names \"%sX\" (ret=%d)\n",
- name_buf, ret);
- goto err_request_firmware;
+ dev_err(gxp->dev, "Failed to load core firmware: %s\n", name_buf);
+ goto err_firmware_load;
}
- ret = gxp_firmware_authenticate(gxp, firmwares);
- if (ret)
- goto err_authenticate_firmware;
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->firmwares[core])
- release_firmware(gxp->firmwares[core]);
- gxp->firmwares[core] = firmwares[core];
- }
-
- kfree(gxp->firmware_name);
- gxp->firmware_name = name_buf;
-
- mutex_unlock(&gxp->dsp_firmware_lock);
-out:
+ kfree(name_buf);
up_read(&gxp->vd_semaphore);
return count;
-err_authenticate_firmware:
- for (core = 0; core < GXP_NUM_CORES; core++)
- release_firmware(firmwares[core]);
-err_request_firmware:
+err_firmware_load:
kfree(name_buf);
- mutex_unlock(&gxp->dsp_firmware_lock);
+err_out:
up_read(&gxp->vd_semaphore);
return ret;
}
@@ -537,6 +613,12 @@ int gxp_fw_init(struct gxp_dev *gxp)
uint core;
struct resource r;
int ret;
+ struct gxp_firmware_manager *mgr;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+ gxp->firmware_mgr = mgr;
/* Power on BLK_AUR to read the revision and processor ID registers */
gxp_pm_blk_on(gxp);
@@ -545,7 +627,7 @@ int gxp_fw_init(struct gxp_dev *gxp)
dev_notice(gxp->dev, "Aurora version: 0x%x\n", ver);
for (core = 0; core < GXP_NUM_CORES; core++) {
- proc_id = gxp_read_32_core(gxp, core, GXP_REG_PROCESSOR_ID);
+ proc_id = gxp_read_32(gxp, GXP_CORE_REG_PROCESSOR_ID(core));
dev_notice(gxp->dev, "Aurora core %u processor ID: 0x%x\n",
core, proc_id);
}
@@ -609,7 +691,7 @@ int gxp_fw_init(struct gxp_dev *gxp)
if (ret)
goto out_fw_destroy;
- gxp->firmware_running = 0;
+ mgr->firmware_running = 0;
return 0;
out_fw_destroy:
@@ -620,6 +702,10 @@ out_fw_destroy:
void gxp_fw_destroy(struct gxp_dev *gxp)
{
uint core;
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+
+ if (IS_GXP_TEST && !mgr)
+ return;
device_remove_group(gxp->dev, &gxp_firmware_attr_group);
@@ -628,74 +714,89 @@ void gxp_fw_destroy(struct gxp_dev *gxp)
memunmap(gxp->fwbufs[core].vaddr);
gxp->fwbufs[core].vaddr = NULL;
}
-
- if (gxp->firmwares[core]) {
- release_firmware(gxp->firmwares[core]);
- gxp->firmwares[core] = NULL;
- }
}
-
- kfree(gxp->firmware_name);
}
-int gxp_firmware_request_if_needed(struct gxp_dev *gxp)
+int gxp_firmware_load_core_firmware(
+ struct gxp_dev *gxp, char *name_prefix,
+ const struct firmware *core_firmware[GXP_NUM_CORES])
{
- int ret = 0;
uint core;
+ int ret;
- mutex_lock(&gxp->dsp_firmware_lock);
-
- if (gxp->is_firmware_requested)
- goto out;
-
- ret = request_dsp_firmware(gxp, DSP_FIRMWARE_DEFAULT_PREFIX,
- gxp->firmwares);
+ if (name_prefix == NULL)
+ name_prefix = DSP_FIRMWARE_DEFAULT_PREFIX;
+ ret = request_dsp_firmware(gxp, name_prefix, core_firmware);
if (ret)
- goto out;
-
- ret = gxp_firmware_authenticate(gxp, gxp->firmwares);
+ return ret;
+ ret = gxp_firmware_load_into_memories(gxp, core_firmware);
if (ret)
- goto err_authenticate_firmware;
-
- gxp->is_firmware_requested = true;
-
-out:
- mutex_unlock(&gxp->dsp_firmware_lock);
- return ret;
+ goto error;
+ ret = gxp_firmware_authenticate(gxp, core_firmware);
+ if (ret)
+ goto error;
-err_authenticate_firmware:
+ return 0;
+error:
for (core = 0; core < GXP_NUM_CORES; core++) {
- release_firmware(gxp->firmwares[core]);
- gxp->firmwares[core] = NULL;
+ release_firmware(core_firmware[core]);
+ core_firmware[core] = NULL;
}
- mutex_unlock(&gxp->dsp_firmware_lock);
return ret;
}
-static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
+/* TODO(b/253464747): Refactor these interrupts handlers and gxp-doorbell.c. */
+static void enable_core_interrupts(struct gxp_dev *gxp, uint core)
+{
+ /*
+	 * GXP_CORE_REG_COMMON_INT_MASK_0 is handled in the doorbell module, so we
+ * don't need to enable it here.
+ */
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_1(core), 0xffffffff);
+ gxp_write_32(gxp, GXP_CORE_REG_DEDICATED_INT_MASK(core), 0xffffffff);
+}
+
+void gxp_firmware_disable_ext_interrupts(struct gxp_dev *gxp, uint core)
+{
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core), 0);
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_1(core), 0);
+ gxp_write_32(gxp, GXP_CORE_REG_DEDICATED_INT_MASK(core), 0);
+}
+
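+/*
+ * Selects the core index used for addressing config regions: the virtual core
+ * when per-VD config is in use, the physical core otherwise.
+ */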
+static inline uint select_core(struct gxp_virtual_device *vd, uint virt_core,
+ uint phys_core)
+{
+ return gxp_fw_data_use_per_vd_config(vd) ? virt_core : phys_core;
+}
+
+static int gxp_firmware_setup(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ uint phys_core)
{
int ret = 0;
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
- if (gxp->firmware_running & BIT(core)) {
+ if (gxp_core_boot(gxp) && mgr->firmware_running & BIT(phys_core)) {
dev_err(gxp->dev, "Firmware is already running on core %u\n",
- core);
+ phys_core);
return -EBUSY;
}
- ret = gxp_firmware_load(gxp, core);
- if (ret) {
- dev_err(gxp->dev, "Failed to load firmware on core %u\n", core);
- return ret;
- }
+ /* Configure bus performance monitors */
+ gxp_bpm_configure(gxp, phys_core, INST_BPM_OFFSET, BPM_EVENT_READ_XFER);
+ gxp_bpm_configure(gxp, phys_core, DATA_BPM_OFFSET, BPM_EVENT_WRITE_XFER);
/* Mark this as a cold boot */
- gxp_firmware_set_boot_mode(gxp, core, GXP_BOOT_MODE_REQUEST_COLD_BOOT);
-
- ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
- /*verbose=*/true);
- if (ret) {
- dev_err(gxp->dev, "Failed to power up core %u\n", core);
- gxp_firmware_unload(gxp, core);
+ if (gxp_core_boot(gxp)) {
+ reset_core_config_region(gxp, vd, core);
+ ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
+ phys_core,
+ /*verbose=*/true);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to power up core %u\n", core);
+ return ret;
+ }
+ enable_core_interrupts(gxp, phys_core);
}
return ret;
@@ -719,104 +820,135 @@ static void gxp_firmware_wakeup_cores(struct gxp_dev *gxp, uint core_list)
static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
- uint virt_core, uint core)
+ uint virt_core, uint phys_core)
{
- int ret;
struct work_struct *work;
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
+ int ret = 0;
+ uint core = select_core(vd, virt_core, phys_core);
- ret = gxp_firmware_handshake(gxp, core);
- if (ret) {
- dev_err(gxp->dev, "Firmware handshake failed on core %u\n",
- core);
- gxp_pm_core_off(gxp, core);
- goto out_firmware_unload;
- }
+ if (gxp_core_boot(gxp)) {
+ ret = gxp_firmware_handshake(gxp, vd, core, phys_core);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Firmware handshake failed on core %u\n",
+ phys_core);
+ goto err_firmware_off;
+ }
- /* Initialize mailbox */
- gxp->mailbox_mgr->mailboxes[core] =
- gxp_mailbox_alloc(gxp->mailbox_mgr, vd, virt_core, core);
- if (IS_ERR(gxp->mailbox_mgr->mailboxes[core])) {
- dev_err(gxp->dev,
- "Unable to allocate mailbox (core=%u, ret=%ld)\n", core,
- PTR_ERR(gxp->mailbox_mgr->mailboxes[core]));
- ret = PTR_ERR(gxp->mailbox_mgr->mailboxes[core]);
- gxp->mailbox_mgr->mailboxes[core] = NULL;
- goto out_firmware_unload;
+ /* Initialize mailbox */
+ if (gxp->mailbox_mgr->allocate_mailbox) {
+ gxp->mailbox_mgr->mailboxes[phys_core] =
+ gxp->mailbox_mgr->allocate_mailbox(
+ gxp->mailbox_mgr, vd, virt_core, phys_core);
+ if (IS_ERR(gxp->mailbox_mgr->mailboxes[phys_core])) {
+ dev_err(gxp->dev,
+ "Unable to allocate mailbox (core=%u, ret=%ld)\n",
+ phys_core,
+ PTR_ERR(gxp->mailbox_mgr
+ ->mailboxes[phys_core]));
+ ret = PTR_ERR(
+ gxp->mailbox_mgr->mailboxes[phys_core]);
+ gxp->mailbox_mgr->mailboxes[phys_core] = NULL;
+ goto err_firmware_off;
+ }
+ }
+ mgr->firmware_running |= BIT(phys_core);
}
- work = gxp_debug_dump_get_notification_handler(gxp, core);
+ work = gxp_debug_dump_get_notification_handler(gxp, phys_core);
if (work)
gxp_notification_register_handler(
- gxp, core, HOST_NOTIF_DEBUG_DUMP_READY, work);
+ gxp, phys_core, HOST_NOTIF_DEBUG_DUMP_READY, work);
- work = gxp_telemetry_get_notification_handler(gxp, core);
+ work = gxp_core_telemetry_get_notification_handler(gxp, phys_core);
if (work)
gxp_notification_register_handler(
- gxp, core, HOST_NOTIF_TELEMETRY_STATUS, work);
-
- gxp->firmware_running |= BIT(core);
+ gxp, phys_core, HOST_NOTIF_CORE_TELEMETRY_STATUS, work);
return ret;
-out_firmware_unload:
- gxp_firmware_unload(gxp, core);
+err_firmware_off:
+ if (gxp_core_boot(gxp))
+ gxp_pm_core_off(gxp, phys_core);
return ret;
}
static void gxp_firmware_stop_core(struct gxp_dev *gxp,
struct gxp_virtual_device *vd,
- uint virt_core, uint core)
+ uint virt_core, uint phys_core)
{
- if (!(gxp->firmware_running & BIT(core)))
- dev_err(gxp->dev, "Firmware is not running on core %u\n", core);
+ struct gxp_firmware_manager *mgr = gxp->firmware_mgr;
- gxp->firmware_running &= ~BIT(core);
+ if (gxp_core_boot(gxp) && !(mgr->firmware_running & BIT(phys_core)))
+ dev_err(gxp->dev, "Firmware is not running on core %u\n",
+ phys_core);
- gxp_notification_unregister_handler(gxp, core,
- HOST_NOTIF_DEBUG_DUMP_READY);
- gxp_notification_unregister_handler(gxp, core,
- HOST_NOTIF_TELEMETRY_STATUS);
+ mgr->firmware_running &= ~BIT(phys_core);
- gxp_mailbox_release(gxp->mailbox_mgr, vd, virt_core,
- gxp->mailbox_mgr->mailboxes[core]);
- dev_notice(gxp->dev, "Mailbox %u released\n", core);
+ gxp_notification_unregister_handler(gxp, phys_core,
+ HOST_NOTIF_DEBUG_DUMP_READY);
+ gxp_notification_unregister_handler(gxp, phys_core,
+ HOST_NOTIF_CORE_TELEMETRY_STATUS);
+
+ if (gxp_core_boot(gxp)) {
+ if (gxp->mailbox_mgr->release_mailbox) {
+ gxp->mailbox_mgr->release_mailbox(
+ gxp->mailbox_mgr, vd, virt_core,
+ gxp->mailbox_mgr->mailboxes[phys_core]);
+ dev_notice(gxp->dev, "Mailbox %u released\n",
+ phys_core);
+ }
- if (vd->state == GXP_VD_RUNNING)
- gxp_pm_core_off(gxp, core);
- gxp_firmware_unload(gxp, core);
+ if (vd->state == GXP_VD_RUNNING) {
+ /*
+ * Disable interrupts to prevent cores from being woken up
+ * unexpectedly.
+ */
+ gxp_firmware_disable_ext_interrupts(gxp, phys_core);
+ gxp_pm_core_off(gxp, phys_core);
+ }
+ }
}
int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
uint core_list)
{
int ret;
- uint core, virt_core;
+ uint phys_core, virt_core;
uint failed_cores = 0;
int failed_ret;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- ret = gxp_firmware_setup(gxp, core);
- if (ret) {
- failed_cores |= BIT(core);
- failed_ret = ret;
- dev_err(gxp->dev, "Failed to run firmware on core %u\n",
- core);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+
+ ret = gxp_firmware_setup(gxp, vd, core, phys_core);
+ if (ret) {
+ failed_cores |= BIT(phys_core);
+ failed_ret = ret;
+ dev_err(gxp->dev, "Failed to run firmware on core %u\n",
+ phys_core);
}
+ virt_core++;
}
if (failed_cores != 0) {
/*
* Shut down the cores which call `gxp_firmware_setup`
* successfully
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- if (!(failed_cores & BIT(core))) {
- gxp_pm_core_off(gxp, core);
- gxp_firmware_unload(gxp, core);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ if (!(failed_cores & BIT(phys_core))) {
+ if (gxp_core_boot(gxp))
+ gxp_pm_core_off(gxp, phys_core);
}
+ virt_core++;
}
return failed_ret;
}
@@ -826,57 +958,58 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
* gxp_doorbell_enable_for_core here to set GXP_REG_COMMON_INT_MASK_0
* first to enable the firmware handshakes.
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (!(core_list & BIT(phys_core)))
continue;
- gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core),
- core);
+ gxp_doorbell_enable_for_core(
+ gxp, CORE_WAKEUP_DOORBELL(phys_core), phys_core);
}
#endif
/* Switch clock mux to the normal state to guarantee LPM works */
- gxp_pm_force_clkmux_normal(gxp);
- gxp_firmware_wakeup_cores(gxp, core_list);
+ if (gxp_core_boot(gxp)) {
+ gxp_pm_force_clkmux_normal(gxp);
+ gxp_firmware_wakeup_cores(gxp, core_list);
+ }
+
virt_core = 0;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- ret = gxp_firmware_finish_startup(gxp, vd, virt_core,
- core);
- if (ret) {
- failed_cores |= BIT(core);
- dev_err(gxp->dev,
- "Failed to run firmware on core %u\n",
- core);
- }
- virt_core++;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ ret = gxp_firmware_finish_startup(gxp, vd, virt_core,
+ phys_core);
+ if (ret) {
+ failed_cores |= BIT(phys_core);
+ dev_err(gxp->dev, "Failed to run firmware on core %u\n",
+ phys_core);
}
+ virt_core++;
}
if (failed_cores != 0) {
virt_core = 0;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (core_list & BIT(core)) {
- if (!(failed_cores & BIT(core))) {
- gxp_firmware_stop_core(gxp, vd,
- virt_core, core);
- }
- virt_core++;
- }
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ if (!(failed_cores & BIT(phys_core)))
+ gxp_firmware_stop_core(gxp, vd, virt_core,
+ phys_core);
+ virt_core++;
}
}
/* Check if we need to set clock mux to low state as requested */
- gxp_pm_resume_clkmux(gxp);
+ if (gxp_core_boot(gxp))
+ gxp_pm_resume_clkmux(gxp);
return ret;
}
int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
- bool verbose)
+ uint phys_core, bool verbose)
{
- gxp_program_reset_vector(gxp, core, verbose);
- return gxp_pm_core_on(gxp, core, verbose);
+ gxp_program_reset_vector(gxp, core, phys_core, verbose);
+ return gxp_pm_core_on(gxp, phys_core, verbose);
}
-
void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
uint core_list)
{
@@ -890,30 +1023,55 @@ void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
}
}
-void gxp_firmware_set_boot_mode(struct gxp_dev *gxp, uint core, u32 mode)
+void gxp_firmware_set_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 mode)
{
- void __iomem *boot_mode_addr;
+ struct gxp_host_control_region *core_cfg;
/* Callers shouldn't call the function under this condition. */
if (!gxp->fwbufs[core].vaddr)
return;
- boot_mode_addr = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF +
- SCRATCHPAD_MSG_OFFSET(MSG_BOOT_MODE);
-
- writel(mode, boot_mode_addr);
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ core_cfg->boot_mode = mode;
+ flush_scratchpad_region(gxp, vd, core);
}
-u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp, uint core)
+u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
{
- void __iomem *boot_mode_addr;
+ struct gxp_host_control_region *core_cfg;
/* Callers shouldn't call the function under this condition. */
if (!gxp->fwbufs[core].vaddr)
return 0;
- boot_mode_addr = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF +
- SCRATCHPAD_MSG_OFFSET(MSG_BOOT_MODE);
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ invalidate_scratchpad_region(gxp, vd, core);
+ return core_cfg->boot_mode;
+}
+
+void gxp_firmware_set_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 status)
+{
+ struct gxp_host_control_region *core_cfg;
+
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ core_cfg->boot_status = status;
+}
- return readl(boot_mode_addr);
+u32 gxp_firmware_get_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core)
+{
+ struct gxp_host_control_region *core_cfg;
+
+ core_cfg = get_scratchpad_base(gxp, vd, core);
+ return core_cfg->boot_status;
+}
+
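+/*
+ * Core firmware running states are managed by this driver only in direct
+ * mode; callers use this to skip core boot handling otherwise.
+ */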
+bool gxp_core_boot(struct gxp_dev *gxp)
+{
+ return gxp_is_direct_mode(gxp);
}
diff --git a/gxp-firmware.h b/gxp-firmware.h
index 008af5a..e1f44ce 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -8,14 +8,27 @@
#define __GXP_FIRMWARE_H__
#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <gcip/gcip-image-config.h>
+
+#include "gxp-config.h"
#include "gxp-internal.h"
#if !IS_ENABLED(CONFIG_GXP_TEST)
+#ifdef CHIP_AURORA_SCRATCHPAD_OFF
+
+#define AURORA_SCRATCHPAD_OFF CHIP_AURORA_SCRATCHPAD_OFF
+#define AURORA_SCRATCHPAD_LEN CHIP_AURORA_SCRATCHPAD_LEN
+
+#else /* CHIP_AURORA_SCRATCHPAD_OFF */
+
#define AURORA_SCRATCHPAD_OFF 0x000FF000 /* Last 4KB of ELF load region */
#define AURORA_SCRATCHPAD_LEN 0x00001000 /* 4KB */
+#endif /* CHIP_AURORA_SCRATCHPAD_OFF */
+
#else /* CONFIG_GXP_TEST */
/* Firmware memory is shrunk in unit tests. */
#define AURORA_SCRATCHPAD_OFF 0x000F0000
@@ -27,6 +40,21 @@
#define SCRATCHPAD_MSG_OFFSET(_msg_) (_msg_ << 2)
+#define PRIVATE_FW_DATA_SIZE SZ_2M
+#define SHARED_FW_DATA_SIZE SZ_1M
+
+/* Indexes are the same as image_config.IommuMappingIdx on the firmware side. */
+enum gxp_imgcfg_idx {
+ CORE_CFG_REGION_IDX,
+ VD_CFG_REGION_IDX,
+ SYS_CFG_REGION_IDX,
+};
+
+struct gxp_firmware_manager {
+ /* Firmware status bitmap. Accessors must hold `vd_semaphore`. */
+ u32 firmware_running;
+};
+
enum aurora_msg {
MSG_CORE_ALIVE,
MSG_TOP_ACCESS_OK,
@@ -37,11 +65,11 @@ enum aurora_msg {
/* The caller must have locked gxp->vd_semaphore for reading. */
static inline bool gxp_is_fw_running(struct gxp_dev *gxp, uint core)
{
- return (gxp->firmware_running & BIT(core)) != 0;
+ return (gxp->firmware_mgr->firmware_running & BIT(core)) != 0;
}
/*
- * Initializes the firmware loading/unloading subsystem. This includes
+ * Initializes the core firmware loading/unloading subsystem. This includes
* initializing the LPM and obtaining the memory regions needed to load the FW.
* The function needs to be called once after a block power up event.
*/
@@ -54,21 +82,32 @@ int gxp_fw_init(struct gxp_dev *gxp);
void gxp_fw_destroy(struct gxp_dev *gxp);
/*
- * Check if the DSP firmware files have been requested yet, and if not, request
- * them.
+ * Requests and loads core firmware into memories.
+ * If the loaded firmware is ELF, rearranges it.
+ *
+ * Returns 0 on success, a negative errno on failure.
+ */
+int gxp_firmware_load_core_firmware(
+ struct gxp_dev *gxp, char *name_prefix,
+ const struct firmware *core_firmwares[GXP_NUM_CORES]);
+
+/*
+ * Rearranges firmware data if the firmware is ELF.
*
- * Returns 0 if the files have already been requested or were successfully
- * requested by this call; Returns an errno if this call attempted to request
- * the files and it failed.
+ * Returns 0 on success, a negative errno on failure.
*/
-int gxp_firmware_request_if_needed(struct gxp_dev *gxp);
+int gxp_firmware_rearrange_elf(struct gxp_dev *gxp,
+ const struct firmware *firmwares[GXP_NUM_CORES]);
/*
* Re-program the reset vector and power on the core's LPM if the block had
* been shut down.
+ *
+ * @core should be the virtual core ID when using the per-VD config method;
+ * otherwise it should be the physical core ID.
*/
int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
- bool verbose);
+ uint phys_core, bool verbose);
/*
* Loads the firmware for the cores in system memory and powers up the cores
@@ -87,12 +126,37 @@ void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
* Sets the specified core's boot mode or suspend request value.
* This function should be called only after the firmware has been run.
*/
-void gxp_firmware_set_boot_mode(struct gxp_dev *gxp, uint core, u32 mode);
+void gxp_firmware_set_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 mode);
/*
* Returns the specified core's boot mode or boot status.
* This function should be called only after the firmware has been run.
*/
-u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp, uint core);
+u32 gxp_firmware_get_boot_mode(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core);
+
+/*
+ * Sets the specified core's boot status or suspend request value.
+ */
+void gxp_firmware_set_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 status);
+
+/*
+ * Returns the specified core's boot status.
+ * This function should be called only after the firmware has been run.
+ */
+u32 gxp_firmware_get_boot_status(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core);
+
+/* Returns whether the core firmware running states are managed by us. */
+bool gxp_core_boot(struct gxp_dev *gxp);
+
+/*
+ * Disables external interrupts to the specified core.
+ */
+void gxp_firmware_disable_ext_interrupts(struct gxp_dev *gxp, uint core);
#endif /* __GXP_FIRMWARE_H__ */
diff --git a/gxp-host-device-structs.h b/gxp-host-device-structs.h
index 8e4723c..efb39a9 100644
--- a/gxp-host-device-structs.h
+++ b/gxp-host-device-structs.h
@@ -11,19 +11,98 @@
* headers or data structures.
*
*/
+
#ifndef __GXP_HOST_DEVICE_STRUCTURES_H__
#define __GXP_HOST_DEVICE_STRUCTURES_H__
#define MAX_NUM_CORES 4
-#define NUM_SYSTEM_SEMAPHORES 64
-/* Bit masks for the status fields in the telemetry structures. */
-/* The telemetry buffers have been setup by the host. */
-#define GXP_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
-/* The telemetry buffers are being used by the device. */
-#define GXP_TELEMETRY_DEVICE_STATUS_ENABLED (1 << 0)
+/* The number of physical doorbells and sync barriers allocated to each VD */
+#define GXP_NUM_DOORBELLS_PER_VD 7
+#define GXP_NUM_SYNC_BARRIERS_PER_VD 4
+
+/* The first allowed doorbell and sync barrier to be used for VDs' usage */
+#define GXP_DOORBELLS_START 4 /* The first 4 are used for boot */
+#define GXP_SYNC_BARRIERS_START 1 /* The first 1 is used for UART */
+
+/* Definitions for host->device boot mode requests */
+/*
+ * No boot action is needed. This is a valid mode once a core is running.
+ * However, it's an invalid state when a FW is powering on. The DSP core will
+ * write it to the boot mode register once it starts a transition.
+ * This is helpful in case the core reboots/crashes while performing the
+ * transition so it doesn't get stuck in a boot loop.
+ */
+#define GXP_BOOT_MODE_NONE 0
+
+/*
+ * Request that the core perform a normal cold boot on the next power-on event.
+ * This does not actually wake the core up, but is required before powering the
+ * core up if cold boot is desired.
+ * Core power-on could be performed using any wake-up source like the doorbells.
+ * Upon success, the boot status should be GXP_BOOT_STATUS_ACTIVE.
+ */
+#define GXP_BOOT_MODE_COLD_BOOT 1
+
+/*
+ * Request that the core suspend on the next suspend signal arrival. This does
+ * not trigger a suspend operation. A subsequent mailbox command or notification
+ * is needed to trigger the actual transition. Upon success, the boot status
+ * should be GXP_BOOT_STATUS_SUSPENDED.
+ */
+#define GXP_BOOT_MODE_SUSPEND 2
+
+/*
+ * Request that the core preempt the active workload on the next suspend
+ * signal arrival. Upon success, the boot status should be
+ * GXP_BOOT_STATUS_SUSPENDED.
+ */
+#define GXP_BOOT_MODE_PREEMPT 3
+
+/*
+ * Request that the core resume on the next power-on event. This does not
+ * trigger a resume operation, but is required before powering the core up if
+ * warm boot/resume is desired.
+ * Core power-on could be performed using any wake-up source, such as a direct
+ * LPM transition into PS0. Upon success, the boot status should be
+ * GXP_BOOT_STATUS_ACTIVE.
+ */
+#define GXP_BOOT_MODE_RESUME 4
+
+/*
+ * Request that the core shut down. A subsequent mailbox command or notification
+ * is needed to trigger the actual transition. Upon success, the boot status
+ * should be GXP_BOOT_STATUS_OFF.
+ */
+#define GXP_BOOT_MODE_SHUTDOWN 5
+
+/* Definitions for host->device boot status */
+/* Initial status */
+#define GXP_BOOT_STATUS_NONE 0
+
+/* Final status */
+#define GXP_BOOT_STATUS_ACTIVE 1
+#define GXP_BOOT_STATUS_SUSPENDED 2
+#define GXP_BOOT_STATUS_OFF 3
+
+/* Transition status */
+#define GXP_BOOT_STATUS_INVALID_MODE 4
+#define GXP_BOOT_STATUS_BOOTING 5
+#define GXP_BOOT_STATUS_BOOTING_FAILED 6
+#define GXP_BOOT_STATUS_SUSPENDING 7
+#define GXP_BOOT_STATUS_SUSPENDING_FAILED 8
+#define GXP_BOOT_STATUS_SUSPENDING_FAILED_ACTIVE_WL 9
+#define GXP_BOOT_STATUS_WAITING_FOR_WORKLOAD 10
+#define GXP_BOOT_STATUS_WAITING_FOR_DMA 11
+#define GXP_BOOT_STATUS_SHUTTING_DOWN 12
+
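+/*
+ * Illustrative cold-boot flow: the host sets boot_mode to
+ * GXP_BOOT_MODE_COLD_BOOT with boot_status at GXP_BOOT_STATUS_NONE before
+ * powering the core on; the FW then writes GXP_BOOT_MODE_NONE back as it
+ * starts the transition and reports progress through boot_status until it
+ * reaches GXP_BOOT_STATUS_ACTIVE (or a failure status).
+ */
+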
+/* Bit masks for the status fields in the core telemetry structures. */
+/* The core telemetry buffers have been setup by the host. */
+#define GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
+/* The core telemetry buffers are being used by the device. */
+#define GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED (1 << 0)
/* There was an attempt to use the buffers but their content was invalid. */
-#define GXP_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
+#define GXP_CORE_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
/* Definitions for host->device boot mode requests */
/*
@@ -69,81 +148,21 @@
/* Invalid boot mode request code */
#define GXP_BOOT_MODE_STATUS_INVALID_MODE 10
-/* A structure describing the state of the doorbells on the system. */
-struct gxp_doorbells_descriptor {
- /* The app this descriptor belongs to. */
- uint32_t application_id;
- /* The physical ID of the sync barrier protecting this region. */
- uint32_t protection_barrier;
- /* The number of doorbells described in this region. */
- uint32_t num_items;
- /* The list of doorbells available for usage. */
- struct dooorbell_metadata_t {
- /*
- * The number of users using this doorbell. 0 when it's
- * available.
- */
- uint32_t users_count;
- /* The 0-based index of the doorbell described by this entry. */
- uint32_t hw_doorbell_idx;
- } doorbells[];
-};
-
-/* A structure describing the state of the sync barriers on the system. */
-struct gxp_sync_barriers_descriptor {
- /* The app this descriptor belongs to. */
- uint32_t application_id;
- /* The physical ID of the sync barrier protecting this region. */
- uint32_t protection_barrier;
- /* The number of sync barriers described in this region. */
- uint32_t num_items;
- /* The list of sync barriers available for usage. */
- struct sync_barrier_metadata_t {
- /*
- * The number of users using this barrier. 0 when it's
- * available.
- */
- uint32_t users_count;
- /*
- * The 0-based index of the sync barrier described by this
- * entry.
- */
- uint32_t hw_barrier_idx;
- } barriers[];
-};
-
-/* A structure describing the state of the watchdog on the system. */
-struct gxp_watchdog_descriptor {
- /* The physical ID of the sync barrier protecting this region. */
- uint32_t protection_barrier;
- /*
- * The number of timer ticks before the watchdog expires.
- * This is in units of 244.14 ns.
- */
- uint32_t target_value;
- /* A bit mask of the cores expected to tickle the watchdog. */
- uint32_t participating_cores;
- /* A bit mask of the cores that have tickled the watchdog. */
- uint32_t responded_cores;
- /* A flag indicating whether or not the watchdog has tripped. */
- uint32_t tripped;
-};
-
/*
- * A structure describing the telemetry (logging and tracing) parameters and
- * buffers.
+ * A structure describing the core telemetry (logging and tracing) parameters
+ * and buffers.
*/
-struct gxp_telemetry_descriptor {
- /* A struct for describing the parameters for telemetry buffers */
- struct telemetry_descriptor {
+struct gxp_core_telemetry_descriptor {
+ /* A struct for describing the parameters for core telemetry buffers. */
+ struct core_telemetry_descriptor {
/*
- * The telemetry status from the host's point of view. See the
- * top of the file for the appropriate flags.
+ * The core telemetry status from the host's point of view. See
+ * the top of the file for the appropriate flags.
*/
uint32_t host_status;
/*
- * The telemetry status from the device point of view. See the
- * top of the file for the appropriate flags.
+ * The core telemetry status from the device point of view. See
+ * the top of the file for the appropriate flags.
*/
uint32_t device_status;
/*
@@ -160,168 +179,177 @@ struct gxp_telemetry_descriptor {
};
/*
- * A structure describing the state and allocations of the SW-based semaphores
- * on the system.
+ * A structure for describing the state of the job this worker core is part of.
+ * This struct is expected to change per dispatch/context switch/preemption as
+ * it describes the HW resources, FW IDs, and other parameters that may change
+ * across job dispatches.
+ * It also establishes a slot used for the various HW resources this VD is
+ * expected to use.
+ * Each FW in a VD is expected to be provided its own copy of this structure
+ * based on the job that it's part of.
*/
-struct gxp_semaphores_descriptor {
- /* The app this descriptor belongs to. */
- uint32_t application_id;
- /* The physical ID of the sync barrier protecting this region. */
- uint32_t protection_barrier;
+struct gxp_job_descriptor {
+ /* The number of workers participating in this job. */
+ uint32_t workers_count;
+
/*
- * An array where each element is dedicated to a core. The element is a
- * bit map describing of all the semaphores in the list below that have
- * been unlocked but haven't been processed yet by the receiptient core.
+ * A mapping between a worker ID and the FW ID handling it. The FW ID
+ * used for handling worker 'w' is defined in worker_to_fw[w].
*/
- uint64_t woken_pending_semaphores[MAX_NUM_CORES];
+ int32_t worker_to_fw[MAX_NUM_CORES];
+
/*
- * A mapping of which doorbells to use as a wakeup signal source per
- * core.
+ * A slot ID between 0 and MAX_NUM_CORES (exclusive) that indicates
+ * which block of HW resources this VD is expected to use. All system
+	 * HW resources (such as doorbells, sync barriers, etc.) are split across
+	 * the slots evenly, usually starting at a specific physical ID and
+	 * spanning a number of consecutive instances. The start ID for each HW
+	 * resource category is defined in GXP_<resource_name>_START, and the
+	 * number of resources allotted to each slot is defined in
+ * GXP_NUM_<resource_name>_PER_VD.
*/
- uint32_t wakeup_doorbells[MAX_NUM_CORES];
- /* The number of items described in this region. */
- uint32_t num_items;
- /* The list of semaphores available for usage. */
- struct semaphore_metadata {
- /*
- * The number of users using this semaphore. 0 when it's for
- * creation.
- * Note: this is not the count value of the semaphore, but just
- * an indication if this slot is available.
- */
- uint32_t users_count;
- /*
- * This is the semaphore count. Cores will block when they call
- * 'Wait()' while this count is 0.
- */
- uint32_t count;
- /*
- * A bit map of 'NUM_DSP_CORES' bits indicating which cores are
- * currently waiting on this semaphore to become available.
- */
- uint32_t waiters;
- } semaphores[NUM_SYSTEM_SEMAPHORES];
+ uint32_t hardware_resources_slot;
};
-/* A basic unidirectional queue. */
-struct gxp_queue_info {
- /* A header describing the queue and its state. */
- struct queue_header {
- /* A device-side pointer of the storage managed by this queue */
- uint32_t storage;
- /* The index to the head of the queue. */
- uint32_t head_idx;
- /* The index to the tail of the queue. */
- uint32_t tail_idx;
- /* The size of an element stored this queue. */
- uint32_t element_size;
- /* The number of elements that can be stored in this queue. */
- uint32_t elements_count;
- } header;
- /* The semaphore ID controlling exclusive access to this core. */
- uint32_t access_sem_id;
+/*
+ * A per-FW control structure used to communicate between the host (MCU or
+ * kernel) and the DSP core. The region is expected to be hosted in uncached
+ * memory.
+ */
+struct gxp_host_control_region {
/*
- * The ID for the semaphore containing the number of unprocessed items
- * pushed to this queue.
+ * Written to by the FW to indicate to the host that the core is
+ * alive.
*/
- uint32_t posted_slots_sem_id;
+ uint32_t core_alive_magic;
+
/*
- * The ID for the semaphore containing the number of free slots
- * available to store data in this queue.
+ * Written to by the FW to indicate to the host that the core can read
+ * TOP registers.
*/
- uint32_t free_slots_sem_id;
-};
+ uint32_t top_access_ok;
-/* A struct describing a single core's set of incoming queues. */
-struct gxp_core_info {
/*
- * The metadata for the queue holding incoming commands from other
- * cores.
+	 * Written to by the host to specify the requested FW boot mode. See the
+ * GXP_BOOT_MODE_* definitions for valid values. Always set by the FW to
+ * GXP_BOOT_MODE_NONE once the requested boot mode transition is
+ * completed.
*/
- struct gxp_queue_info incoming_commands_queue;
+ uint32_t boot_mode;
+
/*
- * The metadata for the queue holding incoming responses from other
- * cores.
+ * Written to by the FW to indicate the boot status. See
+ * GXP_BOOT_STATUS_* definitions for valid values.
*/
- struct gxp_queue_info incoming_responses_queue;
-};
+ uint32_t boot_status;
+
+ /* Reserved fields for future expansion */
+ uint32_t reserved_boot[12];
+
+ /* To be used to communicate statistics for timing events during boot */
+ uint32_t timing_entries[16];
+
+ /* To be used to communicate crash events in case of failures */
+ uint32_t valid_crash_info;
+ uint32_t crash_exccause;
+ uint32_t crash_excvaddr;
+ uint32_t crash_epc1;
+ uint32_t reserved_crash_info[12];
-/* A structure describing all the cores' per-core metadata. */
-struct gxp_cores_descriptor {
- /* The number of cores described in this descriptor. */
- uint32_t num_items;
- /* The descriptors for each core. */
- struct gxp_core_info cores[];
+ /* Reserved for more categories */
+ uint32_t reserved[16];
+
+ /*
+ * The per-core job descriptor. This struct will be inspected by the FW
+ * at the beginning of every dispatch.
+ */
+ struct gxp_job_descriptor job_descriptor;
};
/*
- * The top level descriptor describing memory regions used to access system-wide
- * structures and resources.
+ * A structure describing the telemetry (logging and tracing) parameters and
+ * buffers; this describes R/O aspects of the telemetry buffers.
*/
-struct gxp_system_descriptor {
- /* A device address for the application data descriptor. */
- uint32_t app_descriptor_dev_addr[MAX_NUM_CORES];
- /* A device address for the watchdog descriptor. */
- uint32_t watchdog_dev_addr;
- /* A device address for the telemetry descriptor */
- uint32_t telemetry_dev_addr;
- /* A device address for the common debug dump region */
- uint32_t debug_dump_dev_addr;
+struct gxp_telemetry_descriptor_ro {
+ struct telemetry_descriptor_ro {
+ /*
+ * The telemetry status from the host's point of view. See the
+ * top of the file for the appropriate flags.
+ */
+ uint32_t host_status;
+
+ /*
+ * The device address for the buffer used for storing events.
+ * The head and tail indices are described inside the data
+ * pointed to by `buffer_addr`.
+ */
+ uint32_t buffer_addr;
+
+ /* The size of the buffer (in bytes) */
+ uint32_t buffer_size;
+ } per_core_loggers[MAX_NUM_CORES], per_core_tracers[MAX_NUM_CORES];
};
-/* A structure describing the metadata belonging to a specific application. */
-struct gxp_application_descriptor {
+/*
+ * A structure describing the external state of the VD. This structure is read
+ * once by the FW upon the first cold boot and is never checked again.
+ */
+struct gxp_vd_descriptor {
/* The ID for this GXP application. */
uint32_t application_id;
- /* The number of cores this application has. */
- uint16_t core_count;
+
/*
- * The cores mask; a bit at index `n` indicates that core `n` is part of
- * this app.
+ * Whether or not this VD has been initialized by one of its cores.
+ * This variable is protected by sync barrier at offset 0. Should be
+ * initialized by the host to 0.
*/
- uint16_t cores_mask;
- /* The number of threads allocated for each core. */
- uint16_t threads_count;
- /* The size of system memory given to this app. */
- uint32_t system_memory_size;
- /* The device-address of the system memory given to this app. */
- uint32_t system_memory_addr;
- /* The size of TCM memory allocated per bank for this app. */
- uint32_t tcm_memory_per_bank; /* in units of 4 kB */
- /* A device address for the doorbells descriptor. */
- uint32_t doorbells_dev_addr;
- /* A device address for the sync barriers descriptor. */
- uint32_t sync_barriers_dev_addr;
- /* A device address for the semaphores descriptor. */
- uint32_t semaphores_dev_addr;
- /* A device address for the cores cmd/rsp queues descriptor. */
- uint32_t cores_info_dev_addr;
+ uint32_t vd_is_initialized;
};
-/* The structure describing a core-to-core command. */
-struct gxp_core_to_core_command {
- /* The source of port number (the core's virtual ID) of the command. */
- uint32_t source;
- /* The command's sequence number. */
- uint64_t sequence_number;
- /* The command payload device address. */
- uint64_t device_address;
- /* The size of the payload in bytes. */
- uint32_t size;
- /* The generic command flags. */
- uint32_t flags;
+/*
+ * A descriptor for data that is common to the entire system; usually accessed
+ * by physical core. This region is mapped as R/O for all VDs. Should be
+ * writable by the host (MCU/Kernel).
+ */
+struct gxp_system_descriptor_ro {
+ /* A device address for the common debug dump region */
+ uint32_t debug_dump_dev_addr;
+
+ /*
+ * A R/O descriptor for the telemetry data. Describing buffer
+ * parameters.
+ */
+ struct gxp_telemetry_descriptor_ro telemetry_desc;
};
-/* The structure describing a core-to-core response. */
-struct gxp_core_to_core_response {
- /* The source of port number (the core's virtual ID) of the response. */
- uint32_t source;
- /* The response's sequence number. */
- uint64_t sequence_number;
- /* The response error code (if any). */
- uint16_t error_code;
- /* The response return value (filled-in by the user). */
- int32_t cmd_retval;
+/*
+ * A structure describing the telemetry (logging and tracing) parameters; this
+ * describes R/W aspects of the telemetry system.
+ */
+struct gxp_telemetry_descriptor_rw {
+ /* A struct for describing R/W status parameters of the buffer */
+ struct telemetry_descriptor_rw {
+ /*
+ * The telemetry status from the device point of view. See the
+ * top of the file for the appropriate flags.
+ */
+ uint32_t device_status;
+
+ /*
+ * Whether or not this telemetry category has data available
+ * for the host
+ */
+ uint32_t data_available;
+ } per_core_loggers[MAX_NUM_CORES], per_core_tracers[MAX_NUM_CORES];
+};
+
+/*
+ * A descriptor for data that is common to the entire system; usually accessed
+ * by physical core. This region is mapped as R/W for all VDs.
+ */
+struct gxp_system_descriptor_rw {
+ /* A R/W descriptor for the telemetry data */
+ struct gxp_telemetry_descriptor_rw telemetry_desc;
};
#endif /* __GXP_HOST_DEVICE_STRUCTURES_H__ */
diff --git a/gxp-hw-mailbox-driver.c b/gxp-hw-mailbox-driver.c
deleted file mode 100644
index 8430a65..0000000
--- a/gxp-hw-mailbox-driver.c
+++ /dev/null
@@ -1,323 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP hardware-based mailbox driver implementation.
- *
- * Copyright (C) 2021 Google LLC
- */
-
-#include <asm/barrier.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-#include <linux/kthread.h>
-#include <linux/of_irq.h>
-#include <linux/spinlock.h>
-
-#include "gxp-mailbox-driver.h"
-#include "gxp-mailbox-regs.h"
-#include "gxp-mailbox.h"
-
-static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
-{
- return readl(mailbox->csr_reg_base + reg_offset);
-}
-
-static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
-{
- writel(value, mailbox->csr_reg_base + reg_offset);
-}
-
-static u32 data_read(struct gxp_mailbox *mailbox, uint reg_offset)
-{
- return readl(mailbox->data_reg_base + reg_offset);
-}
-
-static void data_write(struct gxp_mailbox *mailbox, uint reg_offset,
- u32 value)
-{
- writel(value, mailbox->data_reg_base + reg_offset);
-}
-
-/* IRQ Handling */
-
-/* Interrupt to signal a response from the device to host */
-#define MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK BIT(0)
-
-static irqreturn_t mailbox_irq_handler(int irq, void *arg)
-{
- u32 masked_status;
- struct gxp_mailbox *mailbox = (struct gxp_mailbox *) arg;
- struct work_struct **handlers = mailbox->interrupt_handlers;
- u32 next_int;
-
- /* Contains only the non-masked, pending interrupt bits */
- masked_status = gxp_mailbox_get_host_mask_status(mailbox);
-
- /* Clear all pending IRQ bits */
- gxp_mailbox_clear_host_interrupt(mailbox, masked_status);
-
- if (masked_status & MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK) {
- mailbox->handle_irq(mailbox);
- masked_status &= ~MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK;
- }
-
- while ((next_int = ffs(masked_status))) {
- next_int--; /* ffs returns 1-based indices */
- masked_status &= ~BIT(next_int);
-
- if (handlers[next_int])
- schedule_work(handlers[next_int]);
- else
- pr_err_ratelimited(
- "mailbox%d: received unknown interrupt bit 0x%X\n",
- mailbox->core_id, next_int);
- }
-
- return IRQ_HANDLED;
-}
-
-static void register_irq(struct gxp_mailbox *mailbox)
-{
- int err;
- unsigned int virq;
-
- virq = irq_of_parse_and_map(mailbox->gxp->dev->of_node,
- mailbox->core_id);
- if (!virq) {
- pr_err("Unable to parse interrupt for core %d from the DT\n",
- mailbox->core_id);
- return;
- }
-
- err = request_irq(virq, mailbox_irq_handler, /*flags=*/ 0,
- "aurora_mbx_irq", (void *) mailbox);
- if (err) {
- pr_err("Unable to register IRQ num=%d; error=%d\n", virq, err);
- return;
- }
-
- mailbox->interrupt_virq = virq;
- pr_debug("Core %d's mailbox interrupt registered as IRQ %u.\n",
- mailbox->core_id, virq);
-}
-
-static void unregister_irq(struct gxp_mailbox *mailbox)
-{
- if (mailbox->interrupt_virq) {
- pr_debug("Freeing IRQ %d\n", mailbox->interrupt_virq);
- free_irq(mailbox->interrupt_virq, mailbox);
- mailbox->interrupt_virq = 0;
- }
-}
-
-/* gxp-mailbox-driver.h interface */
-
-void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
-{
- spin_lock_init(&mailbox->cmd_tail_resp_head_lock);
- spin_lock_init(&mailbox->cmd_head_resp_tail_lock);
-}
-
-void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
-{
- /* Nothing to cleanup */
-}
-
-void gxp_mailbox_driver_enable_interrupts(struct gxp_mailbox *mailbox)
-{
- register_irq(mailbox);
-}
-
-void gxp_mailbox_driver_disable_interrupts(struct gxp_mailbox *mailbox)
-{
- unregister_irq(mailbox);
-}
-
-void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
-{
- return gxp->mbx[index].vaddr;
-}
-
-void __iomem *gxp_mailbox_get_data_base(struct gxp_dev *gxp, uint index)
-{
- return gxp->mbx[index].vaddr + 0x80;
-}
-
-/* gxp-mailbox-driver.h: CSR-based calls */
-
-void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
-{
- csr_write(mailbox, MBOX_MCUCTLR_OFFSET, 1);
-}
-
-void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
- u32 int_mask)
-{
- /*
- * Ensure all memory writes have been committed to memory before
- * signalling to the device to read from them. This avoids the scenario
- * where the interrupt trigger write gets delivered to the MBX HW before
- * the DRAM transactions made it to DRAM since they're Normal
- * transactions and can be re-ordered and backed off behind other
- * transfers.
- */
- wmb();
-
- csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR0_OFFSET);
-}
-
-void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTCR1_OFFSET, int_mask);
-}
-
-void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTMR1_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR1_OFFSET);
-}
-
-/* gxp-mailbox-driver.h: Data register-based calls */
-
-void gxp_mailbox_write_status(struct gxp_mailbox *mailbox, u32 status)
-{
- data_write(mailbox, MBOX_STATUS_OFFSET, status);
-}
-
-void gxp_mailbox_write_descriptor(struct gxp_mailbox *mailbox,
- dma_addr_t descriptor_addr)
-{
- data_write(mailbox, MBOX_DESCRIPTOR_ADDR_OFFSET, (u32)descriptor_addr);
-}
-
-void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_resp_head;
- u32 new_cmd_tail;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- current_resp_head = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- RESP_HEAD_MASK;
- new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
- new_cmd_tail | current_resp_head);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-}
-
-void gxp_mailbox_write_resp_queue_head(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_cmd_tail;
- u32 new_resp_head;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- current_cmd_tail = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- CMD_TAIL_MASK;
- new_resp_head = (u32)val << RESP_HEAD_SHIFT;
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
- current_cmd_tail | new_resp_head);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-}
-
-u16 gxp_mailbox_read_cmd_queue_head(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-
- return (u16)((reg_val & CMD_HEAD_MASK) >> CMD_HEAD_SHIFT);
-}
-
-u16 gxp_mailbox_read_resp_queue_tail(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-
- return (u16)((reg_val & RESP_TAIL_MASK) >> RESP_TAIL_SHIFT);
-}
-
-void gxp_mailbox_write_cmd_queue_head(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_resp_tail;
- u32 new_cmd_head;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- current_resp_tail = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- RESP_TAIL_MASK;
- new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
- new_cmd_head | current_resp_tail);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-}
-
-void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_cmd_head;
- u32 new_resp_tail;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
-
- current_cmd_head = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- CMD_HEAD_MASK;
- new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
- current_cmd_head | new_resp_tail);
-
- spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
-}
-
-u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-
- return (u16)((reg_val & CMD_TAIL_MASK) >> CMD_TAIL_SHIFT);
-}
-
-u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox)
-{
- u32 reg_val;
- unsigned long flags;
-
- spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
-
- reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
-
- return (u16)((reg_val & RESP_HEAD_MASK) >> RESP_HEAD_SHIFT);
-}
diff --git a/gxp-internal.h b/gxp-internal.h
index 82e5303..e00401b 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -7,9 +7,11 @@
#ifndef __GXP_INTERNAL_H__
#define __GXP_INTERNAL_H__
+#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
@@ -17,15 +19,28 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
+#include <gcip/gcip-thermal.h>
+
#include "gxp-config.h"
+#define IS_GXP_TEST IS_ENABLED(CONFIG_GXP_TEST)
+
+#define GXP_NAME "gxp"
+
+enum gxp_chip_revision {
+ GXP_CHIP_A0,
+ GXP_CHIP_B0,
+ /* used when the revision is not explicitly specified */
+ GXP_CHIP_ANY,
+};
+
/* Holds Client's TPU mailboxes info used during mapping */
struct gxp_tpu_mbx_desc {
uint phys_core_list;
- uint virt_core_list;
size_t cmdq_size, respq_size;
};
@@ -44,36 +59,34 @@ struct gxp_tpu_dev {
};
/* Forward declarations from submodules */
+struct gcip_domain_pool;
struct gxp_client;
struct gxp_mailbox_manager;
struct gxp_debug_dump_manager;
-struct gxp_domain_pool;
struct gxp_dma_manager;
struct gxp_fw_data_manager;
struct gxp_power_manager;
-struct gxp_telemetry_manager;
+struct gxp_core_telemetry_manager;
struct gxp_thermal_manager;
-struct gxp_wakelock_manager;
+struct gxp_usage_stats;
+struct gxp_power_states;
+struct gxp_iommu_domain;
struct gxp_dev {
struct device *dev; /* platform bus device */
struct miscdevice misc_dev; /* misc device structure */
struct dentry *d_entry; /* debugfs dir for this device */
struct gxp_mapped_resource regs; /* ioremapped CSRs */
- struct gxp_mapped_resource mbx[GXP_NUM_CORES]; /* mailbox CSRs */
+ struct gxp_mapped_resource lpm_regs; /* ioremapped LPM CSRs, may be equal to @regs */
+ struct gxp_mapped_resource mbx[GXP_NUM_MAILBOXES]; /* mailbox CSRs */
struct gxp_mapped_resource fwbufs[GXP_NUM_CORES]; /* FW carveout */
struct gxp_mapped_resource fwdatabuf; /* Shared FW data carveout */
struct gxp_mapped_resource cmu; /* CMU CSRs */
struct gxp_mailbox_manager *mailbox_mgr;
struct gxp_power_manager *power_mgr;
struct gxp_debug_dump_manager *debug_dump_mgr;
- const struct firmware *firmwares[GXP_NUM_CORES];
- char *firmware_name;
- bool is_firmware_requested;
- /* Protects `firmwares` and `firmware_name` */
- struct mutex dsp_firmware_lock;
- /* Firmware status bitmap. Accessors must hold `vd_semaphore` */
- u32 firmware_running;
+ struct gxp_firmware_loader_manager *fw_loader_mgr;
+ struct gxp_firmware_manager *firmware_mgr;
/*
* Lock to ensure only one thread at a time is ever calling
* `pin_user_pages_fast()` during mapping, otherwise it will fail.
@@ -94,21 +107,152 @@ struct gxp_dev {
struct gxp_client *debugfs_client;
struct mutex debugfs_client_lock;
bool debugfs_wakelock_held;
- struct gxp_thermal_manager *thermal_mgr;
struct gxp_dma_manager *dma_mgr;
struct gxp_fw_data_manager *data_mgr;
struct gxp_tpu_dev tpu_dev;
- struct gxp_telemetry_manager *telemetry_mgr;
- struct gxp_wakelock_manager *wakelock_mgr;
+ struct gxp_core_telemetry_manager *core_telemetry_mgr;
+ struct gxp_iommu_domain *default_domain;
+ struct gcip_thermal *thermal;
/*
* Pointer to GSA device for firmware authentication.
* May be NULL if the chip does not support firmware authentication
*/
struct device *gsa_dev;
u32 memory_per_core;
- struct gxp_domain_pool *domain_pool;
+ struct gcip_domain_pool *domain_pool;
struct list_head client_list;
struct mutex client_list_lock;
+ /* Pointer and mutex of secure virtual device */
+ struct gxp_virtual_device *secure_vd;
+ struct mutex secure_vd_lock;
+ /*
+ * Buffer shared across firmware.
+ * Its paddr is 0 if the shared buffer is not available.
+ */
+ struct gxp_mapped_resource shared_buf;
+ /*
+	 * If @shared_buf is split into slices, this tracks which slice
+	 * indexes are in use via the ID allocator.
+ */
+ struct ida shared_slice_idp;
+ struct gxp_usage_stats *usage_stats; /* Stores the usage stats */
+
+ void __iomem *sysreg_shareability; /* sysreg shareability csr base */
+ /* Next virtual device ID. */
+ atomic_t next_vdid;
+
+ /* To manage DMA fences. */
+ struct gcip_dma_fence_manager *gfence_mgr;
+
+ /* callbacks for chip-dependent implementations */
+
+ /*
+ * For parsing chip-dependent device tree attributes.
+ *
+ * Called as the first step in the common device probing procedure.
+ *
+	 * Do NOT use non-device-managed allocations in this function, to
+	 * prevent memory leaks if the probe procedure fails.
+	 *
+	 * Returning a non-zero value fails the probe procedure.
+ *
+ * This callback is optional.
+ */
+ int (*parse_dt)(struct platform_device *pdev, struct gxp_dev *gxp);
+ /*
+ * Called when common device probing procedure is done.
+ *
+	 * Returning a non-zero value fails the probe procedure.
+ *
+ * This callback is optional.
+ */
+ int (*after_probe)(struct gxp_dev *gxp);
+ /*
+ * Called before common device removal procedure.
+ *
+ * This callback is optional.
+ */
+ void (*before_remove)(struct gxp_dev *gxp);
+ /*
+ * Device ioctl handler for chip-dependent ioctl calls.
+	 * Should return -ENOTTY when the ioctl should be handled by the
+	 * common device ioctl handler.
+ *
+ * This callback is optional.
+ */
+ long (*handle_ioctl)(struct file *file, uint cmd, ulong arg);
+ /*
+ * Device mmap handler for chip-dependent mmap calls.
+	 * Should return -EOPNOTSUPP when the mmap should be handled by the
+	 * common device mmap handler.
+ *
+ * This callback is optional.
+ */
+ int (*handle_mmap)(struct file *file, struct vm_area_struct *vma);
+ /*
+	 * Called for sending power state requests.
+	 *
+	 * Returning a non-zero value fails the block wakelock acquisition.
+ *
+ * This callback is optional.
+ */
+ int (*request_power_states)(struct gxp_client *client,
+ struct gxp_power_states power_states);
+ /*
+	 * Called when the client has acquired the BLOCK wakelock and allocated a virtual device.
+	 * The caller will hold @gxp->vd_semaphore for writing.
+	 *
+	 * Returning a non-zero value fails the block acquisition.
+ *
+ * This callback is optional.
+ */
+ int (*after_vd_block_ready)(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd);
+ /*
+ * Called before releasing the BLOCK wakelock or the virtual device.
+ * The caller will hold @gxp->vd_semaphore for writing.
+ *
+ * This callback is optional.
+ */
+ void (*before_vd_block_unready)(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd);
+ /*
+	 * Called in the .power_up callback of gcip_pm, after the block is powered on.
+	 *
+	 * This function is called while holding the gcip_pm lock.
+	 *
+	 * Returning a non-zero value fails gcip_pm_get().
+ *
+ * This callback is optional.
+ */
+ int (*pm_after_blk_on)(struct gxp_dev *gxp);
+ /*
+	 * Called in the .power_down callback of gcip_pm, before the block is shut down.
+	 *
+	 * This function is called while holding the gcip_pm lock.
+ *
+ * This callback is optional.
+ */
+ void (*pm_before_blk_off)(struct gxp_dev *gxp);
+ /*
+ * Called in gxp_map_tpu_mbx_queue(), after the TPU mailbox buffers are mapped.
+ *
+	 * This function is called while holding the write lock of @client->semaphore and
+	 * the read lock of @gxp->vd_semaphore.
+ *
+ * This callback is optional.
+ */
+ int (*after_map_tpu_mbx_queue)(struct gxp_dev *gxp,
+ struct gxp_client *client);
+ /*
+ * Called in gxp_unmap_tpu_mbx_queue(), before unmapping the TPU mailbox buffers.
+ *
+	 * This function is called while holding the write lock of @client->semaphore.
+ *
+ * This callback is optional.
+ */
+ void (*before_unmap_tpu_mbx_queue)(struct gxp_dev *gxp,
+ struct gxp_client *client);
};
/* GXP device IO functions */
@@ -123,22 +267,6 @@ static inline void gxp_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
writel(value, gxp->regs.vaddr + reg_offset);
}
-static inline u32 gxp_read_32_core(struct gxp_dev *gxp, uint core,
- uint reg_offset)
-{
- uint offset = GXP_CORE_0_BASE + (GXP_CORE_SIZE * core) + reg_offset;
-
- return gxp_read_32(gxp, offset);
-}
-
-static inline void gxp_write_32_core(struct gxp_dev *gxp, uint core,
- uint reg_offset, u32 value)
-{
- uint offset = GXP_CORE_0_BASE + (GXP_CORE_SIZE * core) + reg_offset;
-
- gxp_write_32(gxp, offset, value);
-}
-
static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
struct resource *r, char *phandle)
{
@@ -158,4 +286,15 @@ static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
return ret;
}
+/*
+ * Returns whether the AP and the DSP cores communicate directly through the core mailboxes.
+ * The platform driver of each chip must implement this.
+ */
+bool gxp_is_direct_mode(struct gxp_dev *gxp);
+
+/*
+ * Returns the chip revision.
+ */
+enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp);
+
#endif /* __GXP_INTERNAL_H__ */
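The callback table added to struct gxp_dev above is filled in by a chip-specific platform driver before the common probe path runs, and gxp_is_direct_mode() / gxp_get_chip_revision() must be provided by that driver. Below is a minimal sketch of such chip glue, for illustration only: the gsx01_example_* names and the "gxp-memory-per-core" device-tree property are assumptions, not part of this change.

/* Illustrative chip glue for the callbacks declared in gxp-internal.h. */
#include <linux/of.h>
#include <linux/platform_device.h>

#include "gxp-internal.h"

static int gsx01_example_parse_dt(struct platform_device *pdev,
				  struct gxp_dev *gxp)
{
	/* Only device-managed work here, so a failed probe leaks nothing. */
	return of_property_read_u32(pdev->dev.of_node, "gxp-memory-per-core",
				    &gxp->memory_per_core);
}

static int gsx01_example_after_probe(struct gxp_dev *gxp)
{
	dev_dbg(gxp->dev, "chip-specific probe finished\n");
	return 0;
}

/* Required by gxp-internal.h: do AP and DSP talk through the core mailboxes? */
bool gxp_is_direct_mode(struct gxp_dev *gxp)
{
	return true;
}

enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp)
{
	return GXP_CHIP_ANY;
}

static void gsx01_example_set_callbacks(struct gxp_dev *gxp)
{
	gxp->parse_dt = gsx01_example_parse_dt;
	gxp->after_probe = gsx01_example_after_probe;
	/* The remaining callbacks are optional and may stay NULL. */
}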
diff --git a/gxp-lpm.c b/gxp-lpm.c
index 1ac8e27..1e51b40 100644
--- a/gxp-lpm.c
+++ b/gxp-lpm.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include "gxp-bpm.h"
+#include "gxp-config.h"
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
@@ -21,7 +22,7 @@
int i = 100000; \
while (i) { \
lpm_state = \
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET) & \
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET) & \
PSM_CURR_STATE_MASK; \
if (condition) \
break; \
@@ -31,26 +32,24 @@
return i != 0; \
} while (0)
-void gxp_lpm_enable_state(struct gxp_dev *gxp, uint psm, uint state)
+void gxp_lpm_enable_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
{
- uint offset = LPM_REG_ENABLE_STATE_0 + (LPM_STATE_TABLE_SIZE * state);
-
/* PS0 should always be enabled */
- if (state == 0)
+ if (state == LPM_ACTIVE_STATE || state > LPM_PG_STATE)
return;
/* Disable all low power states */
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_2, 0x0);
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_3, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE1_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE2_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE3_OFFSET, 0x0);
/* Enable the requested low power state */
- lpm_write_32_psm(gxp, psm, offset, 0x1);
+ lpm_write_32_psm(gxp, psm, state, 0x1);
}
-bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm)
+bool gxp_lpm_is_initialized(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
/*
* state_valid bit goes active and stays high forever the first time you
@@ -62,9 +61,9 @@ bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm)
return false;
}
-bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm)
+bool gxp_lpm_is_powered(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
u32 state;
if (!(status & PSM_STATE_VALID_MASK))
@@ -73,14 +72,14 @@ bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm)
return state == LPM_ACTIVE_STATE || state == LPM_CG_STATE;
}
-uint gxp_lpm_get_state(struct gxp_dev *gxp, uint psm)
+uint gxp_lpm_get_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
return status & PSM_CURR_STATE_MASK;
}
-static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
+static int set_state_internal(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_state)
{
u32 val;
int i = 10000;
@@ -88,13 +87,13 @@ static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
/* Set SW sequencing mode and PS target */
val = LPM_SW_PSM_MODE;
val |= target_state << LPM_CFG_SW_PS_TARGET_OFFSET;
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, val);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, val);
/* Start the SW sequence */
- lpm_write_32_psm(gxp, psm, PSM_START_OFFSET, 0x1);
+ lpm_write_32_psm(gxp, psm, PSM_REG_START_OFFSET, 0x1);
/* Wait for LPM init done (0x60041688) */
- while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
+ while (i && !(lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
@@ -108,7 +107,7 @@ static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
return 0;
}
-int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
+int gxp_lpm_set_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_state,
bool verbose)
{
uint curr_state = gxp_lpm_get_state(gxp, psm);
@@ -120,7 +119,7 @@ int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
dev_warn(gxp->dev,
"Forcing a transition to PS%u on core%u, status: %x\n",
target_state, psm,
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET));
gxp_lpm_enable_state(gxp, psm, target_state);
@@ -137,21 +136,21 @@ int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
gxp->dev,
"Finished forced transition on core %u. target: PS%u, actual: PS%u, status: %x\n",
psm, target_state, gxp_lpm_get_state(gxp, psm),
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET));
/* Set HW sequencing mode */
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, LPM_HW_MODE);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, LPM_HW_MODE);
return 0;
}
-static int psm_enable(struct gxp_dev *gxp, uint psm)
+static int psm_enable(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
int i = 10000;
/* Return early if LPM is already initialized */
if (gxp_lpm_is_initialized(gxp, psm)) {
- if (psm != LPM_TOP_PSM) {
+ if (psm != LPM_PSM_TOP) {
/* Ensure core is in PS3 */
return gxp_lpm_set_state(gxp, psm, LPM_PG_STATE,
/*verbose=*/true);
@@ -161,10 +160,10 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
}
/* Write PSM start bit */
- lpm_write_32_psm(gxp, psm, PSM_START_OFFSET, PSM_START);
+ lpm_write_32_psm(gxp, psm, PSM_REG_START_OFFSET, PSM_START);
/* Wait for LPM init done (0x60041688) */
- while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
+ while (i && !(lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
@@ -174,7 +173,7 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
return 1;
/* Set PSM to HW mode (0x60041680) */
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, PSM_HW_MODE);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, PSM_HW_MODE);
return 0;
}
@@ -182,7 +181,7 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
void gxp_lpm_init(struct gxp_dev *gxp)
{
/* Enable Top PSM */
- if (psm_enable(gxp, LPM_TOP_PSM))
+ if (psm_enable(gxp, LPM_PSM_TOP))
dev_err(gxp->dev, "Timed out when enabling Top PSM!\n");
}
@@ -192,8 +191,8 @@ void gxp_lpm_destroy(struct gxp_dev *gxp)
dev_dbg(gxp->dev, "Kicking Top PSM out of ACG\n");
/* Disable all low-power states for TOP */
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_2, 0x0);
+ lpm_write_32_psm(gxp, LPM_PSM_TOP, PSM_REG_ENABLE_STATE1_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, LPM_PSM_TOP, PSM_REG_ENABLE_STATE2_OFFSET, 0x0);
}
int gxp_lpm_up(struct gxp_dev *gxp, uint core)
@@ -202,14 +201,15 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
gxp_doorbell_clear(gxp, CORE_WAKEUP_DOORBELL(core));
/* Enable core PSM */
- if (psm_enable(gxp, core)) {
+ if (psm_enable(gxp, CORE_TO_PSM(core))) {
dev_err(gxp->dev, "Timed out when enabling Core%u PSM!\n",
core);
return -ETIMEDOUT;
}
- /* Enable PS1 (Clk Gated) */
- gxp_lpm_enable_state(gxp, core, LPM_CG_STATE);
+ /* Enable PS1 (Clk Gated). Only required for core PSMs. */
+ if (core < GXP_NUM_CORES)
+ gxp_lpm_enable_state(gxp, CORE_TO_PSM(core), LPM_CG_STATE);
gxp_bpm_start(gxp, core);
@@ -218,10 +218,10 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
void gxp_lpm_down(struct gxp_dev *gxp, uint core)
{
- if (gxp_lpm_get_state(gxp, core) == LPM_PG_STATE)
+ if (gxp_lpm_get_state(gxp, CORE_TO_PSM(core)) == LPM_PG_STATE)
return;
/* Enable PS3 (Pwr Gated) */
- gxp_lpm_enable_state(gxp, core, LPM_PG_STATE);
+ gxp_lpm_enable_state(gxp, CORE_TO_PSM(core), LPM_PG_STATE);
/* Set wakeup doorbell to trigger an automatic transition to PS3 */
gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core), core);
@@ -232,21 +232,21 @@ void gxp_lpm_down(struct gxp_dev *gxp, uint core)
* Clear the core's interrupt mask and the wakeup doorbell to ensure
* the core will not wake unexpectedly.
*/
- gxp_write_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0, 0);
+ gxp_write_32(gxp, GXP_CORE_REG_COMMON_INT_MASK_0(core), 0);
gxp_doorbell_clear(gxp, CORE_WAKEUP_DOORBELL(core));
/* Ensure core is in PS3 */
- gxp_lpm_set_state(gxp, core, LPM_PG_STATE, /*verbose=*/true);
+ gxp_lpm_set_state(gxp, CORE_TO_PSM(core), LPM_PG_STATE, /*verbose=*/true);
}
-bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, uint psm, uint state)
+bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
{
uint lpm_state;
gxp_lpm_wait_until(lpm_state, lpm_state != state);
}
-bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, uint psm, uint state)
+bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
{
uint lpm_state;
diff --git a/gxp-lpm.h b/gxp-lpm.h
index ee1a749..5af1c89 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -10,15 +10,9 @@
#include <linux/types.h>
+#include "gxp-config.h"
#include "gxp.h"
-enum lpm_psm_csrs {
- LPM_REG_ENABLE_STATE_0 = 0x080,
- LPM_REG_ENABLE_STATE_1 = 0x180,
- LPM_REG_ENABLE_STATE_2 = 0x280,
- LPM_REG_ENABLE_STATE_3 = 0x380,
-};
-
enum lpm_state {
LPM_ACTIVE_STATE = 0,
LPM_CG_STATE = 1,
@@ -26,15 +20,19 @@ enum lpm_state {
LPM_PG_STATE = 3,
};
-#define LPM_STATE_TABLE_SIZE (LPM_REG_ENABLE_STATE_1 - LPM_REG_ENABLE_STATE_0)
+enum psm_reg_offset {
+ PSM_REG_ENABLE_STATE0_OFFSET,
+ PSM_REG_ENABLE_STATE1_OFFSET,
+ PSM_REG_ENABLE_STATE2_OFFSET,
+ PSM_REG_ENABLE_STATE3_OFFSET,
+ PSM_REG_START_OFFSET,
+ PSM_REG_STATUS_OFFSET,
+ PSM_REG_CFG_OFFSET,
+};
#define LPM_INSTRUCTION_OFFSET 0x00000944
#define LPM_INSTRUCTION_MASK 0x03000000
-/*
- * The TOP PSM comes immediately after the last PSM of core, so define its PSM
- * number in terms of the number of cores.
- */
-#define LPM_TOP_PSM GXP_NUM_CORES
+
#define LPM_HW_MODE 0
#define LPM_SW_PSM_MODE 1
@@ -42,10 +40,6 @@ enum lpm_state {
#define CORE_WAKEUP_DOORBELL(__core__) (0 + (__core__))
-#define AUR_DVFS_DOMAIN 17
-#define AUR_DVFS_DEBUG_REQ (1 << 31)
-#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
-
#define PSM_INIT_DONE_MASK 0x80
#define PSM_CURR_STATE_MASK 0x0F
#define PSM_STATE_VALID_MASK 0x10
@@ -75,71 +69,90 @@ void gxp_lpm_down(struct gxp_dev *gxp, uint core);
* Return whether the specified PSM is initialized.
* PSM0-PSM3 are for core0-core3, PSM4 is the TOP LPM.
*/
-bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm);
+bool gxp_lpm_is_initialized(struct gxp_dev *gxp, enum gxp_lpm_psm psm);
/*
* Return whether the specified PSM is powered.
*/
-bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm);
+bool gxp_lpm_is_powered(struct gxp_dev *gxp, enum gxp_lpm_psm psm);
/*
* Wait for the specified @psm to be in any state other than @state
* Return whether the waiting is successful or the timeout occurs.
*/
-bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, uint psm, uint state);
+bool gxp_lpm_wait_state_ne(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state);
/*
* Wait for the specified @psm to be in the specified @state
* Return whether the waiting is successful or the timeout occurs.
*/
-bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, uint psm, uint state);
+bool gxp_lpm_wait_state_eq(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state);
/*
* Force a state transition on the specified PSM.
*/
-int gxp_lpm_set_state(struct gxp_dev *gxp, uint psm, uint target_state,
+int gxp_lpm_set_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_state,
bool verbose);
/*
* Get current LPM state of the specified PSM.
*/
-uint gxp_lpm_get_state(struct gxp_dev *gxp, uint psm);
+uint gxp_lpm_get_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm);
/*
* Enable a state on the specified PSM.
*/
-void gxp_lpm_enable_state(struct gxp_dev *gxp, uint psm, uint state);
+void gxp_lpm_enable_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state);
static inline u32 lpm_read_32(struct gxp_dev *gxp, uint reg_offset)
{
- uint offset = GXP_LPM_BASE + reg_offset;
-
- return gxp_read_32(gxp, offset);
+#ifndef GXP_SEPARATE_LPM_OFFSET
+ reg_offset = GXP_LPM_BASE + reg_offset;
+#endif
+ return readl(gxp->lpm_regs.vaddr + reg_offset);
}
static inline void lpm_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
{
- uint offset = GXP_LPM_BASE + reg_offset;
+#ifndef GXP_SEPARATE_LPM_OFFSET
+ reg_offset = GXP_LPM_BASE + reg_offset;
+#endif
+ writel(value, gxp->lpm_regs.vaddr + reg_offset);
+}
- gxp_write_32(gxp, offset, value);
+static u32 get_reg_offset(struct gxp_dev *gxp, enum psm_reg_offset reg_offset, enum gxp_lpm_psm psm)
+{
+ switch (reg_offset) {
+ case PSM_REG_ENABLE_STATE0_OFFSET:
+ case PSM_REG_ENABLE_STATE1_OFFSET:
+ case PSM_REG_ENABLE_STATE2_OFFSET:
+ case PSM_REG_ENABLE_STATE3_OFFSET:
+ return gxp_lpm_psm_get_state_offset(psm, (uint)reg_offset);
+ case PSM_REG_START_OFFSET:
+ return gxp_lpm_psm_get_start_offset(psm);
+ case PSM_REG_STATUS_OFFSET:
+ return gxp_lpm_psm_get_status_offset(psm);
+ case PSM_REG_CFG_OFFSET:
+ return gxp_lpm_psm_get_cfg_offset(psm);
+ }
+
+ return 0;
}
-static inline u32 lpm_read_32_psm(struct gxp_dev *gxp, uint psm,
- uint reg_offset)
+static inline u32 lpm_read_32_psm(struct gxp_dev *gxp, enum gxp_lpm_psm psm,
+ enum psm_reg_offset reg_offset)
{
- uint offset =
- GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+ uint offset = get_reg_offset(gxp, reg_offset, psm);
- return gxp_read_32(gxp, offset);
+ return lpm_read_32(gxp, offset);
}
-static inline void lpm_write_32_psm(struct gxp_dev *gxp, uint psm,
- uint reg_offset, u32 value)
+static inline void lpm_write_32_psm(struct gxp_dev *gxp, enum gxp_lpm_psm psm,
+ enum psm_reg_offset reg_offset, u32 value)
{
- uint offset =
- GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+ u32 offset = get_reg_offset(gxp, reg_offset, psm);
- gxp_write_32(gxp, offset, value);
+ lpm_write_32(gxp, offset, value);
}
#endif /* __GXP_LPM_H__ */
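As a usage note, here is a minimal sketch of how a caller would combine the PSM helpers declared above to park a core and confirm it reached the power-gated state. The example_park_core() helper is hypothetical; CORE_TO_PSM() comes from gxp-config.h as used in gxp-lpm.c.

#include "gxp-config.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"

/* Illustration only: power-gate @core and verify it landed in PS3. */
static int example_park_core(struct gxp_dev *gxp, uint core)
{
	enum gxp_lpm_psm psm = CORE_TO_PSM(core);

	if (!gxp_lpm_is_initialized(gxp, psm))
		return -ENODEV;

	gxp_lpm_down(gxp, core);

	/* gxp_lpm_down() forces PS3; double-check it via the status CSR. */
	if (!gxp_lpm_wait_state_eq(gxp, psm, LPM_PG_STATE)) {
		dev_warn(gxp->dev, "core %u stuck in PS%u\n", core,
			 gxp_lpm_get_state(gxp, psm));
		return -ETIMEDOUT;
	}
	return 0;
}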
diff --git a/gxp-mailbox-driver.c b/gxp-mailbox-driver.c
new file mode 100644
index 0000000..40fdba1
--- /dev/null
+++ b/gxp-mailbox-driver.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP hardware-based mailbox driver implementation.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <asm/barrier.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox-regs.h"
+#include "gxp-mailbox.h"
+
+static u32 data_read(struct gxp_mailbox *mailbox, uint reg_offset)
+{
+ return readl(mailbox->data_reg_base + reg_offset);
+}
+
+static void data_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
+{
+ writel(value, mailbox->data_reg_base + reg_offset);
+}
+
+/* IRQ Handling */
+
+/* Interrupt to signal a response from the device to host */
+#define MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK BIT(0)
+
+static irqreturn_t mailbox_irq_handler(int irq, void *arg)
+{
+ u32 masked_status;
+ struct gxp_mailbox *mailbox = (struct gxp_mailbox *)arg;
+ struct work_struct **handlers = mailbox->interrupt_handlers;
+ u32 next_int;
+
+ /* Contains only the non-masked, pending interrupt bits */
+ masked_status = gxp_mailbox_get_host_mask_status(mailbox);
+
+ /* Clear all pending IRQ bits */
+ gxp_mailbox_clear_host_interrupt(mailbox, masked_status);
+
+ if (masked_status & MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK) {
+ mailbox->handle_irq(mailbox);
+ masked_status &= ~MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK;
+ }
+
+ while ((next_int = ffs(masked_status))) {
+ next_int--; /* ffs returns 1-based indices */
+ masked_status &= ~BIT(next_int);
+
+ if (handlers[next_int])
+ schedule_work(handlers[next_int]);
+ else
+ pr_err_ratelimited(
+ "mailbox%d: received unknown interrupt bit 0x%X\n",
+ mailbox->core_id, next_int);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void register_irq(struct gxp_mailbox *mailbox)
+{
+ int err;
+ unsigned int virq;
+
+ virq = irq_of_parse_and_map(mailbox->gxp->dev->of_node,
+ mailbox->core_id);
+ if (!virq) {
+ pr_err("Unable to parse interrupt for core %d from the DT\n",
+ mailbox->core_id);
+ return;
+ }
+
+ err = request_irq(virq, mailbox_irq_handler, /*flags=*/0,
+ "aurora_mbx_irq", (void *)mailbox);
+ if (err) {
+ pr_err("Unable to register IRQ num=%d; error=%d\n", virq, err);
+ return;
+ }
+
+ mailbox->interrupt_virq = virq;
+ pr_debug("Core %d's mailbox interrupt registered as IRQ %u.\n",
+ mailbox->core_id, virq);
+}
+
+static void unregister_irq(struct gxp_mailbox *mailbox)
+{
+ if (mailbox->interrupt_virq) {
+ pr_debug("Freeing IRQ %d\n", mailbox->interrupt_virq);
+ free_irq(mailbox->interrupt_virq, mailbox);
+ mailbox->interrupt_virq = 0;
+ }
+}
+
+/* gxp-mailbox-driver.h interface */
+
+u32 gxp_circ_queue_cnt(u32 head, u32 tail, u32 queue_size, u32 wrap_bit)
+{
+ if (CIRCULAR_QUEUE_WRAPPED(tail, wrap_bit) !=
+ CIRCULAR_QUEUE_WRAPPED(head, wrap_bit))
+ return queue_size - CIRCULAR_QUEUE_REAL_INDEX(head, wrap_bit) +
+ CIRCULAR_QUEUE_REAL_INDEX(tail, wrap_bit);
+ else
+ return tail - head;
+}
+
+u32 gxp_circ_queue_inc(u32 index, u32 inc, u32 queue_size, u32 wrap_bit)
+{
+ u32 new_index = CIRCULAR_QUEUE_REAL_INDEX(index, wrap_bit) + inc;
+
+ if (new_index >= queue_size)
+ return (index + inc - queue_size) ^ wrap_bit;
+ else
+ return index + inc;
+}
+
+void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
+{
+ spin_lock_init(&mailbox->cmd_tail_resp_head_lock);
+ spin_lock_init(&mailbox->cmd_head_resp_tail_lock);
+}
+
+void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
+{
+ /* Nothing to cleanup */
+}
+
+void gxp_mailbox_driver_enable_interrupts(struct gxp_mailbox *mailbox)
+{
+ register_irq(mailbox);
+}
+
+void gxp_mailbox_driver_disable_interrupts(struct gxp_mailbox *mailbox)
+{
+ unregister_irq(mailbox);
+}
+
+void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
+{
+ return gxp->mbx[index].vaddr;
+}
+
+void __iomem *gxp_mailbox_get_data_base(struct gxp_dev *gxp, uint index)
+{
+ return gxp->mbx[index].vaddr + MBOX_DATA_REG_BASE;
+}
+
+/* gxp-mailbox-driver.h: Data register-based calls */
+
+void gxp_mailbox_write_status(struct gxp_mailbox *mailbox, u32 status)
+{
+ data_write(mailbox, MBOX_DATA_STATUS_OFFSET, status);
+}
+
+void gxp_mailbox_write_descriptor(struct gxp_mailbox *mailbox,
+ dma_addr_t descriptor_addr)
+{
+ data_write(mailbox, MBOX_DATA_DESCRIPTOR_ADDR_OFFSET, (u32)descriptor_addr);
+}
+
+void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_resp_head;
+ u32 new_cmd_tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ current_resp_head = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET) &
+ RESP_HEAD_MASK;
+ new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET,
+ new_cmd_tail | current_resp_head);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+}
+
+void gxp_mailbox_write_resp_queue_head(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_cmd_tail;
+ u32 new_resp_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ current_cmd_tail = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET) &
+ CMD_TAIL_MASK;
+ new_resp_head = (u32)val << RESP_HEAD_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET,
+ current_cmd_tail | new_resp_head);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+}
+
+u16 gxp_mailbox_read_cmd_queue_head(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ return (u16)((reg_val & CMD_HEAD_MASK) >> CMD_HEAD_SHIFT);
+}
+
+u16 gxp_mailbox_read_resp_queue_tail(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ return (u16)((reg_val & RESP_TAIL_MASK) >> RESP_TAIL_SHIFT);
+}
+
+void gxp_mailbox_write_cmd_queue_head(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_resp_tail;
+ u32 new_cmd_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ current_resp_tail = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET) &
+ RESP_TAIL_MASK;
+ new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET,
+ new_cmd_head | current_resp_tail);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+}
+
+void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val)
+{
+ u32 current_cmd_head;
+ u32 new_resp_tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_head_resp_tail_lock, flags);
+
+ current_cmd_head = data_read(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET) &
+ CMD_HEAD_MASK;
+ new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
+ data_write(mailbox, MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET,
+ current_cmd_head | new_resp_tail);
+
+ spin_unlock_irqrestore(&mailbox->cmd_head_resp_tail_lock, flags);
+}
+
+u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ return (u16)((reg_val & CMD_TAIL_MASK) >> CMD_TAIL_SHIFT);
+}
+
+u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ reg_val = data_read(mailbox, MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET);
+
+ spin_unlock_irqrestore(&mailbox->cmd_tail_resp_head_lock, flags);
+
+ return (u16)((reg_val & RESP_HEAD_MASK) >> RESP_HEAD_SHIFT);
+}
+
+void gxp_mailbox_set_cmd_queue_tail(struct gxp_mailbox *mailbox, u32 value)
+{
+ mailbox->cmd_queue_tail = value;
+ gxp_mailbox_write_cmd_queue_tail(mailbox, value);
+}
+
+void gxp_mailbox_set_resp_queue_head(struct gxp_mailbox *mailbox, u32 value)
+{
+ mailbox->resp_queue_head = value;
+ gxp_mailbox_write_resp_queue_head(mailbox, value);
+}
+
+int gxp_mailbox_inc_cmd_queue_tail_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ u32 head;
+ u32 remain_size;
+ u32 new_tail;
+
+ if (inc > mailbox->cmd_queue_size)
+ return -EINVAL;
+
+ head = gxp_mailbox_read_cmd_queue_head(mailbox);
+ remain_size = mailbox->cmd_queue_size -
+ gxp_circ_queue_cnt(head, mailbox->cmd_queue_tail,
+ mailbox->cmd_queue_size, wrap_bit);
+	/* not enough space left */
+ if (inc > remain_size)
+ return -EBUSY;
+
+ new_tail = gxp_circ_queue_inc(mailbox->cmd_queue_tail, inc,
+ mailbox->cmd_queue_size, wrap_bit);
+ gxp_mailbox_set_cmd_queue_tail(mailbox, new_tail);
+ return 0;
+}
+
+int gxp_mailbox_inc_cmd_queue_tail_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ lockdep_assert_held(&mailbox->cmd_queue_lock);
+ return gxp_mailbox_inc_cmd_queue_tail_nolock(mailbox, inc, wrap_bit);
+}
+
+int gxp_mailbox_inc_resp_queue_head_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ u32 tail;
+ u32 size;
+ u32 new_head;
+
+ if (inc > mailbox->resp_queue_size)
+ return -EINVAL;
+
+ tail = gxp_mailbox_read_resp_queue_tail(mailbox);
+ size = gxp_circ_queue_cnt(mailbox->resp_queue_head, tail,
+ mailbox->resp_queue_size, wrap_bit);
+ if (inc > size)
+ return -EINVAL;
+ new_head = gxp_circ_queue_inc(mailbox->resp_queue_head, inc,
+ mailbox->resp_queue_size, wrap_bit);
+ gxp_mailbox_set_resp_queue_head(mailbox, new_head);
+
+ return 0;
+}
+
+int gxp_mailbox_inc_resp_queue_head_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit)
+{
+ lockdep_assert_held(&mailbox->resp_queue_lock);
+ return gxp_mailbox_inc_resp_queue_head_nolock(mailbox, inc, wrap_bit);
+}
+
+#if !GXP_USE_LEGACY_MAILBOX
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mailbox_read_cmd_queue_head(gxp_mbx);
+}
+
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mbx->cmd_queue_tail;
+}
+
+void gxp_mailbox_gcip_ops_inc_cmd_queue_tail(struct gcip_mailbox *mailbox,
+ u32 inc)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ lockdep_assert_held(&gxp_mbx->cmd_queue_lock);
+ gxp_mailbox_inc_cmd_queue_tail_nolock(gxp_mbx, inc,
+ mailbox->queue_wrap_bit);
+}
+
+int gxp_mailbox_gcip_ops_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox,
+ bool try)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_lock(&gxp_mbx->cmd_queue_lock);
+ return 1;
+}
+
+void gxp_mailbox_gcip_ops_release_cmd_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_unlock(&gxp_mbx->cmd_queue_lock);
+}
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_size(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mbx->resp_queue_size;
+}
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_head(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mbx->resp_queue_head;
+}
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_tail(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ return gxp_mailbox_read_resp_queue_tail(gxp_mbx);
+}
+
+void gxp_mailbox_gcip_ops_inc_resp_queue_head(struct gcip_mailbox *mailbox,
+ u32 inc)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ lockdep_assert_held(&gxp_mbx->resp_queue_lock);
+ gxp_mailbox_inc_resp_queue_head_nolock(gxp_mbx, inc,
+ mailbox->queue_wrap_bit);
+}
+
+int gxp_mailbox_gcip_ops_acquire_resp_queue_lock(struct gcip_mailbox *mailbox,
+ bool try)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_lock(&gxp_mbx->resp_queue_lock);
+ return 1;
+}
+
+void gxp_mailbox_gcip_ops_release_resp_queue_lock(struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_unlock(&gxp_mbx->resp_queue_lock);
+}
+
+void gxp_mailbox_gcip_ops_acquire_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqsave,
+ unsigned long *flags)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_lock(&gxp_mbx->wait_list_lock);
+}
+
+void gxp_mailbox_gcip_ops_release_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqrestore,
+ unsigned long flags)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ mutex_unlock(&gxp_mbx->wait_list_lock);
+}
+
+int gxp_mailbox_gcip_ops_wait_for_cmd_queue_not_full(
+ struct gcip_mailbox *mailbox)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+ u32 tail = gxp_mbx->cmd_queue_tail;
+
+ /*
+ * If the cmd queue is full, it's up to the caller to retry.
+ */
+ if (gxp_mailbox_read_cmd_queue_head(gxp_mbx) ==
+ (tail ^ mailbox->queue_wrap_bit)) {
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+int gxp_mailbox_gcip_ops_after_enqueue_cmd(struct gcip_mailbox *mailbox,
+ void *cmd)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+
+ /* triggers doorbell */
+ gxp_mailbox_generate_device_interrupt(gxp_mbx, BIT(0));
+ return 1;
+}
+
+void gxp_mailbox_gcip_ops_after_fetch_resps(struct gcip_mailbox *mailbox,
+ u32 num_resps)
+{
+ struct gxp_mailbox *gxp_mbx = mailbox->data;
+ u32 size = gxp_mbx->resp_queue_size;
+
+ /*
+ * Now that the response queue has been drained, send an interrupt
+ * to the device in case firmware was waiting for us to consume
+ * responses.
+ */
+ if (num_resps == size)
+ gxp_mailbox_generate_device_interrupt(gxp_mbx, BIT(0));
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
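The accessors above operate on two packed 32-bit data words: CMD_TAIL and RESP_HEAD share one register and CMD_HEAD and RESP_TAIL share the other, which is why each word is guarded by its own spinlock and every 16-bit update is a read-modify-write that preserves the other half. The following standalone sketch illustrates that packing; the EX_* shifts and masks are invented for the example (the real values live in gxp-mailbox-regs.h).

/* Standalone illustration of the packed queue-pointer register layout. */
#include <stdint.h>
#include <stdio.h>

#define EX_CMD_TAIL_SHIFT 16
#define EX_CMD_TAIL_MASK (0xffffu << EX_CMD_TAIL_SHIFT)
#define EX_RESP_HEAD_SHIFT 0
#define EX_RESP_HEAD_MASK (0xffffu << EX_RESP_HEAD_SHIFT)

/* Mirrors gxp_mailbox_write_cmd_queue_tail(): keep the RESP_HEAD half intact. */
static uint32_t example_write_cmd_tail(uint32_t reg, uint16_t new_tail)
{
	uint32_t resp_head = reg & EX_RESP_HEAD_MASK;

	return ((uint32_t)new_tail << EX_CMD_TAIL_SHIFT) | resp_head;
}

int main(void)
{
	uint32_t reg = example_write_cmd_tail(0x00000007u, 0x8003u);

	/* Prints: cmd_tail=0x8003 resp_head=0x0007 */
	printf("cmd_tail=0x%04x resp_head=0x%04x\n",
	       (unsigned int)((reg & EX_CMD_TAIL_MASK) >> EX_CMD_TAIL_SHIFT),
	       (unsigned int)(reg & EX_RESP_HEAD_MASK));
	return 0;
}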
diff --git a/gxp-mailbox-driver.h b/gxp-mailbox-driver.h
index 9271694..30292d2 100644
--- a/gxp-mailbox-driver.h
+++ b/gxp-mailbox-driver.h
@@ -2,13 +2,34 @@
/*
* GXP mailbox driver.
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
#ifndef __GXP_MAILBOX_DRIVER_H__
#define __GXP_MAILBOX_DRIVER_H__
+#include "gxp-config.h"
#include "gxp-mailbox.h"
+#if !GXP_USE_LEGACY_MAILBOX
+#include <gcip/gcip-mailbox.h>
+#endif
+
+/* Utilities of circular queue operations */
+
+#define CIRCULAR_QUEUE_INDEX_MASK(wrap_bit) ((wrap_bit) - 1)
+#define CIRCULAR_QUEUE_WRAPPED(idx, wrap_bit) ((idx) & (wrap_bit))
+#define CIRCULAR_QUEUE_REAL_INDEX(idx, wrap_bit) \
+	((idx) & CIRCULAR_QUEUE_INDEX_MASK(wrap_bit))
+
+/*
+ * Returns the number of elements in a circular queue given its @head, @tail,
+ * @queue_size, and @wrap_bit.
+ */
+u32 gxp_circ_queue_cnt(u32 head, u32 tail, u32 queue_size, u32 wrap_bit);
+
+/* Increases @index of a circular queue by @inc. */
+u32 gxp_circ_queue_inc(u32 index, u32 inc, u32 queue_size, u32 wrap_bit);
+
void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox);
void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox);
@@ -48,4 +69,105 @@ void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val);
u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox);
u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox);
+/* Sets mailbox->cmd_queue_tail and corresponding CSR on device. */
+void gxp_mailbox_set_cmd_queue_tail(struct gxp_mailbox *mailbox, u32 value);
+
+/* Sets mailbox->resp_queue_head and corresponding CSR on device. */
+void gxp_mailbox_set_resp_queue_head(struct gxp_mailbox *mailbox, u32 value);
+
+/*
+ * Increases the command queue tail by @inc.
+ *
+ * The queue uses the mirrored circular buffer arrangement. Each index (head and
+ * tail) has a wrap bit, given by the @wrap_bit argument.
+ * Whenever an index is increased and will exceed the end of the queue, the wrap
+ * bit is xor-ed.
+ *
+ * This method will update both mailbox->cmd_queue_tail and CSR on device.
+ *
+ * Returns 0 on success.
+ * If command queue tail will exceed command queue head after adding @inc,
+ * -EBUSY is returned and all fields remain unchanged. The caller should
+ * handle this case and implement a mechanism to wait until the consumer
+ * consumes commands.
+ *
+ * This doesn't acquire any locks internally. The caller may have to hold its own
+ * lock before calling this function. If the caller must hold `@mailbox->cmd_queue_lock`
+ * before calling this, please use `gxp_mailbox_inc_cmd_queue_tail_locked` function instead.
+ */
+int gxp_mailbox_inc_cmd_queue_tail_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+/*
+ * Wrapper function of `gxp_mailbox_inc_cmd_queue_tail_nolock`.
+ * Caller must hold @mailbox->cmd_queue_lock.
+ */
+int gxp_mailbox_inc_cmd_queue_tail_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+/*
+ * Increases the response queue head by @inc.
+ *
+ * The queue uses the mirrored circular buffer arrangement. Each index (head and
+ * tail) has a wrap bit, given by the @wrap_bit argument.
+ * Whenever an index is increased and will exceed the end of the queue, the wrap
+ * bit is xor-ed.
+ *
+ * This method will update both mailbox->resp_queue_head and CSR on device.
+ *
+ * Returns 0 on success.
+ * -EINVAL is returned if the queue head would exceed the tail of the queue; no
+ * fields or CSRs are updated in this case.
+ *
+ * This doesn't acquire any locks internally. The caller may have to hold its own
+ * lock before calling this function. If the caller must hold @mailbox->resp_queue_lock
+ * before calling this, use gxp_mailbox_inc_resp_queue_head_locked() instead.
+ */
+int gxp_mailbox_inc_resp_queue_head_nolock(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+/*
+ * Wrapper function of `gxp_mailbox_inc_resp_queue_head_nolock`.
+ * Caller must hold @mailbox->resp_queue_lock.
+ */
+int gxp_mailbox_inc_resp_queue_head_locked(struct gxp_mailbox *mailbox, u32 inc,
+ u32 wrap_bit);
+
+#if !GXP_USE_LEGACY_MAILBOX
+/*
+ * The following functions are used when setting the operators of `struct gcip_mailbox_ops`.
+ * To use these functions, @mailbox->data must be set to an instance of `struct gxp_mailbox`.
+ */
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_head(struct gcip_mailbox *mailbox);
+u32 gxp_mailbox_gcip_ops_get_cmd_queue_tail(struct gcip_mailbox *mailbox);
+void gxp_mailbox_gcip_ops_inc_cmd_queue_tail(struct gcip_mailbox *mailbox,
+ u32 inc);
+int gxp_mailbox_gcip_ops_acquire_cmd_queue_lock(struct gcip_mailbox *mailbox,
+ bool try);
+void gxp_mailbox_gcip_ops_release_cmd_queue_lock(struct gcip_mailbox *mailbox);
+
+u32 gxp_mailbox_gcip_ops_get_resp_queue_size(struct gcip_mailbox *mailbox);
+u32 gxp_mailbox_gcip_ops_get_resp_queue_head(struct gcip_mailbox *mailbox);
+u32 gxp_mailbox_gcip_ops_get_resp_queue_tail(struct gcip_mailbox *mailbox);
+void gxp_mailbox_gcip_ops_inc_resp_queue_head(struct gcip_mailbox *mailbox,
+ u32 inc);
+int gxp_mailbox_gcip_ops_acquire_resp_queue_lock(struct gcip_mailbox *mailbox,
+ bool try);
+void gxp_mailbox_gcip_ops_release_resp_queue_lock(struct gcip_mailbox *mailbox);
+
+void gxp_mailbox_gcip_ops_acquire_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqsave,
+ unsigned long *flags);
+void gxp_mailbox_gcip_ops_release_wait_list_lock(struct gcip_mailbox *mailbox,
+ bool irqrestore,
+ unsigned long flags);
+
+int gxp_mailbox_gcip_ops_wait_for_cmd_queue_not_full(
+ struct gcip_mailbox *mailbox);
+int gxp_mailbox_gcip_ops_after_enqueue_cmd(struct gcip_mailbox *mailbox,
+ void *cmd);
+void gxp_mailbox_gcip_ops_after_fetch_resps(struct gcip_mailbox *mailbox,
+ u32 num_resps);
+#endif /* !GXP_USE_LEGACY_MAILBOX */
+
#endif /* __GXP_MAILBOX_DRIVER_H__ */
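To make the mirrored circular buffer arithmetic described above concrete, here is a standalone sketch that re-implements the count/increment helpers outside the kernel and walks one wrap-around. The wrap bit of 16 and the 16-entry queue are assumptions chosen for the example; gxp-mailbox-impl.c uses BIT(15).

/* Standalone re-implementation of the mirrored circular queue helpers,
 * for illustration only; the driver's versions live in gxp-mailbox-driver.c.
 */
#include <stdint.h>
#include <stdio.h>

#define WRAP_BIT 16u			/* example wrap bit only */
#define REAL(idx) ((idx) & (WRAP_BIT - 1))
#define WRAPPED(idx) ((idx) & WRAP_BIT)

static uint32_t queue_cnt(uint32_t head, uint32_t tail, uint32_t size)
{
	if (WRAPPED(tail) != WRAPPED(head))
		return size - REAL(head) + REAL(tail);
	return tail - head;
}

static uint32_t queue_inc(uint32_t index, uint32_t inc, uint32_t size)
{
	if (REAL(index) + inc >= size)
		return (index + inc - size) ^ WRAP_BIT;
	return index + inc;
}

int main(void)
{
	const uint32_t size = 16;
	uint32_t head = 14, tail = 14;

	tail = queue_inc(tail, 4, size);	/* crosses the end: wrap bit flips */
	/* head = 14, tail = 18 -> REAL(tail) = 2, so the queue holds 4 entries */
	printf("tail=%u real=%u count=%u\n", (unsigned int)tail,
	       (unsigned int)REAL(tail),
	       (unsigned int)queue_cnt(head, tail, size));
	return 0;
}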
diff --git a/gxp-mailbox-impl.c b/gxp-mailbox-impl.c
new file mode 100644
index 0000000..6d84dbf
--- /dev/null
+++ b/gxp-mailbox-impl.c
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Legacy implementation of the GXP mailbox interface.
+ * This file must be used only when the kernel driver has to compile the implementation of the
+ * mailbox by itself (i.e., when the target chip can't be compiled with GCIP).
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/slab.h>
+
+#include "gxp-dma.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox-impl.h"
+#include "gxp-mailbox.h"
+#include "gxp.h"
+
+#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
+
+#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
+#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
+
+static int gxp_mailbox_ops_allocate_resources(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core)
+{
+ int ret;
+
+ /* Allocate and initialize the command queue */
+ ret = gxp_dma_alloc_coherent_buf(
+ mailbox->gxp, vd->domain,
+ sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
+ GFP_KERNEL, 0, &mailbox->cmd_queue_buf);
+ if (ret)
+ goto err_cmd_queue;
+
+ mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
+ mailbox->cmd_queue_tail = 0;
+
+ /* Allocate and initialize the response queue */
+ ret = gxp_dma_alloc_coherent_buf(
+ mailbox->gxp, vd->domain,
+ sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
+ GFP_KERNEL, 0, &mailbox->resp_queue_buf);
+ if (ret)
+ goto err_resp_queue;
+
+ mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
+ mailbox->resp_queue_head = 0;
+
+ /* Allocate and initialize the mailbox descriptor */
+ ret = gxp_dma_alloc_coherent_buf(mailbox->gxp, vd->domain,
+ sizeof(struct gxp_mailbox_descriptor),
+ GFP_KERNEL, 0,
+ &mailbox->descriptor_buf);
+ if (ret)
+ goto err_descriptor;
+
+ mailbox->descriptor =
+ (struct gxp_mailbox_descriptor *)mailbox->descriptor_buf.vaddr;
+ mailbox->descriptor->cmd_queue_device_addr =
+ mailbox->cmd_queue_buf.dsp_addr;
+ mailbox->descriptor->resp_queue_device_addr =
+ mailbox->resp_queue_buf.dsp_addr;
+ mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
+ mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
+
+ return 0;
+
+err_descriptor:
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->resp_queue_buf);
+err_resp_queue:
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->cmd_queue_buf);
+err_cmd_queue:
+ return ret;
+}
+
+static void gxp_mailbox_ops_release_resources(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core)
+{
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->cmd_queue_buf);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->resp_queue_buf);
+ gxp_dma_free_coherent_buf(mailbox->gxp, vd->domain,
+ &mailbox->descriptor_buf);
+}
+
+/*
+ * Pops the wait_list until the sequence number of @resp is found, and copies
+ * @resp to the found entry.
+ *
+ * Entries in wait_list should have sequence number in increasing order, but
+ * the responses arriving and being handled may be out-of-order.
+ *
+ * Iterate over the wait_list, comparing #cur->resp->seq with @resp->seq:
+ * 1. #cur->resp->seq > @resp->seq:
+ * - Nothing to do, either @resp is invalid or its command timed out.
+ * - We're done.
+ * 2. #cur->resp->seq == @resp->seq:
+ * - Copy @resp, pop the head.
+ * - If #cur->resp has a destination queue, push it to that queue
+ * - We're done.
+ * 3. #cur->resp->seq < @resp->seq:
+ * - @resp has arrived out of sequence order.
+ * - Leave #cur->resp in the wait_list.
+ * - Keep iterating unless the list is exhausted.
+ */
+static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
+ const struct gxp_response *resp)
+{
+ struct gxp_mailbox_wait_list *cur, *nxt;
+ struct gxp_async_response *async_resp;
+ unsigned long flags;
+
+ mutex_lock(&mailbox->wait_list_lock);
+
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ if (cur->resp->seq > resp->seq) {
+ /*
+ * This response has already timed out and been removed
+ * from the wait list (or this is an invalid response).
+ * Drop it.
+ */
+ break;
+ }
+ if (cur->resp->seq == resp->seq) {
+ memcpy(cur->resp, resp, sizeof(*resp));
+ list_del(&cur->list);
+ if (cur->is_async) {
+ async_resp =
+ container_of(cur->resp,
+ struct gxp_async_response,
+ resp);
+
+ cancel_delayed_work(&async_resp->timeout_work);
+ gxp_pm_update_requested_power_states(
+ async_resp->mailbox->gxp,
+ async_resp->requested_states,
+ off_states);
+
+ spin_lock_irqsave(async_resp->dest_queue_lock,
+ flags);
+
+ list_add_tail(&async_resp->list_entry,
+ async_resp->dest_queue);
+ /*
+ * Marking the dest_queue as NULL indicates the
+ * response was handled in case its timeout
+ * handler fired between acquiring the
+ * wait_list_lock and cancelling the timeout.
+ */
+ async_resp->dest_queue = NULL;
+
+ /*
+ * Don't release the dest_queue_lock until both
+ * any eventfd has been signaled and any waiting
+ * thread has been woken. Otherwise one thread
+ * might consume and free the response before
+ * this function is done with it.
+ */
+ if (async_resp->eventfd) {
+ gxp_eventfd_signal(async_resp->eventfd);
+ gxp_eventfd_put(async_resp->eventfd);
+ }
+
+ wake_up(async_resp->dest_queue_waitq);
+
+ spin_unlock_irqrestore(
+ async_resp->dest_queue_lock, flags);
+ }
+ kfree(cur);
+ break;
+ }
+ }
+
+ mutex_unlock(&mailbox->wait_list_lock);
+}
+
+/*
+ * Fetches elements in the response queue.
+ *
+ * Returns the pointer of fetched response elements.
+ * @total_ptr will be the number of elements fetched.
+ *
+ * Returns -ENOMEM if failed on memory allocation.
+ * Returns NULL if the response queue is empty.
+ */
+static struct gxp_response *
+gxp_mailbox_fetch_responses(struct gxp_mailbox *mailbox, u32 *total_ptr)
+{
+ u32 head;
+ u32 tail;
+ u32 count;
+ u32 i;
+ u32 j;
+ u32 total = 0;
+ const u32 size = mailbox->resp_queue_size;
+ const struct gxp_response *queue = mailbox->resp_queue_buf.vaddr;
+ struct gxp_response *ret = NULL;
+ struct gxp_response *prev_ptr = NULL;
+
+ mutex_lock(&mailbox->resp_queue_lock);
+
+ head = mailbox->resp_queue_head;
+	/* loop until our head equals the CSR tail */
+ while (1) {
+ tail = gxp_mailbox_read_resp_queue_tail(mailbox);
+ count = gxp_circ_queue_cnt(head, tail, size,
+ CIRCULAR_QUEUE_WRAP_BIT);
+ if (count == 0)
+ break;
+
+ prev_ptr = ret;
+ ret = krealloc(prev_ptr, (total + count) * sizeof(*queue),
+ GFP_KERNEL);
+ /*
+		 * Out of memory; return the previously fetched responses
+		 * if any, or -ENOMEM otherwise.
+ */
+ if (!ret) {
+ if (!prev_ptr)
+ ret = ERR_PTR(-ENOMEM);
+ else
+ ret = prev_ptr;
+ break;
+ }
+ /* copy responses */
+ j = CIRCULAR_QUEUE_REAL_INDEX(head, CIRCULAR_QUEUE_WRAP_BIT);
+ for (i = 0; i < count; i++) {
+ memcpy(&ret[total], &queue[j], sizeof(*queue));
+ ret[total].status = GXP_RESP_OK;
+ j = (j + 1) % size;
+ total++;
+ }
+ head = gxp_circ_queue_inc(head, count, size,
+ CIRCULAR_QUEUE_WRAP_BIT);
+ }
+ gxp_mailbox_inc_resp_queue_head_locked(mailbox, total,
+ CIRCULAR_QUEUE_WRAP_BIT);
+
+ mutex_unlock(&mailbox->resp_queue_lock);
+ /*
+ * Now that the response queue has been drained, send an interrupt
+ * to the device in case firmware was waiting for us to consume
+ * responses.
+ */
+ if (total == size) {
+ /* TODO(b/190868834) define interrupt bits */
+ gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+ }
+
+ *total_ptr = total;
+ return ret;
+}
+
+/* Default operators for the DSP mailbox */
+struct gxp_mailbox_ops gxp_mailbox_default_ops = {
+ .allocate_resources = gxp_mailbox_ops_allocate_resources,
+ .release_resources = gxp_mailbox_ops_release_resources,
+};
+
+/* Default arguments for the DSP mailbox */
+const struct gxp_mailbox_args gxp_mailbox_default_args = {
+ .type = GXP_MBOX_TYPE_GENERAL,
+ .ops = &gxp_mailbox_default_ops,
+ .data = NULL,
+};
+
+/*
+ * Adds @resp to @mailbox->wait_list.
+ *
+ * wait_list is a FIFO queue, with sequence numbers in increasing order.
+ *
+ * Returns 0 on success, or -ENOMEM if failed on allocation.
+ */
+static int gxp_mailbox_push_wait_resp(struct gxp_mailbox *mailbox,
+ struct gxp_response *resp, bool is_async)
+{
+ struct gxp_mailbox_wait_list *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return -ENOMEM;
+ entry->resp = resp;
+ entry->is_async = is_async;
+ mutex_lock(&mailbox->wait_list_lock);
+ list_add_tail(&entry->list, &mailbox->wait_list);
+ mutex_unlock(&mailbox->wait_list_lock);
+
+ return 0;
+}
+
+/*
+ * Removes the response previously pushed with gxp_mailbox_push_wait_resp().
+ *
+ * This is used when the kernel gives up waiting for the response.
+ */
+static void gxp_mailbox_del_wait_resp(struct gxp_mailbox *mailbox,
+ struct gxp_response *resp)
+{
+ struct gxp_mailbox_wait_list *cur;
+
+ mutex_lock(&mailbox->wait_list_lock);
+
+ list_for_each_entry (cur, &mailbox->wait_list, list) {
+ if (cur->resp->seq > resp->seq) {
+ /*
+ * Sequence numbers in wait_list are in increasing
+ * order. This case implies no entry in the list
+ * matches @resp's sequence number.
+ */
+ break;
+ }
+ if (cur->resp->seq == resp->seq) {
+ list_del(&cur->list);
+ kfree(cur);
+ break;
+ }
+ }
+
+ mutex_unlock(&mailbox->wait_list_lock);
+}
+
+static int gxp_mailbox_enqueue_cmd(struct gxp_mailbox *mailbox,
+ struct gxp_command *cmd,
+ struct gxp_response *resp,
+ bool resp_is_async)
+{
+ int ret;
+ u32 tail;
+ struct gxp_command *cmd_queue = mailbox->cmd_queue_buf.vaddr;
+
+ mutex_lock(&mailbox->cmd_queue_lock);
+
+ cmd->seq = mailbox->cur_seq;
+ /*
+ * The lock ensures mailbox->cmd_queue_tail cannot be changed by
+ * other processes (this method should be the only one to modify the
+ * value of tail), therefore we can remember its value here and use it
+ * in various places below.
+ */
+ tail = mailbox->cmd_queue_tail;
+
+ /*
+ * If the cmd queue is full, it's up to the caller to retry.
+ */
+ if (gxp_mailbox_read_cmd_queue_head(mailbox) ==
+ (tail ^ CIRCULAR_QUEUE_WRAP_BIT)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (resp) {
+ /*
+ * Add @resp to the wait_list only if the cmd can be pushed
+ * successfully.
+ */
+ resp->seq = cmd->seq;
+ resp->status = GXP_RESP_WAITING;
+ ret = gxp_mailbox_push_wait_resp(mailbox, resp, resp_is_async);
+ if (ret)
+ goto out;
+ }
+ /* size of cmd_queue is a multiple of sizeof(*cmd) */
+ memcpy(cmd_queue +
+ CIRCULAR_QUEUE_REAL_INDEX(tail, CIRCULAR_QUEUE_WRAP_BIT),
+ cmd, sizeof(*cmd));
+ gxp_mailbox_inc_cmd_queue_tail_locked(mailbox, 1,
+ CIRCULAR_QUEUE_WRAP_BIT);
+ /* triggers doorbell */
+ /* TODO(b/190868834) define interrupt bits */
+ gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+ /* bumps sequence number after the command is sent */
+ mailbox->cur_seq++;
+ ret = 0;
+out:
+ mutex_unlock(&mailbox->cmd_queue_lock);
+ if (ret)
+ dev_err(mailbox->gxp->dev, "%s: ret=%d", __func__, ret);
+
+ return ret;
+}
+
+static int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
+ struct gxp_command *cmd,
+ struct gxp_response *resp)
+{
+ int ret;
+
+ ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, resp,
+ /* resp_is_async = */ false);
+ if (ret)
+ return ret;
+ ret = wait_event_timeout(mailbox->wait_list_waitq,
+ resp->status != GXP_RESP_WAITING,
+ msecs_to_jiffies(MAILBOX_TIMEOUT));
+ if (!ret) {
+ dev_notice(mailbox->gxp->dev, "%s: event wait timeout",
+ __func__);
+ gxp_mailbox_del_wait_resp(mailbox, resp);
+ return -ETIMEDOUT;
+ }
+ if (resp->status != GXP_RESP_OK) {
+ dev_notice(mailbox->gxp->dev, "%s: resp status=%u", __func__,
+ resp->status);
+ return -ENOMSG;
+ }
+
+ return resp->retval;
+}
+
+static void async_cmd_timeout_work(struct work_struct *work)
+{
+ struct gxp_async_response *async_resp = container_of(
+ work, struct gxp_async_response, timeout_work.work);
+ unsigned long flags;
+
+ /*
+ * This function will acquire the mailbox wait_list_lock. This means if
+ * response processing is in progress, it will complete before this
+ * response can be removed from the wait list.
+ *
+ * Once this function has the wait_list_lock, no future response
+ * processing will begin until this response has been removed.
+ */
+ gxp_mailbox_del_wait_resp(async_resp->mailbox, &async_resp->resp);
+
+ /*
+ * Check if this response still has a valid destination queue, in case
+ * an in-progress call to `gxp_mailbox_handle_response()` completed
+ * the response while `gxp_mailbox_del_wait_resp()` was waiting for
+ * the wait_list_lock.
+ */
+ spin_lock_irqsave(async_resp->dest_queue_lock, flags);
+ if (async_resp->dest_queue) {
+ async_resp->resp.status = GXP_RESP_CANCELLED;
+ list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
+ spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+
+ gxp_pm_update_requested_power_states(
+ async_resp->mailbox->gxp, async_resp->requested_states,
+ off_states);
+
+ if (async_resp->eventfd) {
+ gxp_eventfd_signal(async_resp->eventfd);
+ gxp_eventfd_put(async_resp->eventfd);
+ }
+
+ wake_up(async_resp->dest_queue_waitq);
+ } else {
+ spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
+ }
+}
+
+static int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
+ struct gxp_command *cmd,
+ struct list_head *resp_queue,
+ spinlock_t *queue_lock,
+ wait_queue_head_t *queue_waitq,
+ struct gxp_power_states power_states,
+ struct gxp_eventfd *eventfd)
+{
+ struct gxp_async_response *async_resp;
+ int ret;
+
+ async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
+ if (!async_resp)
+ return -ENOMEM;
+
+ async_resp->mailbox = mailbox;
+ async_resp->dest_queue = resp_queue;
+ async_resp->dest_queue_lock = queue_lock;
+ async_resp->dest_queue_waitq = queue_waitq;
+ async_resp->requested_states = power_states;
+ if (eventfd && gxp_eventfd_get(eventfd))
+ async_resp->eventfd = eventfd;
+ else
+ async_resp->eventfd = NULL;
+
+ INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
+ schedule_delayed_work(&async_resp->timeout_work,
+ msecs_to_jiffies(MAILBOX_TIMEOUT));
+
+ gxp_pm_update_requested_power_states(mailbox->gxp, off_states,
+ power_states);
+ ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
+ /* resp_is_async = */ true);
+ if (ret)
+ goto err_free_resp;
+
+ return 0;
+
+err_free_resp:
+ gxp_pm_update_requested_power_states(mailbox->gxp, power_states,
+ off_states);
+ cancel_delayed_work_sync(&async_resp->timeout_work);
+ kfree(async_resp);
+ return ret;
+}
+
+static struct gxp_mailbox *
+gxp_mailbox_manager_allocate_mailbox(struct gxp_mailbox_manager *mgr,
+ struct gxp_virtual_device *vd,
+ uint virt_core, u8 core_id)
+{
+ struct gxp_mailbox *mailbox = gxp_mailbox_alloc(
+ mgr, vd, virt_core, core_id, &gxp_mailbox_default_args);
+
+ if (!IS_ERR(mailbox))
+ gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+ return mailbox;
+}
+
+static int gxp_mailbox_manager_execute_cmd(
+ struct gxp_client *client, struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
+ u32 cmd_flags, u8 num_cores, struct gxp_power_states power_states,
+ u64 *resp_seq, u16 *resp_status)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_command cmd;
+ struct gxp_response resp;
+ struct buffer_descriptor buffer;
+ int ret;
+
+ /* Pack the command structure */
+ buffer.address = cmd_daddr;
+ buffer.size = cmd_size;
+ buffer.flags = cmd_flags;
+ /* cmd.seq is assigned by mailbox implementation */
+ cmd.code = cmd_code; /* All IOCTL commands are dispatch */
+ cmd.priority = cmd_priority; /* currently unused */
+ cmd.buffer_descriptor = buffer;
+
+ down_read(&gxp->vd_semaphore);
+ ret = gxp_mailbox_execute_cmd(mailbox, &cmd, &resp);
+ up_read(&gxp->vd_semaphore);
+
+	/* resp.seq and resp.status may be updated even if processing the command failed */
+ if (resp_seq)
+ *resp_seq = resp.seq;
+ if (resp_status)
+ *resp_status = resp.status;
+
+ return ret;
+}
+
+static int gxp_mailbox_manager_execute_cmd_async(
+ struct gxp_client *client, struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr, u32 cmd_size,
+ u32 cmd_flags, struct gxp_power_states power_states, u64 *cmd_seq)
+{
+ struct gxp_command cmd;
+ struct buffer_descriptor buffer;
+ struct mailbox_resp_queue *resp_queue =
+ &client->vd->mailbox_resp_queues[virt_core];
+ struct gxp_eventfd *eventfd = client->mb_eventfds[virt_core];
+ int ret;
+
+ /* Pack the command structure */
+ buffer.address = cmd_daddr;
+ buffer.size = cmd_size;
+ buffer.flags = cmd_flags;
+ /* cmd.seq is assigned by mailbox implementation */
+ cmd.code = cmd_code; /* All IOCTL commands are dispatch */
+ cmd.priority = cmd_priority; /* currently unused */
+ cmd.buffer_descriptor = buffer;
+
+ ret = gxp_mailbox_execute_cmd_async(
+ mailbox, &cmd, &resp_queue->dest_queue, &resp_queue->lock,
+ &resp_queue->waitq, power_states, eventfd);
+
+ if (cmd_seq)
+ *cmd_seq = cmd.seq;
+
+ return ret;
+}
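+
+/*
+ * Rough call flow for the async path (illustrative sketch only; the actual
+ * call sites live in the IOCTL layer, see gxp-mailbox-manager.h):
+ *
+ *	mgr->execute_cmd_async(client, mgr->mailboxes[core_id], virt_core, ...);
+ *	...
+ *	mgr->wait_async_resp(client, virt_core, &seq, &status, &retval, &error);
+ */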
+
+static int gxp_mailbox_manager_wait_async_resp(struct gxp_client *client,
+ int virt_core, u64 *resp_seq,
+ u16 *resp_status,
+ u32 *resp_retval,
+ u16 *error_code)
+{
+ struct gxp_async_response *resp_ptr;
+ struct mailbox_resp_queue *resp_queue =
+ &client->vd->mailbox_resp_queues[virt_core];
+ long timeout;
+
+ spin_lock_irq(&resp_queue->lock);
+
+ /*
+ * The "exclusive" version of wait_event is used since each wake
+ * corresponds to the addition of exactly one new response to be
+ * consumed. Therefore, only one waiting response ioctl can ever
+ * proceed per wake event.
+ */
+ timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
+ resp_queue->waitq, !list_empty(&resp_queue->dest_queue),
+ resp_queue->lock, msecs_to_jiffies(MAILBOX_TIMEOUT));
+ if (timeout <= 0) {
+ spin_unlock_irq(&resp_queue->lock);
+ /* unusual case - this only happens when there is no command pushed */
+ return timeout ? -ETIMEDOUT : timeout;
+ }
+ resp_ptr = list_first_entry(&resp_queue->dest_queue,
+ struct gxp_async_response, list_entry);
+
+ /* Pop the front of the response list */
+ list_del(&(resp_ptr->list_entry));
+
+ spin_unlock_irq(&resp_queue->lock);
+
+ if (resp_seq)
+ *resp_seq = resp_ptr->resp.seq;
+ if (resp_status)
+ *resp_status = resp_ptr->resp.status;
+
+ switch (resp_ptr->resp.status) {
+ case GXP_RESP_OK:
+ if (error_code)
+ *error_code = GXP_RESPONSE_ERROR_NONE;
+ /* retval is only valid if status == GXP_RESP_OK */
+ if (resp_retval)
+ *resp_retval = resp_ptr->resp.retval;
+ break;
+ case GXP_RESP_CANCELLED:
+ if (error_code)
+ *error_code = GXP_RESPONSE_ERROR_TIMEOUT;
+ break;
+ default:
+ /* No other status values are valid at this point */
+ WARN(true, "Completed response had invalid status %hu",
+ resp_ptr->resp.status);
+ if (error_code)
+ *error_code = GXP_RESPONSE_ERROR_INTERNAL;
+ break;
+ }
+
+ /*
+ * We must be absolutely sure the timeout work has been cancelled
+ * and/or completed before freeing the `gxp_async_response`.
+ * There are 3 possible cases when we arrive at this point:
+ * 1) The response arrived normally and the timeout was cancelled
+ * 2) The response timed out and its timeout handler finished
+ * 3) The response handler and timeout handler raced, and the response
+ * handler "cancelled" the timeout handler while it was already in
+ * progress.
+ *
+ * This call handles case #3, and ensures any in-process timeout
+ * handler (which may reference the `gxp_async_response`) has
+ * been able to exit cleanly.
+ */
+ cancel_delayed_work_sync(&resp_ptr->timeout_work);
+ kfree(resp_ptr);
+
+ return 0;
+}
+
+static void gxp_mailbox_manager_release_unconsumed_async_resps(
+ struct gxp_virtual_device *vd)
+{
+ struct gxp_async_response *cur, *nxt;
+ int i;
+ unsigned long flags;
+
+ /* Cleanup any unconsumed responses */
+ for (i = 0; i < vd->num_cores; i++) {
+ /*
+ * Since the VD is being released, locking is not strictly necessary
+ * here. Do it anyway for consistency.
+ */
+ spin_lock_irqsave(&vd->mailbox_resp_queues[i].lock, flags);
+ list_for_each_entry_safe (
+ cur, nxt, &vd->mailbox_resp_queues[i].dest_queue,
+ list_entry) {
+ list_del(&cur->list_entry);
+ kfree(cur);
+ }
+ spin_unlock_irqrestore(&vd->mailbox_resp_queues[i].lock, flags);
+ }
+}
+
+static void gxp_mailbox_manager_set_ops(struct gxp_mailbox_manager *mgr)
+{
+ mgr->allocate_mailbox = gxp_mailbox_manager_allocate_mailbox;
+ mgr->release_mailbox = gxp_mailbox_release;
+ mgr->reset_mailbox = gxp_mailbox_reset;
+ mgr->execute_cmd = gxp_mailbox_manager_execute_cmd;
+ mgr->execute_cmd_async = gxp_mailbox_manager_execute_cmd_async;
+ mgr->wait_async_resp = gxp_mailbox_manager_wait_async_resp;
+ mgr->release_unconsumed_async_resps =
+ gxp_mailbox_manager_release_unconsumed_async_resps;
+}
+
+void gxp_mailbox_init(struct gxp_mailbox_manager *mgr)
+{
+ gxp_mailbox_manager_set_ops(mgr);
+}
+
+int gxp_mailbox_init_consume_responses(struct gxp_mailbox *mailbox)
+{
+ mailbox->cur_seq = 0;
+ init_waitqueue_head(&mailbox->wait_list_waitq);
+ INIT_LIST_HEAD(&mailbox->wait_list);
+
+ return 0;
+}
+
+void gxp_mailbox_release_consume_responses(struct gxp_mailbox *mailbox)
+{
+ struct gxp_mailbox_wait_list *cur, *nxt;
+ struct gxp_async_response *async_resp;
+ struct list_head resps_to_flush;
+ unsigned long flags;
+
+ /*
+ * At this point only async responses should be pending. Flush them all
+ * from the `wait_list` at once so any remaining timeout workers
+ * waiting on `wait_list_lock` will know their responses have been
+ * handled already.
+ */
+ INIT_LIST_HEAD(&resps_to_flush);
+ mutex_lock(&mailbox->wait_list_lock);
+ list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
+ list_del(&cur->list);
+ if (cur->is_async) {
+ list_add_tail(&cur->list, &resps_to_flush);
+ /*
+ * Clear the response's destination queue so that if the
+ * timeout worker is running, it won't try to process
+ * this response after `wait_list_lock` is released.
+ */
+ async_resp = container_of(
+ cur->resp, struct gxp_async_response, resp);
+ spin_lock_irqsave(async_resp->dest_queue_lock, flags);
+ async_resp->dest_queue = NULL;
+ spin_unlock_irqrestore(async_resp->dest_queue_lock,
+ flags);
+
+ } else {
+ dev_warn(
+ mailbox->gxp->dev,
+ "Unexpected synchronous command pending on mailbox release\n");
+ kfree(cur);
+ }
+ }
+ mutex_unlock(&mailbox->wait_list_lock);
+
+ /*
+ * Cancel the timeout work of, and free, any responses that were still
+ * in the `wait_list` above.
+ */
+ list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
+ list_del(&cur->list);
+ async_resp = container_of(cur->resp, struct gxp_async_response,
+ resp);
+ cancel_delayed_work_sync(&async_resp->timeout_work);
+ kfree(async_resp);
+ kfree(cur);
+ }
+}
+
+void gxp_mailbox_consume_responses(struct gxp_mailbox *mailbox)
+{
+ struct gxp_response *responses;
+ u32 i;
+ u32 count = 0;
+
+ /* fetch responses and bump RESP_QUEUE_HEAD */
+ responses = gxp_mailbox_fetch_responses(mailbox, &count);
+ if (IS_ERR(responses)) {
+ dev_err(mailbox->gxp->dev,
+ "GXP Mailbox failed on fetching responses: %ld",
+ PTR_ERR(responses));
+ return;
+ }
+
+ for (i = 0; i < count; i++)
+ gxp_mailbox_handle_response(mailbox, &responses[i]);
+ /*
+ * Responses handled, wake up threads that are waiting for a response.
+ */
+ wake_up(&mailbox->wait_list_waitq);
+ kfree(responses);
+}
diff --git a/gxp-mailbox-impl.h b/gxp-mailbox-impl.h
new file mode 100644
index 0000000..9a78e65
--- /dev/null
+++ b/gxp-mailbox-impl.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Legacy implementation of the GXP mailbox interface.
+ * This file must be used only when the kernel driver has to compile the mailbox implementation
+ * by itself (i.e., when the target chip can't be built with GCIP).
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_MAILBOX_IMPL_H__
+#define __GXP_MAILBOX_IMPL_H__
+
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "gxp-eventfd.h"
+#include "gxp-pm.h"
+
+/*
+ * Basic Buffer descriptor struct for message payloads.
+ */
+struct buffer_descriptor {
+ /* Address in the device's virtual address space. */
+ u64 address;
+ /* Size in bytes. */
+ u32 size;
+ /* Flags can be used to indicate message type, etc. */
+ u32 flags;
+};
+
+/*
+ * Format used for mailbox command queues.
+ */
+struct gxp_command {
+ /* Sequence number. Should match the corresponding response. */
+ u64 seq;
+ /*
+ * Identifies the type of command.
+ * Should be a value from `gxp_mailbox_command_code`
+ */
+ u16 code;
+ /*
+ * Priority level from 0 to 99, with 0 being the highest. Pending
+ * commands with higher priorities will be executed before lower
+ * priority ones.
+ */
+ u8 priority;
+ /*
+ * Insert spaces to make padding explicit. This does not affect
+ * alignment.
+ */
+ u8 reserved[5];
+ /* Struct describing the buffer containing the message payload */
+ struct buffer_descriptor buffer_descriptor;
+};
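+
+/*
+ * For illustration, a command is packed roughly as follows before being
+ * enqueued (a minimal sketch; `code` comes from `gxp_mailbox_command_code`,
+ * `priority` is currently unused and `seq` is assigned by the mailbox; see
+ * gxp_mailbox_manager_execute_cmd() for the real packing logic):
+ *
+ *	struct buffer_descriptor buffer = {
+ *		.address = cmd_daddr,
+ *		.size = cmd_size,
+ *		.flags = cmd_flags,
+ *	};
+ *	struct gxp_command cmd = {
+ *		.code = cmd_code,
+ *		.priority = cmd_priority,
+ *		.buffer_descriptor = buffer,
+ *	};
+ */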
+
+/*
+ * Format used for mailbox response queues from kernel.
+ */
+struct gxp_response {
+ /* Sequence number. Should match the corresponding command. */
+ u64 seq;
+ /* The status code. Either SUCCESS or an error. */
+ u16 status;
+ /* Padding. */
+ u16 reserved;
+ /* Return value, dependent on the command this responds to. */
+ u32 retval;
+};
+
+/*
+ * Wrapper struct for responses consumed by a thread other than the one which
+ * sent the command.
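+ *
+ * Lifecycle: allocated when an async command is sent, moved onto @dest_queue
+ * when the response arrives or times out, and freed either by the consumer of
+ * @dest_queue or when the owning virtual device / mailbox is released.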
+ */
+struct gxp_async_response {
+ struct list_head list_entry;
+ struct gxp_response resp;
+ struct delayed_work timeout_work;
+ /*
+ * If this response times out, this pointer to the owning mailbox is
+ * needed to delete this response from the list of pending responses.
+ */
+ struct gxp_mailbox *mailbox;
+ /* Queue to add the response to once it is complete or timed out */
+ struct list_head *dest_queue;
+ /*
+ * The lock that protects queue pointed to by `dest_queue`.
+ * The mailbox code also uses this lock to protect changes to the
+ * `dest_queue` pointer itself when processing this response.
+ */
+ spinlock_t *dest_queue_lock;
+ /* Queue of clients to notify when this response is processed */
+ wait_queue_head_t *dest_queue_waitq;
+ /* Specified power states vote during the command execution */
+ struct gxp_power_states requested_states;
+ /* gxp_eventfd to signal when the response completes. May be NULL */
+ struct gxp_eventfd *eventfd;
+};
+
+struct gxp_mailbox_wait_list {
+ struct list_head list;
+ struct gxp_response *resp;
+ bool is_async;
+};
+
+struct gxp_mailbox;
+struct gxp_mailbox_args;
+struct gxp_mailbox_manager;
+
+extern const struct gxp_mailbox_args gxp_mailbox_default_args;
+
+/* Initializes operators of @mgr to work with the legacy implementation of mailbox. */
+void gxp_mailbox_init(struct gxp_mailbox_manager *mgr);
+
+/*
+ * The following functions are called by `gxp-mailbox.c` according to its internal logic.
+ * Do not call them directly.
+ */
+
+/*
+ * Initializes the mailbox to be able to wait and consume responses.
+ * This function will be called when the `gxp_mailbox_alloc` function is called.
+ */
+int gxp_mailbox_init_consume_responses(struct gxp_mailbox *mailbox);
+
+/*
+ * Flushes all pending responses in the mailbox.
+ * This function will be called when the `gxp_mailbox_release` function is called.
+ */
+void gxp_mailbox_release_consume_responses(struct gxp_mailbox *mailbox);
+
+/*
+ * Fetches and handles responses, then wakes up threads that are waiting for a response.
+ * This function will be called by a worker which is scheduled in the IRQ handler. (See the
+ * `gxp_mailbox_consume_responses_work` function.) To prevent use-after-free or race-condition
+ * bugs, gxp_mailbox_release() must be called before freeing the mailbox.
+ */
+void gxp_mailbox_consume_responses(struct gxp_mailbox *mailbox);
+
+#endif /* __GXP_MAILBOX_IMPL_H__ */
diff --git a/gxp-mailbox-manager.c b/gxp-mailbox-manager.c
new file mode 100644
index 0000000..1085a51
--- /dev/null
+++ b/gxp-mailbox-manager.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The mailbox manager abstracts the mailbox interfaces for user commands.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox-manager.h"
+#include "gxp-mailbox.h"
+
+struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
+ uint num_cores)
+{
+ struct gxp_mailbox_manager *mgr;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->gxp = gxp;
+ mgr->num_cores = num_cores;
+ mgr->get_mailbox_csr_base = gxp_mailbox_get_csr_base;
+ mgr->get_mailbox_data_base = gxp_mailbox_get_data_base;
+
+ mgr->mailboxes = devm_kcalloc(gxp->dev, mgr->num_cores,
+ sizeof(*mgr->mailboxes), GFP_KERNEL);
+ if (!mgr->mailboxes)
+ return ERR_PTR(-ENOMEM);
+
+ return mgr;
+}
diff --git a/gxp-mailbox-manager.h b/gxp-mailbox-manager.h
new file mode 100644
index 0000000..24cd16b
--- /dev/null
+++ b/gxp-mailbox-manager.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The mailbox manager abstracts the mailbox interfaces for user commands.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_MAILBOX_MANAGER_H__
+#define __GXP_MAILBOX_MANAGER_H__
+
+#include "gxp-internal.h"
+
+struct gxp_mailbox;
+
+typedef void __iomem *(*get_mailbox_base_t)(struct gxp_dev *gxp, uint index);
+
+/*
+ * The following callbacks are used to manipulate the mailbox and communicate with the
+ * firmware. By using these callbacks instead of calling the functions of each interface directly,
+ * we can abstract the mailbox and reduce the effort of updating code outside of the mailbox when
+ * we refactor the mailbox in the future.
+ */
+
+/*
+ * Called to allocate a mailbox. The mailbox will be released by `release_mailbox_t`.
+ *
+ * Returns a pointer to the allocated mailbox, or an error pointer if an error occurred.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef struct gxp_mailbox *(*allocate_mailbox_t)(
+ struct gxp_mailbox_manager *mgr, struct gxp_virtual_device *vd,
+ uint virt_core, u8 core_id);
+
+/*
+ * Called to release @mailbox previously allocated by `allocate_mailbox_t`.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef void (*release_mailbox_t)(struct gxp_mailbox_manager *mgr,
+ struct gxp_virtual_device *vd, uint virt_core,
+ struct gxp_mailbox *mailbox);
+
+/* Called to reset the @mailbox. */
+typedef void (*reset_mailbox_t)(struct gxp_mailbox *mailbox);
+
+/*
+ * Called to request a synchronous command. This callback will be called from the
+ * `gxp_debugfs_mailbox` function. The response's sequence number and status are returned via
+ * @resp_seq and @resp_status, and the `retval` of `struct gxp_response` is returned as the
+ * return value of this function. You can pass NULL to @resp_seq and @resp_status if you don't
+ * need the result. See `struct gxp_response` for the details.
+ *
+ * Returns the `retval` value of `struct gxp_response` when the request succeeds. Otherwise,
+ * returns a negative value as an error.
+ *
+ * This callback is always required, regardless of the device mode.
+ */
+typedef int (*execute_cmd_t)(struct gxp_client *client,
+ struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr,
+ u32 cmd_size, u32 cmd_flags, u8 num_cores,
+ struct gxp_power_states power_states,
+ u64 *resp_seq, u16 *resp_status);
+
+/*
+ * Called to request an asynchronous command. This callback will be called when the
+ * `GXP_MAILBOX_COMMAND_COMPAT` or `GXP_MAILBOX_COMMAND` ioctl is fired. The sequence number of
+ * the command is returned via @cmd_seq. The client's registered eventfd will be signalled when
+ * the response arrives.
+ *
+ * Returns a non-zero value if an error occurs while putting the command into the cmd_queue of
+ * the mailbox.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef int (*execute_cmd_async_t)(struct gxp_client *client,
+ struct gxp_mailbox *mailbox, int virt_core,
+ u16 cmd_code, u8 cmd_priority, u64 cmd_daddr,
+ u32 cmd_size, u32 cmd_flags,
+ struct gxp_power_states power_states,
+ u64 *cmd_seq);
+
+/*
+ * Called to wait for an asynchronous response which was requested by `execute_cmd_async`.
+ * This callback will be called when the `GXP_MAILBOX_RESPONSE` ioctl is fired. The response is
+ * returned via @resp_seq, @resp_status and @resp_retval. You can pass NULL to them if you don't
+ * need the result. See `struct gxp_response` for the details. The error code corresponding to
+ * the response status will be set in @error_code.
+ *
+ * Returns 0 if it succeeds in getting the response. Otherwise, returns a non-zero value as an error.
+ *
+ * This callback is required if the device is in direct mode, otherwise it is optional.
+ */
+typedef int (*wait_async_resp_t)(struct gxp_client *client, int virt_core,
+ u64 *resp_seq, u16 *resp_status,
+ u32 *resp_retval, u16 *error_code);
+
+/*
+ * Called to clean up unconsumed async responses in the queue which have arrived or timed out.
+ * This callback will be called when the @vd is released.
+ *
+ * This callback is always required, regardless of the device mode.
+ */
+typedef void (*release_unconsumed_async_resps_t)(struct gxp_virtual_device *vd);
+
+/*
+ * This structure manages how the mailbox works with user commands.
+ * How the mailbox works depends on what kind of interface is used by the device.
+ * To minimize the effort of updating code outside of the mailbox, it abstracts the interfaces
+ * by defining the callbacks above.
+ */
+struct gxp_mailbox_manager {
+ struct gxp_dev *gxp;
+ u8 num_cores;
+ struct gxp_mailbox **mailboxes;
+ get_mailbox_base_t get_mailbox_csr_base;
+ get_mailbox_base_t get_mailbox_data_base;
+ allocate_mailbox_t allocate_mailbox;
+ release_mailbox_t release_mailbox;
+ reset_mailbox_t reset_mailbox;
+ execute_cmd_t execute_cmd;
+ execute_cmd_async_t execute_cmd_async;
+ wait_async_resp_t wait_async_resp;
+ release_unconsumed_async_resps_t release_unconsumed_async_resps;
+};
+
+/*
+ * Allocates the mailbox manager.
+ *
+ * In general, only one mailbox manager will be used by @gxp. Which mailbox interface is used is
+ * decided internally.
+ */
+struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
+ uint num_cores);
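+
+/*
+ * Typical setup (illustrative sketch; `num_cores` is chip-specific): create
+ * the manager once per device and let the selected mailbox implementation
+ * install its operators, e.g. for the legacy implementation:
+ *
+ *	mgr = gxp_mailbox_create_manager(gxp, num_cores);
+ *	if (IS_ERR(mgr))
+ *		return PTR_ERR(mgr);
+ *	gxp_mailbox_init(mgr);
+ */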
+
+#endif /* __GXP_MAILBOX_MANAGER_H__ */
diff --git a/gxp-mailbox-regs.h b/gxp-mailbox-regs.h
index 5d83b5e..5c518ed 100644
--- a/gxp-mailbox-regs.h
+++ b/gxp-mailbox-regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* GXP mailbox registers.
*
@@ -7,34 +7,9 @@
#ifndef __GXP_MAILBOX_REGS_H__
#define __GXP_MAILBOX_REGS_H__
-/* Mailbox CSRs */
-#define MBOX_MCUCTLR_OFFSET 0x0000
-
-#define MBOX_INTGR0_OFFSET 0x0020
-#define MBOX_INTCR0_OFFSET 0x0024
-#define MBOX_INTMR0_OFFSET 0x0028
-#define MBOX_INTSR0_OFFSET 0x002C
-#define MBOX_INTMSR0_OFFSET 0x0030
-
-#define MBOX_INTGR1_OFFSET 0x0040
-#define MBOX_INTCR1_OFFSET 0x0044
-#define MBOX_INTMR1_OFFSET 0x0048
-#define MBOX_INTSR1_OFFSET 0x004C
-#define MBOX_INTMSR1_OFFSET 0x0050
-
-/* Mailbox Shared Data Registers */
-#define MBOX_DATA_REG_BASE 0x0080
-
-#define MBOX_STATUS_OFFSET 0x00
-#define MBOX_DESCRIPTOR_ADDR_OFFSET 0x04
-#define MBOX_CMD_TAIL_RESP_HEAD_OFFSET 0x08
-#define MBOX_CMD_HEAD_RESP_TAIL_OFFSET 0x0C
-
-#define MBOX_REGS_SIZE 0x180
-
/*
* Macros for separating out the command queue tail and response queue head in
- * the `MBOX_CMD_TAIL_RESP_HEAD_OFFSET` register.
+ * the `MBOX_DATA_CMD_TAIL_RESP_HEAD_OFFSET` register.
*/
#define CMD_TAIL_SHIFT 16
#define RESP_HEAD_SHIFT 0
@@ -43,7 +18,7 @@
/*
* Macros for separating out the command queue head and response queue tail in
- * the `MBOX_CMD_HEAD_RESP_TAIL_OFFSET` register.
+ * the `MBOX_DATA_CMD_HEAD_RESP_TAIL_OFFSET` register.
*/
#define CMD_HEAD_SHIFT 16
#define RESP_TAIL_SHIFT 0
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index aa28fc0..758b707 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -14,343 +14,27 @@
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
#include "gxp-pm.h"
+#include "gxp.h"
-/* Timeout of 1s by default */
-int gxp_mbx_timeout = 1000;
-module_param_named(mbx_timeout, gxp_mbx_timeout, int, 0660);
-
-/* Utilities of circular queue operations */
-
-#define CIRCULAR_QUEUE_WRAP_BIT BIT(15)
-#define CIRCULAR_QUEUE_INDEX_MASK (CIRCULAR_QUEUE_WRAP_BIT - 1)
-#define CIRCULAR_QUEUE_WRAPPED(idx) ((idx) & CIRCULAR_QUEUE_WRAP_BIT)
-#define CIRCULAR_QUEUE_REAL_INDEX(idx) ((idx) & CIRCULAR_QUEUE_INDEX_MASK)
-
-#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
-#define MBOX_CMD_QUEUE_SIZE \
- (sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES)
-
-#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
-#define MBOX_RESP_QUEUE_SIZE \
- (sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES)
-
-/*
- * Returns the number of elements in a circular queue given its @head, @tail,
- * and @queue_size.
- */
-static inline u32 circular_queue_count(u32 head, u32 tail, u32 queue_size)
-{
- if (CIRCULAR_QUEUE_WRAPPED(tail) != CIRCULAR_QUEUE_WRAPPED(head))
- return queue_size - CIRCULAR_QUEUE_REAL_INDEX(head) +
- CIRCULAR_QUEUE_REAL_INDEX(tail);
- else
- return tail - head;
-}
-
-/* Increases @index of a circular queue by @inc. */
-static inline u32 circular_queue_inc(u32 index, u32 inc, u32 queue_size)
-{
- u32 new_index = CIRCULAR_QUEUE_REAL_INDEX(index) + inc;
-
- if (new_index >= queue_size)
- return (index + inc - queue_size) ^ CIRCULAR_QUEUE_WRAP_BIT;
- else
- return index + inc;
-}
-
-/* Sets mailbox->cmd_queue_tail and corresponding CSR on device. */
-static void gxp_mailbox_set_cmd_queue_tail(struct gxp_mailbox *mailbox,
- u32 value)
-{
- mailbox->cmd_queue_tail = value;
- gxp_mailbox_write_cmd_queue_tail(mailbox, value);
-}
-
-/* Sets mailbox->resp_queue_head and corresponding CSR on device. */
-static void gxp_mailbox_set_resp_queue_head(struct gxp_mailbox *mailbox,
- u32 value)
-{
- mailbox->resp_queue_head = value;
- gxp_mailbox_write_resp_queue_head(mailbox, value);
-}
-
-/*
- * Increases the command queue tail by @inc.
- *
- * The queue uses the mirrored circular buffer arrangement. Each index (head and
- * tail) has a wrap bit, represented by the constant CIRCULAR_QUEUE_WRAP_BIT.
- * Whenever an index is increased and will exceed the end of the queue, the wrap
- * bit is xor-ed.
- *
- * This method will update both mailbox->cmd_queue_tail and CSR on device.
- *
- * Returns 0 on success.
- * If command queue tail will exceed command queue head after adding @inc,
- * -EBUSY is returned and all fields remain unchanged. The caller should
- * handle this case and implement a mechanism to wait until the consumer
- * consumes commands.
- *
- * Caller must hold cmd_queue_lock.
- */
-static int gxp_mailbox_inc_cmd_queue_tail(struct gxp_mailbox *mailbox, u32 inc)
-{
- u32 head;
- u32 remain_size;
- u32 new_tail;
-
- lockdep_assert_held(&mailbox->cmd_queue_lock);
-
- if (inc > mailbox->cmd_queue_size)
- return -EINVAL;
-
- head = gxp_mailbox_read_cmd_queue_head(mailbox);
- remain_size = mailbox->cmd_queue_size -
- circular_queue_count(head, mailbox->cmd_queue_tail,
- mailbox->cmd_queue_size);
- /* no enough space left */
- if (inc > remain_size)
- return -EBUSY;
-
- new_tail = circular_queue_inc(mailbox->cmd_queue_tail, inc,
- mailbox->cmd_queue_size);
- gxp_mailbox_set_cmd_queue_tail(mailbox, new_tail);
- return 0;
-}
-
-/*
- * Increases the response queue head by @inc.
- *
- * The queue uses the mirrored circular buffer arrangement. Each index (head and
- * tail) has a wrap bit, represented by the constant CIRCULAR_QUEUE_WRAP_BIT.
- * Whenever an index is increased and will exceed the end of the queue, the wrap
- * bit is xor-ed.
- *
- * This method will update both mailbox->resp_queue_head and CSR on device.
- *
- * Returns 0 on success.
- * -EINVAL is returned if the queue head will exceed tail of queue, and no
- * fields or CSR is updated in this case.
- *
- * Caller must hold resp_queue_lock.
- */
-static int gxp_mailbox_inc_resp_queue_head(struct gxp_mailbox *mailbox, u32 inc)
-{
- u32 tail;
- u32 size;
- u32 new_head;
-
- lockdep_assert_held(&mailbox->resp_queue_lock);
-
- if (inc > mailbox->resp_queue_size)
- return -EINVAL;
-
- tail = gxp_mailbox_read_resp_queue_tail(mailbox);
- size = circular_queue_count(mailbox->resp_queue_head, tail,
- mailbox->resp_queue_size);
- if (inc > size)
- return -EINVAL;
- new_head = circular_queue_inc(mailbox->resp_queue_head, inc,
- mailbox->resp_queue_size);
- gxp_mailbox_set_resp_queue_head(mailbox, new_head);
-
- return 0;
-}
-
-struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
- uint num_cores)
-{
- struct gxp_mailbox_manager *mgr;
-
- mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return ERR_PTR(-ENOMEM);
-
- mgr->gxp = gxp;
- mgr->num_cores = num_cores;
- mgr->get_mailbox_csr_base = gxp_mailbox_get_csr_base;
- mgr->get_mailbox_data_base = gxp_mailbox_get_data_base;
-
- mgr->mailboxes = devm_kcalloc(gxp->dev, mgr->num_cores,
- sizeof(*mgr->mailboxes), GFP_KERNEL);
- if (!mgr->mailboxes)
- return ERR_PTR(-ENOMEM);
-
- return mgr;
-}
-
-/*
- * Pops the wait_list until the sequence number of @resp is found, and copies
- * @resp to the found entry.
- *
- * Entries in wait_list should have sequence number in increasing order, but
- * the responses arriving and being handled may be out-of-order.
- *
- * Iterate over the wait_list, comparing #cur->resp->seq with @resp->seq:
- * 1. #cur->resp->seq > @resp->seq:
- * - Nothing to do, either @resp is invalid or its command timed out.
- * - We're done.
- * 2. #cur->resp->seq == @resp->seq:
- * - Copy @resp, pop the head.
- * - If #cur->resp has a destination queue, push it to that queue
- * - We're done.
- * 3. #cur->resp->seq < @resp->seq:
- * - @resp has arrived out of sequence order.
- * - Leave #cur->resp in the wait_list.
- * - Keep iterating unless the list is exhausted.
- */
-static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
- const struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- unsigned long flags;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry_safe(cur, nxt, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * This response has already timed out and been removed
- * from the wait list (or this is an invalid response).
- * Drop it.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- memcpy(cur->resp, resp, sizeof(*resp));
- list_del(&cur->list);
- if (cur->is_async) {
- async_resp =
- container_of(cur->resp,
- struct gxp_async_response,
- resp);
-
- cancel_delayed_work(&async_resp->timeout_work);
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp,
- async_resp->gxp_power_state,
- async_resp->requested_low_clkmux,
- AUR_OFF, false,
- async_resp->memory_power_state,
- AUR_MEM_UNDEFINED);
-
- spin_lock_irqsave(async_resp->dest_queue_lock,
- flags);
-
- list_add_tail(&async_resp->list_entry,
- async_resp->dest_queue);
- /*
- * Marking the dest_queue as NULL indicates the
- * response was handled in case its timeout
- * handler fired between acquiring the
- * wait_list_lock and cancelling the timeout.
- */
- async_resp->dest_queue = NULL;
-
- /*
- * Don't release the dest_queue_lock until both
- * any eventfd has been signaled and any waiting
- * thread has been woken. Otherwise one thread
- * might consume and free the response before
- * this function is done with it.
- */
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
-
- spin_unlock_irqrestore(
- async_resp->dest_queue_lock, flags);
-
- }
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
+#if GXP_USE_LEGACY_MAILBOX
+#include "gxp-mailbox-impl.h"
+#else
+#include <gcip/gcip-mailbox.h>
+#include <gcip/gcip-kci.h>
-/*
- * Fetches elements in the response queue.
- *
- * Returns the pointer of fetched response elements.
- * @total_ptr will be the number of elements fetched.
- *
- * Returns -ENOMEM if failed on memory allocation.
- * Returns NULL if the response queue is empty.
- */
-static struct gxp_response *
-gxp_mailbox_fetch_responses(struct gxp_mailbox *mailbox, u32 *total_ptr)
-{
- u32 head;
- u32 tail;
- u32 count;
- u32 i;
- u32 j;
- u32 total = 0;
- const u32 size = mailbox->resp_queue_size;
- const struct gxp_response *queue = mailbox->resp_queue;
- struct gxp_response *ret = NULL;
- struct gxp_response *prev_ptr = NULL;
-
- mutex_lock(&mailbox->resp_queue_lock);
-
- head = mailbox->resp_queue_head;
- /* loop until our head equals to CSR tail */
- while (1) {
- tail = gxp_mailbox_read_resp_queue_tail(mailbox);
- count = circular_queue_count(head, tail, size);
- if (count == 0)
- break;
-
- prev_ptr = ret;
- ret = krealloc(prev_ptr, (total + count) * sizeof(*queue),
- GFP_KERNEL);
- /*
- * Out-of-memory, we can return the previously fetched responses
- * if any, or ENOMEM otherwise.
- */
- if (!ret) {
- if (!prev_ptr)
- ret = ERR_PTR(-ENOMEM);
- else
- ret = prev_ptr;
- break;
- }
- /* copy responses */
- j = CIRCULAR_QUEUE_REAL_INDEX(head);
- for (i = 0; i < count; i++) {
- memcpy(&ret[total], &queue[j], sizeof(*queue));
- ret[total].status = GXP_RESP_OK;
- j = (j + 1) % size;
- total++;
- }
- head = circular_queue_inc(head, count, size);
- }
- gxp_mailbox_inc_resp_queue_head(mailbox, total);
+#include "gxp-kci.h"
+#include "gxp-mcu-telemetry.h"
+#endif
- mutex_unlock(&mailbox->resp_queue_lock);
- /*
- * Now that the response queue has been drained, send an interrupt
- * to the device in case firmware was waiting for us to consume
- * responses.
- */
- if (total == size) {
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- }
-
- *total_ptr = total;
- return ret;
-}
+/* Timeout of 2s by default */
+int gxp_mbx_timeout = 2000;
+module_param_named(mbx_timeout, gxp_mbx_timeout, int, 0660);
/*
* Fetches and handles responses, then wakes up threads that are waiting for a
@@ -364,26 +48,21 @@ static void gxp_mailbox_consume_responses_work(struct kthread_work *work)
{
struct gxp_mailbox *mailbox =
container_of(work, struct gxp_mailbox, response_work);
- struct gxp_response *responses;
- u32 i;
- u32 count = 0;
-
- /* fetch responses and bump RESP_QUEUE_HEAD */
- responses = gxp_mailbox_fetch_responses(mailbox, &count);
- if (IS_ERR(responses)) {
- dev_err(mailbox->gxp->dev,
- "GXP Mailbox failed on fetching responses: %ld",
- PTR_ERR(responses));
- return;
- }
- for (i = 0; i < count; i++)
- gxp_mailbox_handle_response(mailbox, &responses[i]);
- /*
- * Responses handled, wake up threads that are waiting for a response.
- */
- wake_up(&mailbox->wait_list_waitq);
- kfree(responses);
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ gcip_mailbox_consume_responses_work(mailbox->mbx_impl.gcip_mbx);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ gcip_kci_handle_irq(mailbox->mbx_impl.gcip_kci);
+ gxp_mcu_telemetry_irq_handler(
+ ((struct gxp_kci *)mailbox->data)->mcu);
+ break;
+ }
+#endif
}
/*
@@ -398,8 +77,8 @@ static inline void gxp_mailbox_handle_irq(struct gxp_mailbox *mailbox)
/* Priority level for realtime worker threads */
#define GXP_RT_THREAD_PRIORITY 2
-static struct task_struct *
-create_response_rt_thread(struct device *dev, void *data, int core_id)
+static struct task_struct *create_response_rt_thread(struct device *dev,
+ void *data, int core_id)
{
static const struct sched_param param = {
.sched_priority = GXP_RT_THREAD_PRIORITY,
@@ -420,66 +99,72 @@ create_response_rt_thread(struct device *dev, void *data, int core_id)
return task;
}
+static int gxp_mailbox_set_ops(struct gxp_mailbox *mailbox,
+ struct gxp_mailbox_ops *ops)
+{
+ if (!ops) {
+ dev_err(mailbox->gxp->dev, "Incomplete gxp_mailbox ops.\n");
+ return -EINVAL;
+ }
+
+ mailbox->ops = ops;
+
+ return 0;
+}
+
+static inline void gxp_mailbox_set_data(struct gxp_mailbox *mailbox, void *data)
+{
+ mailbox->data = data;
+}
+
static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id)
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args)
{
struct gxp_mailbox *mailbox;
+ int ret;
+
+ if (!args) {
+ dev_err(mgr->gxp->dev, "Incomplete gxp_mailbox args.\n");
+ ret = -EINVAL;
+ goto err_args;
+ }
mailbox = kzalloc(sizeof(*mailbox), GFP_KERNEL);
- if (!mailbox)
+ if (!mailbox) {
+ ret = -ENOMEM;
goto err_mailbox;
+ }
mailbox->core_id = core_id;
mailbox->gxp = mgr->gxp;
mailbox->csr_reg_base = mgr->get_mailbox_csr_base(mgr->gxp, core_id);
mailbox->data_reg_base = mgr->get_mailbox_data_base(mgr->gxp, core_id);
+ mailbox->type = args->type;
+ mailbox->queue_wrap_bit = args->queue_wrap_bit;
+ mailbox->cmd_elem_size = args->cmd_elem_size;
+ mailbox->resp_elem_size = args->resp_elem_size;
+ mailbox->ignore_seq_order = args->ignore_seq_order;
+ gxp_mailbox_set_data(mailbox, args->data);
+
+ ret = gxp_mailbox_set_ops(mailbox, args->ops);
+ if (ret)
+ goto err_set_ops;
- /* Allocate and initialize the command queue */
- mailbox->cmd_queue = (struct gxp_command *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
- &(mailbox->cmd_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->cmd_queue)
- goto err_cmd_queue;
+ ret = mailbox->ops->allocate_resources(mailbox, vd, virt_core);
+ if (ret)
+ goto err_allocate_resources;
- mailbox->cmd_queue_size = MBOX_CMD_QUEUE_NUM_ENTRIES;
- mailbox->cmd_queue_tail = 0;
mutex_init(&mailbox->cmd_queue_lock);
-
- /* Allocate and initialize the response queue */
- mailbox->resp_queue = (struct gxp_response *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
- &(mailbox->resp_queue_device_addr), GFP_KERNEL, 0);
- if (!mailbox->resp_queue)
- goto err_resp_queue;
-
- mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
- mailbox->resp_queue_head = 0;
mutex_init(&mailbox->resp_queue_lock);
-
- /* Allocate and initialize the mailbox descriptor */
- mailbox->descriptor =
- (struct gxp_mailbox_descriptor *)gxp_dma_alloc_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- &(mailbox->descriptor_device_addr), GFP_KERNEL, 0);
- if (!mailbox->descriptor)
- goto err_descriptor;
-
- mailbox->descriptor->cmd_queue_device_addr =
- mailbox->cmd_queue_device_addr;
- mailbox->descriptor->resp_queue_device_addr =
- mailbox->resp_queue_device_addr;
- mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
- mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
-
kthread_init_worker(&mailbox->response_worker);
mailbox->response_thread = create_response_rt_thread(
mailbox->gxp->dev, &mailbox->response_worker, core_id);
- if (IS_ERR(mailbox->response_thread))
+ if (IS_ERR(mailbox->response_thread)) {
+ ret = -ENOMEM;
goto err_thread;
+ }
/* Initialize driver before interacting with its registers */
gxp_mailbox_driver_init(mailbox);
@@ -487,73 +172,236 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
return mailbox;
err_thread:
- gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
-err_descriptor:
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
-err_resp_queue:
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
-err_cmd_queue:
+ mailbox->ops->release_resources(mailbox, vd, virt_core);
+err_allocate_resources:
+err_set_ops:
kfree(mailbox);
err_mailbox:
- return ERR_PTR(-ENOMEM);
+err_args:
+ return ERR_PTR(ret);
+}
+
+static void release_mailbox(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd, uint virt_core)
+{
+ if (IS_GXP_TEST && !mailbox)
+ return;
+ mailbox->ops->release_resources(mailbox, vd, virt_core);
+ kthread_flush_worker(&mailbox->response_worker);
+ if (mailbox->response_thread)
+ kthread_stop(mailbox->response_thread);
+ kfree(mailbox);
+}
+
+#if !GXP_USE_LEGACY_MAILBOX
+static int init_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_mailbox_args args = {
+ .dev = mailbox->gxp->dev,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .cmd_elem_size = mailbox->cmd_elem_size,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .resp_elem_size = mailbox->resp_elem_size,
+ .timeout = MAILBOX_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.mbx,
+ .data = mailbox,
+ .ignore_seq_order = mailbox->ignore_seq_order,
+ };
+ struct gcip_mailbox *gcip_mbx;
+ int ret;
+
+ gcip_mbx = kzalloc(sizeof(*gcip_mbx), GFP_KERNEL);
+ if (!gcip_mbx)
+ return -ENOMEM;
+
+ /* Initialize gcip_mailbox */
+ ret = gcip_mailbox_init(gcip_mbx, &args);
+ if (ret) {
+ kfree(gcip_mbx);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_mbx = gcip_mbx;
+
+ return 0;
+}
+
+static void release_gcip_mailbox(struct gxp_mailbox *mailbox)
+{
+ struct gcip_mailbox *gcip_mbx = mailbox->mbx_impl.gcip_mbx;
+
+ if (gcip_mbx == NULL)
+ return;
+
+ gcip_mailbox_release(gcip_mbx);
+ kfree(gcip_mbx);
+ mailbox->mbx_impl.gcip_mbx = NULL;
+}
+
+static int init_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ const struct gcip_kci_args args = {
+ .dev = mailbox->gxp->dev,
+ .cmd_queue = mailbox->cmd_queue_buf.vaddr,
+ .resp_queue = mailbox->resp_queue_buf.vaddr,
+ .queue_wrap_bit = mailbox->queue_wrap_bit,
+ .rkci_buffer_size = GXP_REVERSE_KCI_BUFFER_SIZE,
+ .timeout = GXP_KCI_TIMEOUT,
+ .ops = mailbox->ops->gcip_ops.kci,
+ .data = mailbox,
+ };
+ struct gcip_kci *gcip_kci;
+ int ret;
+
+ gcip_kci = kzalloc(sizeof(*gcip_kci), GFP_KERNEL);
+ if (!gcip_kci)
+ return -ENOMEM;
+
+ ret = gcip_kci_init(gcip_kci, &args);
+ if (ret) {
+ kfree(gcip_kci);
+ return ret;
+ }
+
+ mailbox->mbx_impl.gcip_kci = gcip_kci;
+
+ return 0;
+}
+
+static void release_gcip_kci(struct gxp_mailbox *mailbox)
+{
+ struct gcip_kci *gcip_kci = mailbox->mbx_impl.gcip_kci;
+
+ if (gcip_kci == NULL)
+ return;
+
+ gcip_kci_cancel_work_queues(gcip_kci);
+ gcip_kci_release(gcip_kci);
+ kfree(gcip_kci);
+ mailbox->mbx_impl.gcip_kci = NULL;
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
+
+/*
+ * Initializes @mailbox->mbx_impl to start waiting and consuming responses.
+ * This initializes the GCIP mailbox modules according to the type of @mailbox.
+ * - GENERAL: will initialize @mailbox->mbx_impl.gcip_mbx
+ * - KCI: will initialize @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: When `GXP_USE_LEGACY_MAILBOX` is set, it will initialize @mailbox itself, as its
+ * queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static int init_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+ int ret;
+
+#if GXP_USE_LEGACY_MAILBOX
+ if (mailbox->type != GXP_MBOX_TYPE_GENERAL)
+ return -EOPNOTSUPP;
+
+ ret = gxp_mailbox_init_consume_responses(mailbox);
+ if (ret)
+ return ret;
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ ret = init_gcip_mailbox(mailbox);
+ if (ret)
+ return ret;
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ ret = init_gcip_kci(mailbox);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+#endif /* GXP_USE_LEGACY_MAILBOX */
+
+ return 0;
}
-static void enable_mailbox(struct gxp_mailbox *mailbox)
+static int enable_mailbox(struct gxp_mailbox *mailbox)
{
- gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_device_addr);
+ int ret;
+
+ gxp_mailbox_write_descriptor(mailbox, mailbox->descriptor_buf.dsp_addr);
gxp_mailbox_write_cmd_queue_head(mailbox, 0);
gxp_mailbox_write_cmd_queue_tail(mailbox, 0);
gxp_mailbox_write_resp_queue_head(mailbox, 0);
gxp_mailbox_write_resp_queue_tail(mailbox, 0);
+ ret = init_mailbox_impl(mailbox);
+ if (ret)
+ return ret;
+
mailbox->handle_irq = gxp_mailbox_handle_irq;
- mailbox->cur_seq = 0;
- init_waitqueue_head(&mailbox->wait_list_waitq);
- INIT_LIST_HEAD(&mailbox->wait_list);
mutex_init(&mailbox->wait_list_lock);
- kthread_init_work(&mailbox->response_work, gxp_mailbox_consume_responses_work);
+ kthread_init_work(&mailbox->response_work,
+ gxp_mailbox_consume_responses_work);
/* Only enable interrupts once everything has been setup */
gxp_mailbox_driver_enable_interrupts(mailbox);
/* Enable the mailbox */
gxp_mailbox_write_status(mailbox, 1);
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
+
+ return 0;
}
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id)
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args)
{
struct gxp_mailbox *mailbox;
+ int ret;
- mailbox = create_mailbox(mgr, vd, virt_core, core_id);
+ mailbox = create_mailbox(mgr, vd, virt_core, core_id, args);
if (IS_ERR(mailbox))
return mailbox;
- enable_mailbox(mailbox);
+ ret = enable_mailbox(mailbox);
+ if (ret) {
+ release_mailbox(mailbox, vd, virt_core);
+ return ERR_PTR(ret);
+ }
return mailbox;
}
+/*
+ * Releases the @mailbox->mbx_impl to flush all pending responses in the wait
+ * list.
+ * This releases the GCIP mailbox modules according to the type of @mailbox.
+ * - GENERAL: will release @mailbox->mbx_impl.gcip_mbx
+ * - KCI: will release @mailbox->mbx_impl.gcip_kci
+ *
+ * Note: When `GXP_USE_LEGACY_MAILBOX` is set, it will release @mailbox itself, as its
+ * queuing logic is implemented in `gxp-mailbox-impl.c`.
+ */
+static void release_mailbox_impl(struct gxp_mailbox *mailbox)
+{
+#if GXP_USE_LEGACY_MAILBOX
+ gxp_mailbox_release_consume_responses(mailbox);
+#else
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ release_gcip_mailbox(mailbox);
+ break;
+ case GXP_MBOX_TYPE_KCI:
+ release_gcip_kci(mailbox);
+ break;
+ }
+#endif
+}
+
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox)
{
int i;
- struct gxp_mailbox_wait_list *cur, *nxt;
- struct gxp_async_response *async_resp;
- struct list_head resps_to_flush;
- unsigned long flags;
if (!mailbox) {
dev_err(mgr->gxp->dev,
@@ -576,51 +424,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
cancel_work_sync(mailbox->interrupt_handlers[i]);
}
- /*
- * At this point only async responses should be pending. Flush them all
- * from the `wait_list` at once so any remaining timeout workers
- * waiting on `wait_list_lock` will know their responses have been
- * handled already.
- */
- INIT_LIST_HEAD(&resps_to_flush);
- mutex_lock(&mailbox->wait_list_lock);
- list_for_each_entry_safe(cur, nxt, &mailbox->wait_list, list) {
- list_del(&cur->list);
- if (cur->is_async) {
- list_add_tail(&cur->list, &resps_to_flush);
- /*
- * Clear the response's destination queue so that if the
- * timeout worker is running, it won't try to process
- * this response after `wait_list_lock` is released.
- */
- async_resp = container_of(
- cur->resp, struct gxp_async_response, resp);
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- async_resp->dest_queue = NULL;
- spin_unlock_irqrestore(async_resp->dest_queue_lock,
- flags);
-
- } else {
- dev_warn(
- mailbox->gxp->dev,
- "Unexpected synchronous command pending on mailbox release\n");
- kfree(cur);
- }
- }
- mutex_unlock(&mailbox->wait_list_lock);
-
- /*
- * Cancel the timeout timer of and free any responses that were still in
- * the `wait_list` above.
- */
- list_for_each_entry_safe(cur, nxt, &resps_to_flush, list) {
- list_del(&cur->list);
- async_resp = container_of(cur->resp, struct gxp_async_response,
- resp);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- kfree(cur);
- }
+ release_mailbox_impl(mailbox);
/* Reset the mailbox HW */
gxp_mailbox_reset_hw(mailbox);
@@ -637,21 +441,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
mgr->mailboxes[mailbox->core_id] = NULL;
/* Clean up resources */
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_command) * mailbox->cmd_queue_size,
- mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
- gxp_dma_free_coherent(
- mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_response) * mailbox->resp_queue_size,
- mailbox->resp_queue, mailbox->resp_queue_device_addr);
- gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
- sizeof(struct gxp_mailbox_descriptor),
- mailbox->descriptor,
- mailbox->descriptor_device_addr);
- kthread_flush_worker(&mailbox->response_worker);
- kthread_stop(mailbox->response_thread);
- kfree(mailbox);
+ release_mailbox(mailbox, vd, virt_core);
}
void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
@@ -659,241 +449,6 @@ void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
dev_notice(mailbox->gxp->dev, "%s not yet implemented\n", __func__);
}
-/*
- * Adds @resp to @mailbox->wait_list.
- *
- * wait_list is a FIFO queue, with sequence number in increasing order.
- *
- * Returns 0 on success, or -ENOMEM if failed on allocation.
- */
-static int gxp_mailbox_push_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp, bool is_async)
-{
- struct gxp_mailbox_wait_list *entry =
- kzalloc(sizeof(*entry), GFP_KERNEL);
-
- if (!entry)
- return -ENOMEM;
- entry->resp = resp;
- entry->is_async = is_async;
- mutex_lock(&mailbox->wait_list_lock);
- list_add_tail(&entry->list, &mailbox->wait_list);
- mutex_unlock(&mailbox->wait_list_lock);
-
- return 0;
-}
-
-/*
- * Removes the response previously pushed with gxp_mailbox_push_wait_resp().
- *
- * This is used when the kernel gives up waiting for the response.
- */
-static void gxp_mailbox_del_wait_resp(struct gxp_mailbox *mailbox,
- struct gxp_response *resp)
-{
- struct gxp_mailbox_wait_list *cur;
-
- mutex_lock(&mailbox->wait_list_lock);
-
- list_for_each_entry(cur, &mailbox->wait_list, list) {
- if (cur->resp->seq > resp->seq) {
- /*
- * Sequence numbers in wait_list are in increasing
- * order. This case implies no entry in the list
- * matches @resp's sequence number.
- */
- break;
- }
- if (cur->resp->seq == resp->seq) {
- list_del(&cur->list);
- kfree(cur);
- break;
- }
- }
-
- mutex_unlock(&mailbox->wait_list_lock);
-}
-
-static int gxp_mailbox_enqueue_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct gxp_response *resp,
- bool resp_is_async)
-{
- int ret;
- u32 tail;
-
- mutex_lock(&mailbox->cmd_queue_lock);
-
- cmd->seq = mailbox->cur_seq;
- /*
- * The lock ensures mailbox->cmd_queue_tail cannot be changed by
- * other processes (this method should be the only one to modify the
- * value of tail), therefore we can remember its value here and use it
- * in various places below.
- */
- tail = mailbox->cmd_queue_tail;
-
- /*
- * If the cmd queue is full, it's up to the caller to retry.
- */
- if (gxp_mailbox_read_cmd_queue_head(mailbox) ==
- (tail ^ CIRCULAR_QUEUE_WRAP_BIT)) {
- ret = -EAGAIN;
- goto out;
- }
-
- if (resp) {
- /*
- * Add @resp to the wait_list only if the cmd can be pushed
- * successfully.
- */
- resp->seq = cmd->seq;
- resp->status = GXP_RESP_WAITING;
- ret = gxp_mailbox_push_wait_resp(mailbox, resp, resp_is_async);
- if (ret)
- goto out;
- }
- /* size of cmd_queue is a multiple of sizeof(*cmd) */
- memcpy(mailbox->cmd_queue + CIRCULAR_QUEUE_REAL_INDEX(tail), cmd,
- sizeof(*cmd));
- gxp_mailbox_inc_cmd_queue_tail(mailbox, 1);
- /* triggers doorbell */
- /* TODO(b/190868834) define interrupt bits */
- gxp_mailbox_generate_device_interrupt(mailbox, BIT(0));
- /* bumps sequence number after the command is sent */
- mailbox->cur_seq++;
- ret = 0;
-out:
- mutex_unlock(&mailbox->cmd_queue_lock);
- if (ret)
- dev_err(mailbox->gxp->dev, "%s: ret=%d", __func__, ret);
-
- return ret;
-}
-
-int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd, struct gxp_response *resp)
-{
- int ret;
-
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, resp,
- /* resp_is_async = */ false);
- if (ret)
- return ret;
- ret = wait_event_timeout(mailbox->wait_list_waitq,
- resp->status != GXP_RESP_WAITING,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
- if (!ret) {
- dev_notice(mailbox->gxp->dev, "%s: event wait timeout",
- __func__);
- gxp_mailbox_del_wait_resp(mailbox, resp);
- return -ETIMEDOUT;
- }
- if (resp->status != GXP_RESP_OK) {
- dev_notice(mailbox->gxp->dev, "%s: resp status=%u", __func__,
- resp->status);
- return -ENOMSG;
- }
-
- return resp->retval;
-}
-
-static void async_cmd_timeout_work(struct work_struct *work)
-{
- struct gxp_async_response *async_resp = container_of(
- work, struct gxp_async_response, timeout_work.work);
- unsigned long flags;
-
- /*
- * This function will acquire the mailbox wait_list_lock. This means if
- * response processing is in progress, it will complete before this
- * response can be removed from the wait list.
- *
- * Once this function has the wait_list_lock, no future response
- * processing will begin until this response has been removed.
- */
- gxp_mailbox_del_wait_resp(async_resp->mailbox, &async_resp->resp);
-
- /*
- * Check if this response still has a valid destination queue, in case
- * an in-progress call to `gxp_mailbox_handle_response()` completed
- * the response while `gxp_mailbox_del_wait_resp()` was waiting for
- * the wait_list_lock.
- */
- spin_lock_irqsave(async_resp->dest_queue_lock, flags);
- if (async_resp->dest_queue) {
- async_resp->resp.status = GXP_RESP_CANCELLED;
- list_add_tail(&async_resp->list_entry, async_resp->dest_queue);
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
-
- gxp_pm_update_requested_power_states(
- async_resp->mailbox->gxp, async_resp->gxp_power_state,
- async_resp->requested_low_clkmux, AUR_OFF, false,
- async_resp->memory_power_state, AUR_MEM_UNDEFINED);
-
- if (async_resp->eventfd) {
- gxp_eventfd_signal(async_resp->eventfd);
- gxp_eventfd_put(async_resp->eventfd);
- }
-
- wake_up(async_resp->dest_queue_waitq);
- } else {
- spin_unlock_irqrestore(async_resp->dest_queue_lock, flags);
- }
-}
-
-int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct list_head *resp_queue,
- spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq,
- uint gxp_power_state, uint memory_power_state,
- bool requested_low_clkmux,
- struct gxp_eventfd *eventfd)
-{
- struct gxp_async_response *async_resp;
- int ret;
-
- async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
- if (!async_resp)
- return -ENOMEM;
-
- async_resp->mailbox = mailbox;
- async_resp->dest_queue = resp_queue;
- async_resp->dest_queue_lock = queue_lock;
- async_resp->dest_queue_waitq = queue_waitq;
- async_resp->gxp_power_state = gxp_power_state;
- async_resp->memory_power_state = memory_power_state;
- async_resp->requested_low_clkmux = requested_low_clkmux;
- if (eventfd && gxp_eventfd_get(eventfd))
- async_resp->eventfd = eventfd;
- else
- async_resp->eventfd = NULL;
-
- INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
- schedule_delayed_work(&async_resp->timeout_work,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
-
- gxp_pm_update_requested_power_states(
- mailbox->gxp, AUR_OFF, false, gxp_power_state,
- requested_low_clkmux, AUR_MEM_UNDEFINED, memory_power_state);
- ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
- /* resp_is_async = */ true);
- if (ret)
- goto err_free_resp;
-
- return 0;
-
-err_free_resp:
- gxp_pm_update_requested_power_states(mailbox->gxp, gxp_power_state,
- requested_low_clkmux, AUR_OFF, false,
- memory_power_state,
- AUR_MEM_UNDEFINED);
- cancel_delayed_work_sync(&async_resp->timeout_work);
- kfree(async_resp);
- return ret;
-}
-
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
struct work_struct *handler)
@@ -908,7 +463,7 @@ int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
}
int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
- u32 int_bit)
+ u32 int_bit)
{
/* Bit 0 is reserved for incoming mailbox responses */
if (int_bit == 0 || int_bit >= GXP_MAILBOX_INT_BIT_COUNT)
@@ -918,3 +473,31 @@ int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
return 0;
}
+
+#if !GXP_USE_LEGACY_MAILBOX
+int gxp_mailbox_send_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp)
+{
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_send_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp);
+ case GXP_MBOX_TYPE_KCI:
+ return gcip_kci_send_cmd(mailbox->mbx_impl.gcip_kci, cmd);
+ }
+ return -EOPNOTSUPP;
+}
+
+struct gcip_mailbox_resp_awaiter *
+gxp_mailbox_put_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp,
+ void *data)
+{
+ switch (mailbox->type) {
+ case GXP_MBOX_TYPE_GENERAL:
+ return gcip_mailbox_put_cmd(mailbox->mbx_impl.gcip_mbx, cmd,
+ resp, data);
+ default:
+ break;
+ }
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* !GXP_USE_LEGACY_MAILBOX */
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 4bea5d7..cf72fbe 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -2,7 +2,7 @@
/*
* GXP mailbox interface.
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
#ifndef __GXP_MAILBOX_H__
#define __GXP_MAILBOX_H__
@@ -10,7 +10,45 @@
#include <linux/kthread.h>
#include "gxp-client.h"
+#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
+#include "gxp-dma.h"
#include "gxp-internal.h"
+#include "gxp-mailbox-manager.h"
+
+#if !GXP_USE_LEGACY_MAILBOX
+#include <gcip/gcip-kci.h>
+#include <gcip/gcip-mailbox.h>
+#endif
+
+/*
+ * Offset from the host mailbox interface to the device interface that needs to
+ * be mapped.
+ */
+#if defined(CONFIG_GXP_IP_ZEBU) || defined(CONFIG_GXP_GEM5)
+#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x180000
+#else
+#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
+#endif
+
+#define __wait_event_lock_irq_timeout_exclusive(wq_head, condition, lock, \
+ timeout, state) \
+ ___wait_event(wq_head, ___wait_cond_timeout(condition), state, 1, \
+ timeout, spin_unlock_irq(&lock); \
+ __ret = schedule_timeout(__ret); spin_lock_irq(&lock))
+
+/*
+ * wait_event_interruptible_lock_irq_timeout(), but with the exclusive flag set.
+ */
+#define wait_event_interruptible_lock_irq_timeout_exclusive( \
+ wq_head, condition, lock, timeout) \
+ ({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_lock_irq_timeout_exclusive( \
+ wq_head, condition, lock, timeout, \
+ TASK_INTERRUPTIBLE); \
+ __ret; \
+ })
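+
+/*
+ * Usage sketch (taken from gxp_mailbox_manager_wait_async_resp(); the queue
+ * lock must be held on entry, as with wait_event_interruptible_lock_irq_timeout()):
+ *
+ *	spin_lock_irq(&resp_queue->lock);
+ *	timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
+ *		resp_queue->waitq, !list_empty(&resp_queue->dest_queue),
+ *		resp_queue->lock, msecs_to_jiffies(MAILBOX_TIMEOUT));
+ */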
/* Command/Response Structures */
@@ -21,86 +59,19 @@ enum gxp_mailbox_command_code {
GXP_MBOX_CODE_SUSPEND_REQUEST = 1,
};
-/* Basic Buffer descriptor struct for message payloads. */
-struct buffer_descriptor {
- /* Address in the device's virtual address space. */
- u64 address;
- /* Size in bytes. */
- u32 size;
- /* Flags can be used to indicate message type, etc. */
- u32 flags;
-};
-
-/* Format used for mailbox command queues. */
-struct gxp_command {
- /* Sequence number. Should match the corresponding response. */
- u64 seq;
+enum gxp_mailbox_type {
/*
- * Identifies the type of command.
- * Should be a value from `gxp_mailbox_command_code`
+ * Mailbox will utilize `gcip-mailbox.h` internally.
+ * (Note: On `GXP_USE_LEGACY_MAILBOX`, it utilizes `gxp-mailbox-impl.h`
+ * instead.)
+ * Mostly will be used for handling user commands.
*/
- u16 code;
+ GXP_MBOX_TYPE_GENERAL = 0,
/*
- * Priority level from 0 to 99, with 0 being the highest. Pending
- * commands with higher priorities will be executed before lower
- * priority ones.
+ * Mailbox will utilize `gcip-kci.h` internally.
+ * Will be used for handling kernel commands.
*/
- u8 priority;
- /*
- * Insert spaces to make padding explicit. This does not affect
- * alignment.
- */
- u8 reserved[5];
- /* Struct describing the buffer containing the message payload */
- struct buffer_descriptor buffer_descriptor;
-};
-
-/* Format used for mailbox response queues from kernel. */
-struct gxp_response {
- /* Sequence number. Should match the corresponding command. */
- u64 seq;
- /* The status code. Either SUCCESS or an error. */
- u16 status;
- /* Padding. */
- u16 reserved;
- /* Return value, dependent on the command this responds to. */
- u32 retval;
-};
-
-/*
- * Wrapper struct for responses consumed by a thread other than the one which
- * sent the command.
- */
-struct gxp_async_response {
- struct list_head list_entry;
- struct gxp_response resp;
- struct delayed_work timeout_work;
- /*
- * If this response times out, this pointer to the owning mailbox is
- * needed to delete this response from the list of pending responses.
- */
- struct gxp_mailbox *mailbox;
- /* Queue to add the response to once it is complete or timed out */
- struct list_head *dest_queue;
- /*
- * The lock that protects queue pointed to by `dest_queue`.
- * The mailbox code also uses this lock to protect changes to the
- * `dest_queue` pointer itself when processing this response.
- */
- spinlock_t *dest_queue_lock;
- /* Queue of clients to notify when this response is processed */
- wait_queue_head_t *dest_queue_waitq;
- /* Specified power state vote during the command execution */
- uint gxp_power_state;
- /* Specified memory power state vote during the command execution */
- uint memory_power_state;
- /*
- * Specified whether the power state vote is requested with low
- * frequency CLKMUX flag.
- */
- bool requested_low_clkmux;
- /* gxp_eventfd to signal when the response completes. May be NULL */
- struct gxp_eventfd *eventfd;
+ GXP_MBOX_TYPE_KCI = 1,
};
enum gxp_response_status {
@@ -109,12 +80,6 @@ enum gxp_response_status {
GXP_RESP_CANCELLED = 2,
};
-struct gxp_mailbox_wait_list {
- struct list_head list;
- struct gxp_response *resp;
- bool is_async;
-};
-
/* Mailbox Structures */
struct gxp_mailbox_descriptor {
u64 cmd_queue_device_addr;
@@ -123,6 +88,72 @@ struct gxp_mailbox_descriptor {
u32 resp_queue_size;
};
+struct gxp_mailbox;
+
+/*
+ * Defines the callback functions which are used by the mailbox.
+ */
+struct gxp_mailbox_ops {
+ /*
+ * Allocates resources such as cmd_queue and resp_queue which are used by the mailbox.
+	 * This callback will be called by `gxp_mailbox_alloc` internally.
+	 * The following variables should be set in this callback.
+ * - @mailbox->cmd_queue : the pointer of the command queue.
+ * - @mailbox->cmd_queue_size : the size of @mailbox->cmd_queue. (the maximum number of
+ * command elements.)
+ * - @mailbox->cmd_queue_tail : the initial value of the tail of command queue.
+ * - @mailbox->resp_queue : the pointer of the response queue.
+ * - @mailbox->resp_queue_size : the size of @mailbox->resp_queue. (the maximum number of
+ * response elements.)
+ * - @mailbox->resp_queue_head : the initial value of the head of response queue.
+	 * - @mailbox->descriptor         : the pointer of the `struct gxp_mailbox_descriptor`
+ * instance.
+ * - @mailbox
+ * ->descriptor_device_addr : the device address of @mailbox->descriptor.
+ * - @mailbox->descriptor
+ * ->cmd_queue_device_addr : the device address of @mailbox->cmd_queue.
+ * - @mailbox->descriptor
+ * ->resp_queue_device_addr : the device address of @mailbox->resp_queue.
+ * - @mailbox->descriptor
+ * ->cmd_queue_size : the size of @mailbox->cmd_queue.
+ * - @mailbox->descriptor
+ * ->resp_queue_size : the size of @mailbox->resp_queue.
+ * Context: normal.
+ */
+ int (*allocate_resources)(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core);
+ /*
+ * Releases resources which are allocated by `allocate_resources`.
+	 * This callback will be called by `gxp_mailbox_release` internally.
+ * Context: normal.
+ */
+ void (*release_resources)(struct gxp_mailbox *mailbox,
+ struct gxp_virtual_device *vd,
+ uint virt_core);
+#if !GXP_USE_LEGACY_MAILBOX
+ /*
+	 * Operators which have a dependency on GCIP, according to the type of mailbox.
+ * - GXP_MBOX_TYPE_GENERAL: @gcip_ops.mbx must be defined.
+ * - GXP_MBOX_TYPE_KCI: @gcip_ops.kci must be defined.
+ */
+ union {
+ const struct gcip_mailbox_ops *mbx;
+ const struct gcip_kci_ops *kci;
+ } gcip_ops;
+#endif
+};
+
+struct gxp_mailbox_args {
+ enum gxp_mailbox_type type;
+ struct gxp_mailbox_ops *ops;
+ u64 queue_wrap_bit;
+ u32 cmd_elem_size;
+ u32 resp_elem_size;
+ bool ignore_seq_order;
+ void *data;
+};
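A hedged sketch of an `allocate_resources` implementation that fills the fields listed in the ops comment above, using the new coherent-buffer members of `struct gxp_mailbox`. The `my_alloc_queue()` helper, the queue-size constants, and the `vaddr`/`dsp_addr` member names of `struct gxp_coherent_buf` are assumptions, not taken from this patch. The matching `release_resources` callback would free the same three buffers.

#define MY_CMD_QUEUE_ENTRIES	16	/* hypothetical */
#define MY_RESP_QUEUE_ENTRIES	16	/* hypothetical */

/* Hypothetical allocator: assumed to fill buf->vaddr and buf->dsp_addr. */
static int my_alloc_queue(struct gxp_mailbox *mailbox,
			  struct gxp_coherent_buf *buf, size_t size);

static int my_allocate_resources(struct gxp_mailbox *mailbox,
				 struct gxp_virtual_device *vd, uint virt_core)
{
	if (my_alloc_queue(mailbox, &mailbox->cmd_queue_buf,
			   MY_CMD_QUEUE_ENTRIES * mailbox->cmd_elem_size) ||
	    my_alloc_queue(mailbox, &mailbox->resp_queue_buf,
			   MY_RESP_QUEUE_ENTRIES * mailbox->resp_elem_size) ||
	    my_alloc_queue(mailbox, &mailbox->descriptor_buf,
			   sizeof(struct gxp_mailbox_descriptor)))
		return -ENOMEM;

	mailbox->cmd_queue_size = MY_CMD_QUEUE_ENTRIES;
	mailbox->cmd_queue_tail = 0;
	mailbox->resp_queue_size = MY_RESP_QUEUE_ENTRIES;
	mailbox->resp_queue_head = 0;

	/* Publish the queue addresses to the device-visible descriptor. */
	mailbox->descriptor = mailbox->descriptor_buf.vaddr;
	mailbox->descriptor->cmd_queue_device_addr = mailbox->cmd_queue_buf.dsp_addr;
	mailbox->descriptor->resp_queue_device_addr = mailbox->resp_queue_buf.dsp_addr;
	mailbox->descriptor->cmd_queue_size = mailbox->cmd_queue_size;
	mailbox->descriptor->resp_queue_size = mailbox->resp_queue_size;
	return 0;
}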
+
#define GXP_MAILBOX_INT_BIT_COUNT 16
struct gxp_mailbox {
@@ -140,42 +171,53 @@ struct gxp_mailbox {
/* Protects to_host_poll_task while it holds a sync barrier */
struct mutex polling_lock;
- u64 cur_seq;
+	u64 queue_wrap_bit; /* wrap bit for both cmd and resp queues */
+ u32 cmd_elem_size; /* size of element of cmd queue */
+ struct gxp_coherent_buf descriptor_buf;
struct gxp_mailbox_descriptor *descriptor;
- dma_addr_t descriptor_device_addr;
- struct gxp_command *cmd_queue;
+ struct gxp_coherent_buf cmd_queue_buf;
u32 cmd_queue_size; /* size of cmd queue */
u32 cmd_queue_tail; /* offset within the cmd queue */
- dma_addr_t cmd_queue_device_addr; /* device address for cmd queue */
struct mutex cmd_queue_lock; /* protects cmd_queue */
- struct gxp_response *resp_queue;
+ u32 resp_elem_size; /* size of element of resp queue */
+ struct gxp_coherent_buf resp_queue_buf;
u32 resp_queue_size; /* size of resp queue */
u32 resp_queue_head; /* offset within the resp queue */
- dma_addr_t resp_queue_device_addr; /* device address for resp queue */
struct mutex resp_queue_lock; /* protects resp_queue */
- /* add to this list if a command needs to wait for a response */
- struct list_head wait_list;
+ /* commands which need to wait for responses will be added to the wait_list */
struct mutex wait_list_lock; /* protects wait_list */
- /* queue for waiting for the wait_list to be consumed */
- wait_queue_head_t wait_list_waitq;
/* to create our own realtime worker for handling responses */
struct kthread_worker response_worker;
struct task_struct *response_thread;
struct kthread_work response_work;
-};
-typedef void __iomem *(*get_mailbox_base_t)(struct gxp_dev *gxp, uint index);
+ enum gxp_mailbox_type type;
+ struct gxp_mailbox_ops *ops;
+ void *data; /* private data */
-struct gxp_mailbox_manager {
- struct gxp_dev *gxp;
- u8 num_cores;
- struct gxp_mailbox **mailboxes;
- get_mailbox_base_t get_mailbox_csr_base;
- get_mailbox_base_t get_mailbox_data_base;
+ bool ignore_seq_order; /* allow out-of-order responses if true (always false in KCI) */
+
+#if GXP_USE_LEGACY_MAILBOX
+ u64 cur_seq;
+ /* add to this list if a command needs to wait for a response */
+ struct list_head wait_list;
+ /* queue for waiting for the wait_list to be consumed */
+ wait_queue_head_t wait_list_waitq;
+#else /* !GXP_USE_LEGACY_MAILBOX */
+ /*
+ * Implementation of the mailbox according to the type.
+ * - GXP_MBOX_TYPE_GENERAL: @gcip_mbx will be allocated.
+ * - GXP_MBOX_TYPE_KCI: @gcip_kci will be allocated.
+ */
+ union {
+ struct gcip_mailbox *gcip_mbx;
+ struct gcip_kci *gcip_kci;
+ } mbx_impl;
+#endif /* GXP_USE_LEGACY_MAILBOX */
};
/* Mailbox APIs */
@@ -183,40 +225,51 @@ struct gxp_mailbox_manager {
extern int gxp_mbx_timeout;
#define MAILBOX_TIMEOUT (gxp_mbx_timeout * GXP_TIME_DELAY_FACTOR)
-struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
- uint num_cores);
-
/*
- * The following functions all require their caller have locked
- * gxp->vd_semaphore for reading.
+ * The following functions are the low-level interfaces of the mailbox. Their actual work is
+ * implemented by the high-level interfaces such as DCI, UCI, and KCI via the callbacks defined
+ * above. Therefore, do not call these functions directly.
+ * (Except the `gxp_mailbox_{register,unregister}_interrupt_handler` functions.)
+ *
+ * If the mailbox interacts with virtual cores according to the implementation, the caller must
+ * have locked gxp->vd_semaphore for reading.
*/
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd,
- uint virt_core, u8 core_id);
+ uint virt_core, u8 core_id,
+ const struct gxp_mailbox_args *args);
+
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox);
void gxp_mailbox_reset(struct gxp_mailbox *mailbox);
-int gxp_mailbox_execute_cmd(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd, struct gxp_response *resp);
-
-int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
- struct gxp_command *cmd,
- struct list_head *resp_queue,
- spinlock_t *queue_lock,
- wait_queue_head_t *queue_waitq,
- uint gxp_power_state, uint memory_power_state,
- bool requested_low_clkmux,
- struct gxp_eventfd *eventfd);
-
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
struct work_struct *handler);
int gxp_mailbox_unregister_interrupt_handler(struct gxp_mailbox *mailbox,
- u32 int_bit);
+ u32 int_bit);
+
+#if !GXP_USE_LEGACY_MAILBOX
+/*
+ * Executes a command synchronously. If @resp is not NULL, the response will be written to it.
+ * See `gcip_mailbox_send_cmd` in `gcip-mailbox.h` or `gcip_kci_send_cmd` in `gcip-kci.h`
+ * for details.
+ */
+int gxp_mailbox_send_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp);
+
+/*
+ * Executes a command asynchronously. The response will be written to @resp.
+ * See the `gcip_mailbox_put_cmd` function in `gcip-mailbox.h` for details.
+ *
+ * Note: KCI doesn't support asynchronous requests.
+ */
+struct gcip_mailbox_resp_awaiter *
+gxp_mailbox_put_cmd(struct gxp_mailbox *mailbox, void *cmd, void *resp,
+ void *data);
+#endif /* !GXP_USE_LEGACY_MAILBOX */
#endif /* __GXP_MAILBOX_H__ */
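A hedged sketch of driving the synchronous path from a higher-level layer. The `struct my_cmd`/`struct my_resp` element formats and their fields are hypothetical stand-ins for what the DCI/UCI layer actually defines; their sizes would have to match the `cmd_elem_size`/`resp_elem_size` passed in `gxp_mailbox_args`.

/* Hypothetical element formats; the real ones come from the DCI/UCI layer. */
struct my_cmd {
	u64 seq;
	u16 code;
};

struct my_resp {
	u64 seq;
	u16 status;
	u32 retval;
};

/* Hedged sketch: issue one command synchronously on a GENERAL-type mailbox. */
static int my_send_one_cmd(struct gxp_mailbox *mailbox, u16 code, u32 *retval)
{
	struct my_cmd cmd = { .code = code };
	struct my_resp resp;
	int ret;

	ret = gxp_mailbox_send_cmd(mailbox, &cmd, &resp);
	if (ret)
		return ret;
	*retval = resp.retval;
	return 0;
}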
diff --git a/gxp-mapping.c b/gxp-mapping.c
index 9a69173..1a89b1c 100644
--- a/gxp-mapping.c
+++ b/gxp-mapping.c
@@ -6,16 +6,50 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/ktime.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>
+#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include "gxp-client.h"
#include "gxp-debug-dump.h"
#include "gxp-dma.h"
#include "gxp-internal.h"
#include "gxp-mapping.h"
-#include "mm-backport.h"
+
+#if IS_ENABLED(CONFIG_GXP_TEST)
+/* expose this variable to have unit tests set it dynamically */
+bool gxp_log_iova;
+#else
+static bool gxp_log_iova;
+#endif
+
+module_param_named(log_iova, gxp_log_iova, bool, 0660);
+
+void gxp_mapping_iova_log(struct gxp_client *client, struct gxp_mapping *map,
+ u8 mask)
+{
+ static bool is_first_log = true;
+ struct device *dev = client->gxp->dev;
+ const char *op = mask & GXP_IOVA_LOG_MAP ? "MAP" : "UNMAP";
+ const char *buf_type = mask & GXP_IOVA_LOG_DMABUF ? "DMABUF" : "BUFFER";
+
+ if (likely(!gxp_log_iova))
+ return;
+
+ if (is_first_log) {
+ dev_info(
+ dev,
+ "iova_log_start: operation, buf_type, tgid, pid, host_address, device_address, size");
+ is_first_log = false;
+ }
+
+ dev_info(dev, "iova_log: %s, %s, %d, %d, %#llx, %#llx, %zu", op,
+		 buf_type, client->tgid, client->pid, map->host_address,
+ map->device_address, map->size);
+}
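Logging is off by default and gated by the `log_iova` module parameter. A hedged sketch of a call site follows; only `gxp_mapping_iova_log()` and the `GXP_IOVA_LOG_*` masks (declared in gxp-mapping.h below) come from this patch, while the wrapper itself is hypothetical.

/*
 * Hedged sketch: log a successful map of a plain user buffer.  The dma-buf
 * map/unmap paths would pass GXP_IOVA_LOG_DMABUF and/or GXP_IOVA_LOG_UNMAP
 * instead.
 */
static void my_log_buffer_map(struct gxp_client *client,
			      struct gxp_mapping *map)
{
	gxp_mapping_iova_log(client, map,
			     GXP_IOVA_LOG_MAP | GXP_IOVA_LOG_BUFFER);
}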
/* Destructor for a mapping created with `gxp_mapping_create()` */
static void destroy_mapping(struct gxp_mapping *mapping)
@@ -35,9 +69,9 @@ static void destroy_mapping(struct gxp_mapping *mapping)
* user requires a mapping be synced before unmapping, they are
* responsible for calling `gxp_mapping_sync()` before hand.
*/
- gxp_dma_unmap_sg(mapping->gxp, mapping->vd, mapping->virt_core_list,
- mapping->sgt.sgl, mapping->sgt.orig_nents,
- mapping->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ gxp_dma_unmap_sg(mapping->gxp, mapping->domain, mapping->sgt.sgl,
+ mapping->sgt.orig_nents, mapping->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
/* Unpin the user pages */
for_each_sg_page(mapping->sgt.sgl, &sg_iter, mapping->sgt.orig_nents,
@@ -57,9 +91,8 @@ static void destroy_mapping(struct gxp_mapping *mapping)
}
struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, u64 user_address,
- size_t size, u32 flags,
+ struct gxp_iommu_domain *domain,
+ u64 user_address, size_t size, u32 flags,
enum dma_data_direction dir)
{
struct gxp_mapping *mapping = NULL;
@@ -154,8 +187,7 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
mapping->destructor = destroy_mapping;
mapping->host_address = user_address;
mapping->gxp = gxp;
- mapping->virt_core_list = virt_core_list;
- mapping->vd = vd;
+ mapping->domain = domain;
mapping->size = size;
mapping->gxp_dma_flags = flags;
mapping->dir = dir;
@@ -168,8 +200,8 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
}
/* map the user pages */
- ret = gxp_dma_map_sg(gxp, mapping->vd, mapping->virt_core_list,
- mapping->sgt.sgl, mapping->sgt.nents, mapping->dir,
+ ret = gxp_dma_map_sg(gxp, mapping->domain, mapping->sgt.sgl,
+ mapping->sgt.nents, mapping->dir,
DMA_ATTR_SKIP_CPU_SYNC, mapping->gxp_dma_flags);
if (!ret) {
dev_err(gxp->dev, "Failed to map sgt (ret=%d)\n", ret);
@@ -240,6 +272,13 @@ int gxp_mapping_sync(struct gxp_mapping *mapping, u32 offset, u32 size,
}
/*
+ * Since the scatter-gather list of the mapping is modified while it is
+ * being synced, only one sync for a given mapping can occur at a time.
+ * Rather than maintain a mutex for every mapping, lock the mapping list
+ * mutex, making all syncs mutually exclusive.
+ */
+ mutex_lock(&mapping->sync_lock);
+ /*
* Mappings are created at a PAGE_SIZE granularity, however other data
* which is not part of the mapped buffer may be present in the first
* and last pages of the buffer's scattergather list.
@@ -270,17 +309,9 @@ int gxp_mapping_sync(struct gxp_mapping *mapping, u32 offset, u32 size,
/* Make sure a valid starting scatterlist was found for the start */
if (!start_sg) {
ret = -EINVAL;
- goto out;
+ goto out_unlock;
}
- /*
- * Since the scatter-gather list of the mapping is modified while it is
- * being synced, only one sync for a given mapping can occur at a time.
- * Rather than maintain a mutex for every mapping, lock the mapping list
- * mutex, making all syncs mutually exclusive.
- */
- mutex_lock(&mapping->sync_lock);
-
start_sg->offset += start_diff;
start_sg->dma_address += start_diff;
start_sg->length -= start_diff;
@@ -304,8 +335,8 @@ int gxp_mapping_sync(struct gxp_mapping *mapping, u32 offset, u32 size,
start_sg->length += start_diff;
start_sg->dma_length += start_diff;
+out_unlock:
mutex_unlock(&mapping->sync_lock);
-
out:
gxp_mapping_put(mapping);
diff --git a/gxp-mapping.h b/gxp-mapping.h
index dbb80d9..8d970ef 100644
--- a/gxp-mapping.h
+++ b/gxp-mapping.h
@@ -16,6 +16,16 @@
#include "gxp-internal.h"
+#if IS_ENABLED(CONFIG_GXP_TEST)
+/* expose this variable to have unit tests set it dynamically */
+extern bool gxp_log_iova;
+#endif
+
+#define GXP_IOVA_LOG_UNMAP (0u << 0)
+#define GXP_IOVA_LOG_MAP (1u << 0)
+#define GXP_IOVA_LOG_BUFFER (0u << 1)
+#define GXP_IOVA_LOG_DMABUF (1u << 1)
+
struct gxp_mapping {
struct rb_node node;
refcount_t refcount;
@@ -27,8 +37,7 @@ struct gxp_mapping {
*/
u64 host_address;
struct gxp_dev *gxp;
- uint virt_core_list;
- struct gxp_virtual_device *vd;
+ struct gxp_iommu_domain *domain;
/*
* `device_address` and `size` are the base address and size of the
* user buffer a mapping represents.
@@ -56,10 +65,21 @@ struct gxp_mapping {
};
/**
+ * gxp_mapping_iova_log() - Log IOVA mapping details
+ * @client: The client to create/destroy the mapping for
+ * @map: The mapping being handled
+ * @mask: The mask combination of GXP_IOVA_LOG_*
+ *
+ * Log IOVA mapping details for each map/unmap operation.
+ * The field names of the data are logged once, before the first mapping is logged.
+ */
+void gxp_mapping_iova_log(struct gxp_client *client, struct gxp_mapping *map,
+ u8 mask);
+
+/**
* gxp_mapping_create() - Create a mapping for a user buffer
* @gxp: The GXP device to create the mapping for
- * @vd: The virtual device to create the mapping for
- * @virt_core_list: A bitfield indicating the cores in @vd to map the buffer to
+ * @domain: The IOMMU domain to create the mapping for
* @user_address: The user-space address of the buffer to map
* @size: The size of the buffer to be mapped
* @flags: Flags describing the type of mapping to create; currently unused
@@ -76,9 +96,8 @@ struct gxp_mapping {
* to map the buffer for the device.
*/
struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core_list, u64 user_address,
- size_t size, u32 flags,
+ struct gxp_iommu_domain *domain,
+ u64 user_address, size_t size, u32 flags,
enum dma_data_direction dir);
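A hedged sketch of the typical create-then-store flow against the new domain-based signature, mirroring the old GXP_MAP_BUFFER ioctl path. Error handling is abbreviated, and the pairing of the virtual device with the domain and the DMA direction are assumptions for illustration.

#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/types.h>

/*
 * Hedged sketch: map a user buffer into @domain and record it on the virtual
 * device.  On success the VD holds its own reference to the mapping, so the
 * creation reference is dropped here either way.
 */
static int my_map_user_buffer(struct gxp_dev *gxp,
			      struct gxp_virtual_device *vd,
			      struct gxp_iommu_domain *domain,
			      u64 user_address, size_t size)
{
	struct gxp_mapping *map;
	int ret;

	map = gxp_mapping_create(gxp, domain, user_address, size,
				 /*flags=*/0, DMA_BIDIRECTIONAL);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = gxp_vd_mapping_store(vd, map);
	gxp_mapping_put(map);
	return ret;
}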
/**
diff --git a/gxp-mba-driver.c b/gxp-mba-driver.c
new file mode 100644
index 0000000..14a8057
--- /dev/null
+++ b/gxp-mba-driver.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GXP mailbox array driver implementation.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <asm/barrier.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "gxp-config.h"
+#include "gxp-mailbox-driver.h"
+#include "gxp-mailbox.h"
+
+#include "gxp-mailbox-driver.c"
+
+/* gxp-mailbox-driver.h: CSR-based calls */
+
+static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
+{
+ return readl(mailbox->csr_reg_base + reg_offset);
+}
+
+static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
+{
+ writel(value, mailbox->csr_reg_base + reg_offset);
+}
+
+void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
+{
+ //TODO(b/261670165): check if client flush is required.
+}
+
+void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
+ u32 int_mask)
+{
+ /*
+ * Ensure all memory writes have been committed to memory before
+ * signalling to the device to read from them. This avoids the scenario
+ * where the interrupt trigger write gets delivered to the MBX HW before
+ * the DRAM transactions made it to DRAM since they're Normal
+ * transactions and can be re-ordered and backed off behind other
+ * transfers.
+ */
+ wmb();
+
+ csr_write(mailbox, MBOX_CLIENT_IRQ_TRIG, 0x1);
+}
+
+u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_CLIENT_SHDW);
+}
+
+void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ /* Write 1 to clear */
+ csr_write(mailbox, MBOX_CLIENT_IRQ_STATUS, 0x1);
+}
+
+void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
+{
+ csr_write(mailbox, MBOX_CLIENT_IRQ_CFG, MBOX_CLIENT_IRQ_MSG_INT);
+}
+
+u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
+{
+ return csr_read(mailbox, MBOX_CLIENT_IRQ_CFG);
+}
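The `wmb()` in `gxp_mailbox_generate_device_interrupt()` above follows the usual publish-then-doorbell ordering. A hedged, generic sketch of that pattern is shown here; the `my_cmd` type and the `doorbell` pointer are hypothetical, not part of this driver.

#include <asm/barrier.h>
#include <linux/io.h>
#include <linux/types.h>

struct my_cmd {
	u64 seq;
	u16 code;
};

/* Hedged sketch: publish a command in shared DRAM, then ring the doorbell CSR. */
static void my_post_and_ring(struct my_cmd *slot, const struct my_cmd *cmd,
			     void __iomem *doorbell)
{
	*slot = *cmd;	     /* 1. write the payload into the shared queue   */
	wmb();		     /* 2. order the DRAM write before the CSR write */
	writel(1, doorbell); /* 3. trigger the device-side interrupt         */
}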
diff --git a/gxp-notification.h b/gxp-notification.h
index a4e4fd3..6f43b70 100644
--- a/gxp-notification.h
+++ b/gxp-notification.h
@@ -14,7 +14,7 @@
enum gxp_notification_to_host_type {
HOST_NOTIF_MAILBOX_RESPONSE = 0,
HOST_NOTIF_DEBUG_DUMP_READY = 1,
- HOST_NOTIF_TELEMETRY_STATUS = 2,
+ HOST_NOTIF_CORE_TELEMETRY_STATUS = 2,
HOST_NOTIF_MAX
};
diff --git a/gxp-platform.c b/gxp-platform.c
index 5da0eb2..0f56b9c 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -5,2310 +5,35 @@
* Copyright (C) 2021 Google LLC
*/
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
-#include <linux/platform_data/sscoredump.h>
-#endif
-
-#include <linux/acpi.h>
-#include <linux/cred.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/genalloc.h>
-#include <linux/kthread.h>
-#include <linux/log2.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/uidgid.h>
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
-#include <soc/google/tpu-ext.h>
-#endif
-#include "gxp-client.h"
-#include "gxp-config.h"
-#include "gxp-debug-dump.h"
-#include "gxp-debugfs.h"
-#include "gxp-dma.h"
-#include "gxp-dmabuf.h"
-#include "gxp-domain-pool.h"
-#include "gxp-firmware.h"
-#include "gxp-firmware-data.h"
#include "gxp-internal.h"
-#include "gxp-mailbox.h"
-#include "gxp-mailbox-driver.h"
-#include "gxp-mapping.h"
-#include "gxp-notification.h"
-#include "gxp-pm.h"
-#include "gxp-telemetry.h"
-#include "gxp-thermal.h"
-#include "gxp-vd.h"
-#include "gxp-wakelock.h"
-#include "gxp.h"
-
-static struct gxp_dev *gxp_debug_pointer;
-
-#define __wait_event_lock_irq_timeout_exclusive(wq_head, condition, lock, \
- timeout, state) \
- ___wait_event(wq_head, ___wait_cond_timeout(condition), state, 1, \
- timeout, spin_unlock_irq(&lock); \
- __ret = schedule_timeout(__ret); spin_lock_irq(&lock))
-
-/*
- * wait_event_interruptible_lock_irq_timeout() but set the exclusive flag.
- */
-#define wait_event_interruptible_lock_irq_timeout_exclusive( \
- wq_head, condition, lock, timeout) \
- ({ \
- long __ret = timeout; \
- if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_lock_irq_timeout_exclusive( \
- wq_head, condition, lock, timeout, \
- TASK_INTERRUPTIBLE); \
- __ret; \
- })
-
-/* Caller needs to hold client->semaphore */
-static bool check_client_has_available_vd(struct gxp_client *client,
- char *ioctl_name)
-{
- struct gxp_dev *gxp = client->gxp;
-
- lockdep_assert_held(&client->semaphore);
- if (!client->vd) {
- dev_err(gxp->dev,
- "%s requires the client allocate a VIRTUAL_DEVICE\n",
- ioctl_name);
- return false;
- }
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
- ioctl_name);
- return false;
- }
- return true;
-}
-
-/* Caller needs to hold client->semaphore for reading */
-static bool check_client_has_available_vd_wakelock(struct gxp_client *client,
- char *ioctl_name)
-{
- struct gxp_dev *gxp = client->gxp;
-
- lockdep_assert_held_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "%s requires the client hold a VIRTUAL_DEVICE wakelock\n",
- ioctl_name);
- return false;
- }
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
- ioctl_name);
- return false;
- }
- return true;
-}
-
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
-static struct sscd_platform_data gxp_sscd_pdata;
-
-static void gxp_sscd_release(struct device *dev)
-{
- pr_debug("%s\n", __func__);
-}
-
-static struct platform_device gxp_sscd_dev = {
- .name = GXP_DRIVER_NAME,
- .driver_override = SSCD_NAME,
- .id = -1,
- .dev = {
- .platform_data = &gxp_sscd_pdata,
- .release = gxp_sscd_release,
- },
-};
-#endif // CONFIG_SUBSYSTEM_COREDUMP
-
-/* Mapping from GXP_POWER_STATE_* to enum aur_power_state in gxp-pm.h */
-static const uint aur_state_array[GXP_NUM_POWER_STATES] = {
- AUR_OFF, AUR_UUD, AUR_SUD, AUR_UD, AUR_NOM,
- AUR_READY, AUR_UUD_PLUS, AUR_SUD_PLUS, AUR_UD_PLUS
-};
-/* Mapping from MEMORY_POWER_STATE_* to enum aur_memory_power_state in gxp-pm.h */
-static const uint aur_memory_state_array[MEMORY_POWER_STATE_MAX + 1] = {
- AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
- AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
-};
-
-static int gxp_open(struct inode *inode, struct file *file)
-{
- struct gxp_client *client;
- struct gxp_dev *gxp = container_of(file->private_data, struct gxp_dev,
- misc_dev);
- int ret = 0;
-
- /* If this is the first call to open(), request the firmware files */
- ret = gxp_firmware_request_if_needed(gxp);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to request dsp firmware files (ret=%d)\n", ret);
- return ret;
- }
-
- client = gxp_client_create(gxp);
- if (IS_ERR(client))
- return PTR_ERR(client);
-
- client->tgid = current->tgid;
- client->pid = current->pid;
-
- file->private_data = client;
-
- mutex_lock(&gxp->client_list_lock);
- list_add(&client->list_entry, &gxp->client_list);
- mutex_unlock(&gxp->client_list_lock);
-
- return ret;
-}
-
-static int gxp_release(struct inode *inode, struct file *file)
-{
- struct gxp_client *client = file->private_data;
-
- /*
- * If open failed and no client was created then no clean-up is needed.
- */
- if (!client)
- return 0;
-
- if (client->enabled_telemetry_logging)
- gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_LOGGING);
- if (client->enabled_telemetry_tracing)
- gxp_telemetry_disable(client->gxp, GXP_TELEMETRY_TYPE_TRACING);
-
- mutex_lock(&client->gxp->client_list_lock);
- list_del(&client->list_entry);
- mutex_unlock(&client->gxp->client_list_lock);
-
- gxp_client_destroy(client);
-
- return 0;
-}
-
-static inline enum dma_data_direction mapping_flags_to_dma_dir(u32 flags)
-{
- switch (flags & 0x3) {
- case 0x0: /* 0b00 */
- return DMA_BIDIRECTIONAL;
- case 0x1: /* 0b01 */
- return DMA_TO_DEVICE;
- case 0x2: /* 0b10 */
- return DMA_FROM_DEVICE;
- }
-
- return DMA_NONE;
-}
-
-static int gxp_map_buffer(struct gxp_client *client,
- struct gxp_map_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_ioctl ibuf;
- struct gxp_mapping *map;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.size == 0 || ibuf.virtual_core_list == 0)
- return -EINVAL;
-
- if (ibuf.host_address % L1_CACHE_BYTES || ibuf.size % L1_CACHE_BYTES) {
- dev_err(gxp->dev,
- "Mapped buffers must be cache line aligned and padded.\n");
- return -EINVAL;
- }
-
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAP_BUFFER")) {
- ret = -ENODEV;
- goto out;
- }
-
- /* the list contains un-allocated core bits */
- if (ibuf.virtual_core_list & ~(BIT(client->vd->num_cores) - 1)) {
- ret = -EINVAL;
- goto out;
- }
-
- map = gxp_mapping_create(gxp, client->vd, ibuf.virtual_core_list,
- ibuf.host_address, ibuf.size,
- /*gxp_dma_flags=*/0,
- mapping_flags_to_dma_dir(ibuf.flags));
- if (IS_ERR(map)) {
- ret = PTR_ERR(map);
- dev_err(gxp->dev, "Failed to create mapping (ret=%d)\n", ret);
- goto out;
- }
-
- ret = gxp_vd_mapping_store(client->vd, map);
- if (ret) {
- dev_err(gxp->dev, "Failed to store mapping (ret=%d)\n", ret);
- goto error_destroy;
- }
-
- ibuf.device_address = map->device_address;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- ret = -EFAULT;
- goto error_remove;
- }
-
- /*
- * The virtual device acquired its own reference to the mapping when
- * it was stored in the VD's records. Release the reference from
- * creating the mapping since this function is done using it.
- */
- gxp_mapping_put(map);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-
-error_remove:
- gxp_vd_mapping_remove(client->vd, map);
-error_destroy:
- gxp_mapping_put(map);
- up_read(&client->semaphore);
- return ret;
-}
-
-static int gxp_unmap_buffer(struct gxp_client *client,
- struct gxp_map_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_ioctl ibuf;
- struct gxp_mapping *map;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_UNMAP_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- map = gxp_vd_mapping_search(client->vd,
- (dma_addr_t)ibuf.device_address);
- if (!map) {
- dev_err(gxp->dev,
- "Mapping not found for provided device address %#llX\n",
- ibuf.device_address);
- ret = -EINVAL;
- goto out;
- } else if (!map->host_address) {
- dev_err(gxp->dev, "dma-bufs must be unmapped via GXP_UNMAP_DMABUF\n");
- ret = -EINVAL;
- goto out;
- }
-
- WARN_ON(map->host_address != ibuf.host_address);
-
- gxp_vd_mapping_remove(client->vd, map);
-
- /* Release the reference from gxp_vd_mapping_search() */
- gxp_mapping_put(map);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_sync_buffer(struct gxp_client *client,
- struct gxp_sync_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_sync_ioctl ibuf;
- struct gxp_mapping *map;
- int ret;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_SYNC_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- map = gxp_vd_mapping_search(client->vd,
- (dma_addr_t)ibuf.device_address);
- if (!map) {
- dev_err(gxp->dev,
- "Mapping not found for provided device address %#llX\n",
- ibuf.device_address);
- ret = -EINVAL;
- goto out;
- }
-
- ret = gxp_mapping_sync(map, ibuf.offset, ibuf.size,
- ibuf.flags == GXP_SYNC_FOR_CPU);
-
- /* Release the reference from gxp_vd_mapping_search() */
- gxp_mapping_put(map);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_mailbox_command_compat(struct gxp_client *client,
- struct gxp_mailbox_command_compat_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mailbox_command_compat_ioctl ibuf;
- struct gxp_command cmd;
- struct buffer_descriptor buffer;
- int virt_core, phys_core;
- int ret = 0;
- uint gxp_power_state, memory_power_state;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
- dev_err(gxp->dev,
- "Unable to copy ioctl data from user-space\n");
- return -EFAULT;
- }
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_MAILBOX_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- virt_core = ibuf.virtual_core_id;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
- if (phys_core < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- virt_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (!gxp_is_fw_running(gxp, phys_core)) {
- dev_err(gxp->dev,
- "Cannot process mailbox command for core %d when firmware isn't running\n",
- phys_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (gxp->mailbox_mgr == NULL || gxp->mailbox_mgr->mailboxes == NULL ||
- gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
- dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
- phys_core);
- ret = -EIO;
- goto out;
- }
-
- /* Pack the command structure */
- buffer.address = ibuf.device_address;
- buffer.size = ibuf.size;
- buffer.flags = ibuf.flags;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.code = GXP_MBOX_CODE_DISPATCH; /* All IOCTL commands are dispatch */
- cmd.priority = 0; /* currently unused */
- cmd.buffer_descriptor = buffer;
- gxp_power_state = AUR_OFF;
- memory_power_state = AUR_MEM_UNDEFINED;
-
- ret = gxp_mailbox_execute_cmd_async(
- gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
- &client->vd->mailbox_resp_queues[virt_core].queue,
- &client->vd->mailbox_resp_queues[virt_core].lock,
- &client->vd->mailbox_resp_queues[virt_core].waitq,
- gxp_power_state, memory_power_state, false,
- client->mb_eventfds[virt_core]);
- if (ret) {
- dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
- ret);
- goto out;
- }
-
- ibuf.sequence_number = cmd.seq;
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- dev_err(gxp->dev, "Failed to copy back sequence number!\n");
- ret = -EFAULT;
- goto out;
- }
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_mailbox_command(struct gxp_client *client,
- struct gxp_mailbox_command_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mailbox_command_ioctl ibuf;
- struct gxp_command cmd;
- struct buffer_descriptor buffer;
- int virt_core, phys_core;
- int ret = 0;
- uint gxp_power_state, memory_power_state;
- bool requested_low_clkmux = false;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
- dev_err(gxp->dev,
- "Unable to copy ioctl data from user-space\n");
- return -EFAULT;
- }
- if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
- dev_err(gxp->dev,
- "GXP_POWER_STATE_OFF is not a valid value when executing a mailbox command\n");
- return -EINVAL;
- }
- if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
- dev_err(gxp->dev, "Requested power state is invalid\n");
- return -EINVAL;
- }
- if (ibuf.memory_power_state < MEMORY_POWER_STATE_UNDEFINED ||
- ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) {
- dev_err(gxp->dev, "Requested memory power state is invalid\n");
- return -EINVAL;
- }
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
- ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
- }
-
- if(ibuf.power_flags & GXP_POWER_NON_AGGRESSOR)
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_MAILBOX_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- virt_core = ibuf.virtual_core_id;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
- if (phys_core < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- virt_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (!gxp_is_fw_running(gxp, phys_core)) {
- dev_err(gxp->dev,
- "Cannot process mailbox command for core %d when firmware isn't running\n",
- phys_core);
- ret = -EINVAL;
- goto out;
- }
-
- if (gxp->mailbox_mgr == NULL || gxp->mailbox_mgr->mailboxes == NULL ||
- gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
- dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
- phys_core);
- ret = -EIO;
- goto out;
- }
-
- /* Pack the command structure */
- buffer.address = ibuf.device_address;
- buffer.size = ibuf.size;
- buffer.flags = ibuf.flags;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.code = GXP_MBOX_CODE_DISPATCH; /* All IOCTL commands are dispatch */
- cmd.priority = 0; /* currently unused */
- cmd.buffer_descriptor = buffer;
- gxp_power_state = aur_state_array[ibuf.gxp_power_state];
- memory_power_state = aur_memory_state_array[ibuf.memory_power_state];
- requested_low_clkmux = (ibuf.power_flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
-
- ret = gxp_mailbox_execute_cmd_async(
- gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
- &client->vd->mailbox_resp_queues[virt_core].queue,
- &client->vd->mailbox_resp_queues[virt_core].lock,
- &client->vd->mailbox_resp_queues[virt_core].waitq,
- gxp_power_state, memory_power_state, requested_low_clkmux,
- client->mb_eventfds[virt_core]);
- if (ret) {
- dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
- ret);
- goto out;
- }
-
- ibuf.sequence_number = cmd.seq;
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- dev_err(gxp->dev, "Failed to copy back sequence number!\n");
- ret = -EFAULT;
- goto out;
- }
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_mailbox_response(struct gxp_client *client,
- struct gxp_mailbox_response_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_mailbox_response_ioctl ibuf;
- struct gxp_async_response *resp_ptr;
- int virt_core;
- int ret = 0;
- long timeout;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_MAILBOX_RESPONSE")) {
- ret = -ENODEV;
- goto out;
- }
-
- virt_core = ibuf.virtual_core_id;
- if (virt_core >= client->vd->num_cores) {
- dev_err(gxp->dev, "Mailbox response failed: Invalid virtual core id (%u)\n",
- virt_core);
- ret = -EINVAL;
- goto out;
- }
-
- spin_lock_irq(&client->vd->mailbox_resp_queues[virt_core].lock);
-
- /*
- * The "exclusive" version of wait_event is used since each wake
- * corresponds to the addition of exactly one new response to be
- * consumed. Therefore, only one waiting response ioctl can ever
- * proceed per wake event.
- */
- timeout = wait_event_interruptible_lock_irq_timeout_exclusive(
- client->vd->mailbox_resp_queues[virt_core].waitq,
- !list_empty(&client->vd->mailbox_resp_queues[virt_core].queue),
- client->vd->mailbox_resp_queues[virt_core].lock,
- msecs_to_jiffies(MAILBOX_TIMEOUT));
- if (timeout <= 0) {
- spin_unlock_irq(
- &client->vd->mailbox_resp_queues[virt_core].lock);
- /* unusual case - this only happens when there is no command pushed */
- ret = timeout ? -ETIMEDOUT : timeout;
- goto out;
- }
- resp_ptr = list_first_entry(
- &client->vd->mailbox_resp_queues[virt_core].queue,
- struct gxp_async_response, list_entry);
-
- /* Pop the front of the response list */
- list_del(&(resp_ptr->list_entry));
-
- spin_unlock_irq(&client->vd->mailbox_resp_queues[virt_core].lock);
-
- ibuf.sequence_number = resp_ptr->resp.seq;
- switch (resp_ptr->resp.status) {
- case GXP_RESP_OK:
- ibuf.error_code = GXP_RESPONSE_ERROR_NONE;
- /* retval is only valid if status == GXP_RESP_OK */
- ibuf.cmd_retval = resp_ptr->resp.retval;
- break;
- case GXP_RESP_CANCELLED:
- ibuf.error_code = GXP_RESPONSE_ERROR_TIMEOUT;
- break;
- default:
- /* No other status values are valid at this point */
- WARN(true, "Completed response had invalid status %hu",
- resp_ptr->resp.status);
- ibuf.error_code = GXP_RESPONSE_ERROR_INTERNAL;
- break;
- }
-
- /*
- * We must be absolutely sure the timeout work has been cancelled
- * and/or completed before freeing the `gxp_async_response`.
- * There are 3 possible cases when we arrive at this point:
- * 1) The response arrived normally and the timeout was cancelled
- * 2) The response timedout and its timeout handler finished
- * 3) The response handler and timeout handler raced, and the response
- * handler "cancelled" the timeout handler while it was already in
- * progress.
- *
- * This call handles case #3, and ensures any in-process timeout
- * handler (which may reference the `gxp_async_response`) has
- * been able to exit cleanly.
- */
- cancel_delayed_work_sync(&resp_ptr->timeout_work);
- kfree(resp_ptr);
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- ret = -EFAULT;
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_get_specs(struct gxp_client *client,
- struct gxp_specs_ioctl __user *argp)
-{
- struct gxp_specs_ioctl ibuf = {
- .core_count = GXP_NUM_CORES,
- .memory_per_core = client->gxp->memory_per_core,
- };
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
-
- return 0;
-}
-
-static int gxp_allocate_vd(struct gxp_client *client,
- struct gxp_virtual_device_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_virtual_device_ioctl ibuf;
- struct gxp_virtual_device *vd;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.core_count == 0 || ibuf.core_count > GXP_NUM_CORES) {
- dev_err(gxp->dev, "Invalid core count (%u)\n", ibuf.core_count);
- return -EINVAL;
- }
-
- if (ibuf.memory_per_core > gxp->memory_per_core) {
- dev_err(gxp->dev, "Invalid memory-per-core (%u)\n",
- ibuf.memory_per_core);
- return -EINVAL;
- }
-
- down_write(&client->semaphore);
-
- if (client->vd) {
- dev_err(gxp->dev, "Virtual device was already allocated for client\n");
- ret = -EINVAL;
- goto out;
- }
-
- vd = gxp_vd_allocate(gxp, ibuf.core_count);
- if (IS_ERR(vd)) {
- ret = PTR_ERR(vd);
- dev_err(gxp->dev,
- "Failed to allocate virtual device for client (%d)\n",
- ret);
- goto out;
- }
-
- client->vd = vd;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_etm_trace_start_command(struct gxp_client *client,
- struct gxp_etm_trace_start_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_etm_trace_start_ioctl ibuf;
- int phys_core;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- ibuf.trace_ram_enable &= ETM_TRACE_LSB_MASK;
- ibuf.atb_enable &= ETM_TRACE_LSB_MASK;
-
- if (!ibuf.trace_ram_enable && !ibuf.atb_enable)
- return -EINVAL;
-
- if (!(ibuf.sync_msg_period == 0 ||
- (ibuf.sync_msg_period <= ETM_TRACE_SYNC_MSG_PERIOD_MAX &&
- ibuf.sync_msg_period >= ETM_TRACE_SYNC_MSG_PERIOD_MIN &&
- is_power_of_2(ibuf.sync_msg_period))))
- return -EINVAL;
-
- if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
- return -EINVAL;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_TRACE_START_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core =
- gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
- ibuf.virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * TODO (b/185260919): Pass the etm trace configuration to system FW
- * once communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
- __u16 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- u16 virtual_core_id;
- int phys_core;
- int ret = 0;
-
- if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_TRACE_SW_STOP_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Trace stop via software trigger failed: Invalid virtual core id (%u)\n",
- virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * TODO (b/185260919): Pass the etm stop signal to system FW once
- * communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
- __u16 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- u16 virtual_core_id;
- int phys_core;
- int ret = 0;
-
- if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_TRACE_CLEANUP_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
- virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * TODO (b/185260919): Pass the etm clean up signal to system FW once
- * communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_etm_get_trace_info_command(struct gxp_client *client,
- struct gxp_etm_get_trace_info_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_etm_get_trace_info_ioctl ibuf;
- int phys_core;
- u32 *trace_header;
- u32 *trace_data;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.type > 1)
- return -EINVAL;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(
- client, "GXP_ETM_GET_TRACE_INFO_COMMAND")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
- if (phys_core < 0) {
- dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
- ibuf.virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- trace_header = kzalloc(GXP_TRACE_HEADER_SIZE, GFP_KERNEL);
- if (!trace_header) {
- ret = -ENOMEM;
- goto out;
- }
-
- trace_data = kzalloc(GXP_TRACE_RAM_SIZE, GFP_KERNEL);
- if (!trace_data) {
- ret = -ENOMEM;
- goto out_free_header;
- }
-
- /*
- * TODO (b/185260919): Get trace information from system FW once
- * communication channel between kernel and system FW is ready
- * (b/185819530).
- */
-
- if (copy_to_user((void __user *)ibuf.trace_header_addr, trace_header,
- GXP_TRACE_HEADER_SIZE)) {
- ret = -EFAULT;
- goto out_free_data;
- }
-
- if (ibuf.type == 1) {
- if (copy_to_user((void __user *)ibuf.trace_data_addr,
- trace_data, GXP_TRACE_RAM_SIZE)) {
- ret = -EFAULT;
- goto out_free_data;
- }
- }
-
-out_free_data:
- kfree(trace_data);
-out_free_header:
- kfree(trace_header);
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_enable_telemetry(struct gxp_client *client,
- __u8 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- __u8 type;
- int ret;
-
- if (copy_from_user(&type, argp, sizeof(type)))
- return -EFAULT;
-
- if (type != GXP_TELEMETRY_TYPE_LOGGING &&
- type != GXP_TELEMETRY_TYPE_TRACING)
- return -EINVAL;
-
- ret = gxp_telemetry_enable(gxp, type);
-
- /*
- * Record what telemetry types this client enabled so they can be
- * cleaned-up if the client closes without disabling them.
- */
- if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
- client->enabled_telemetry_logging = true;
- if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
- client->enabled_telemetry_tracing = true;
-
- return ret;
-}
-
-static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- __u8 type;
- int ret;
-
- if (copy_from_user(&type, argp, sizeof(type)))
- return -EFAULT;
-
- if (type != GXP_TELEMETRY_TYPE_LOGGING &&
- type != GXP_TELEMETRY_TYPE_TRACING)
- return -EINVAL;
-
- ret = gxp_telemetry_disable(gxp, type);
-
- if (!ret && type == GXP_TELEMETRY_TYPE_LOGGING)
- client->enabled_telemetry_logging = false;
- if (!ret && type == GXP_TELEMETRY_TYPE_TRACING)
- client->enabled_telemetry_tracing = false;
-
- return ret;
-}
-
-static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
- struct gxp_tpu_mbx_queue_ioctl __user *argp)
-{
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
- struct gxp_dev *gxp = client->gxp;
- struct edgetpu_ext_mailbox_info *mbx_info;
- struct gxp_tpu_mbx_queue_ioctl ibuf;
- struct edgetpu_ext_client_info gxp_tpu_info;
- u32 phys_core_list = 0;
- u32 virtual_core_list;
- u32 core_count;
- int ret = 0;
-
- if (!gxp->tpu_dev.mbx_paddr) {
- dev_err(gxp->dev, "%s: TPU is not available for interop\n",
- __func__);
- return -EINVAL;
- }
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
- down_read(&gxp->vd_semaphore);
+#include "gxp-common-platform.c"
- virtual_core_list = ibuf.virtual_core_list;
- core_count = hweight_long(virtual_core_list);
- phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
- client->vd, virtual_core_list);
- if (!phys_core_list) {
- dev_err(gxp->dev, "%s: invalid virtual core list 0x%x\n",
- __func__, virtual_core_list);
- ret = -EINVAL;
- goto out;
- }
-
- mbx_info =
- kmalloc(sizeof(struct edgetpu_ext_mailbox_info) + core_count *
- sizeof(struct edgetpu_ext_mailbox_descriptor),
- GFP_KERNEL);
- if (!mbx_info) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (client->tpu_file) {
- dev_err(gxp->dev, "Mappings already exist for TPU mailboxes");
- ret = -EBUSY;
- goto out_free;
- }
-
- gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
- gxp_tpu_info.mbox_map = phys_core_list;
- gxp_tpu_info.attr = (struct edgetpu_mailbox_attr __user *)ibuf.attr_ptr;
- ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- ALLOCATE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- mbx_info);
- if (ret) {
- dev_err(gxp->dev, "Failed to allocate ext TPU mailboxes %d",
- ret);
- goto out_free;
- }
- /*
- * If someone is attacking us through this interface -
- * it's possible that ibuf.tpu_fd here is already a different file from
- * the one passed to edgetpu_ext_driver_cmd() (if the runtime closes the
- * FD and opens another file exactly between the TPU driver call above
- * and the fget below).
- * But the worst consequence of this attack is we fget() ourselves (GXP
- * FD), which only leads to memory leak (because the file object has a
- * reference to itself). The race is also hard to hit so we don't insist
- * on preventing it.
- */
- client->tpu_file = fget(ibuf.tpu_fd);
- if (!client->tpu_file) {
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- NULL);
- ret = -EINVAL;
- goto out_free;
- }
- /* Align queue size to page size for iommu map. */
- mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
- mbx_info->respq_size = ALIGN(mbx_info->respq_size, PAGE_SIZE);
-
- ret = gxp_dma_map_tpu_buffer(gxp, client->vd, virtual_core_list,
- phys_core_list, mbx_info);
- if (ret) {
- dev_err(gxp->dev, "Failed to map TPU mailbox buffer %d", ret);
- fput(client->tpu_file);
- client->tpu_file = NULL;
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
- NULL);
- goto out_free;
- }
- client->mbx_desc.phys_core_list = phys_core_list;
- client->mbx_desc.virt_core_list = virtual_core_list;
- client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
- client->mbx_desc.respq_size = mbx_info->respq_size;
-
-out_free:
- kfree(mbx_info);
-
-out:
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_write(&client->semaphore);
-
- return ret;
-#else
- return -ENODEV;
-#endif
-}
-
-static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
- struct gxp_tpu_mbx_queue_ioctl __user *argp)
-{
-#if (IS_ENABLED(CONFIG_GXP_TEST) || IS_ENABLED(CONFIG_ANDROID)) && !IS_ENABLED(CONFIG_GXP_GEM5)
- struct gxp_dev *gxp = client->gxp;
- struct gxp_tpu_mbx_queue_ioctl ibuf;
- struct edgetpu_ext_client_info gxp_tpu_info;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_UNMAP_TPU_MBX_QUEUE requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- if (!client->tpu_file) {
- dev_err(gxp->dev, "No mappings exist for TPU mailboxes");
- ret = -EINVAL;
- goto out;
- }
-
- gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
-
- gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
- edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
- EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
- FREE_EXTERNAL_MAILBOX, &gxp_tpu_info, NULL);
- fput(client->tpu_file);
- client->tpu_file = NULL;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-#else
- return -ENODEV;
-#endif
-}
-
-static int gxp_register_telemetry_eventfd(
- struct gxp_client *client,
- struct gxp_register_telemetry_eventfd_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_register_telemetry_eventfd_ioctl ibuf;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- return gxp_telemetry_register_eventfd(gxp, ibuf.type, ibuf.eventfd);
-}
-
-static int gxp_unregister_telemetry_eventfd(
- struct gxp_client *client,
- struct gxp_register_telemetry_eventfd_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_register_telemetry_eventfd_ioctl ibuf;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- return gxp_telemetry_unregister_eventfd(gxp, ibuf.type);
-}
-
-static int gxp_read_global_counter(struct gxp_client *client,
- __u64 __user *argp)
+void gxp_iommu_setup_shareability(struct gxp_dev *gxp)
{
- struct gxp_dev *gxp = client->gxp;
- u32 high_first, high_second, low;
- u64 counter_val;
- int ret = 0;
-
- /* Caller must hold BLOCK wakelock */
- down_read(&client->semaphore);
-
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "GXP_READ_GLOBAL_COUNTER requires the client hold a BLOCK wakelock\n");
- ret = -ENODEV;
- goto out;
- }
-
- high_first = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
- low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
-
- /*
- * Check if the lower 32 bits could have wrapped in-between reading
- * the high and low bit registers by validating the higher 32 bits
- * haven't changed.
- */
- high_second = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
- if (high_first != high_second)
- low = gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
-
- counter_val = ((u64)high_second << 32) | low;
-
- if (copy_to_user(argp, &counter_val, sizeof(counter_val)))
- ret = -EFAULT;
-
-out:
- up_read(&client->semaphore);
-
- return ret;
+ /* IO coherency not supported */
}
-static int gxp_acquire_wake_lock_compat(
- struct gxp_client *client,
- struct gxp_acquire_wakelock_compat_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_acquire_wakelock_compat_ioctl ibuf;
- bool acquired_block_wakelock = false;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
- dev_err(gxp->dev,
- "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
- return -EINVAL;
- }
- if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
- dev_err(gxp->dev, "Requested power state is invalid\n");
- return -EINVAL;
- }
- if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
- ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
- ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
- dev_err(gxp->dev,
- "Requested memory power state %d is invalid\n",
- ibuf.memory_power_state);
- return -EINVAL;
- }
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
- ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
- }
-
- down_write(&client->semaphore);
- if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
- (!client->vd)) {
- dev_err(gxp->dev,
- "Must allocate a virtual device to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Acquire a BLOCK wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
- if (!client->has_block_wakelock) {
- ret = gxp_wakelock_acquire(gxp);
- acquired_block_wakelock = true;
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
- ret);
- goto out;
- }
-
- client->has_block_wakelock = true;
-
- /*
- * Update client's TGID/PID in case the process that opened
- * /dev/gxp is not the one that called this IOCTL.
- */
- client->tgid = current->tgid;
- client->pid = current->pid;
- }
-
- /* Acquire a VIRTUAL_DEVICE wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
-
- }
-
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev,
- "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
- ret = -ENODEV;
- goto out;
- }
-
- if (!client->has_vd_wakelock) {
- down_write(&gxp->vd_semaphore);
- if (client->vd->state == GXP_VD_OFF)
- ret = gxp_vd_start(client->vd);
- else
- ret = gxp_vd_resume(client->vd);
- up_write(&gxp->vd_semaphore);
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
- ret);
- goto err_acquiring_vd_wl;
- }
-
- client->has_vd_wakelock = true;
- }
-
- gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state, client->requested_low_clkmux,
- aur_state_array[ibuf.gxp_power_state], false,
- client->requested_memory_power_state,
- aur_memory_state_array[ibuf.memory_power_state]);
- client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
- client->requested_low_clkmux = false;
- client->requested_memory_power_state =
- aur_memory_state_array[ibuf.memory_power_state];
-out:
- up_write(&client->semaphore);
-
- return ret;
-
-err_acquiring_vd_wl:
- /*
- * In a single call, if any wakelock acquisition fails, all of them do.
- * If the client was acquiring both wakelocks and failed to acquire the
- * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
- * wakelock, then release it before returning the error code.
- */
- if (acquired_block_wakelock) {
- gxp_wakelock_release(gxp);
- client->has_block_wakelock = false;
- }
-
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_acquire_wake_lock(struct gxp_client *client,
- struct gxp_acquire_wakelock_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_acquire_wakelock_ioctl ibuf;
- bool acquired_block_wakelock = false;
- bool requested_low_clkmux = false;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
- dev_err(gxp->dev,
- "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
- return -EINVAL;
- }
- if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
- ibuf.gxp_power_state >= GXP_NUM_POWER_STATES) {
- dev_err(gxp->dev, "Requested power state is invalid\n");
- return -EINVAL;
- }
- if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
- ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
- ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
- dev_err(gxp->dev,
- "Requested memory power state %d is invalid\n",
- ibuf.memory_power_state);
- return -EINVAL;
- }
-
- if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
- ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
- }
-
- if(ibuf.flags & GXP_POWER_NON_AGGRESSOR)
- dev_warn_once(
- gxp->dev,
- "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
-
- down_write(&client->semaphore);
- if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
- (!client->vd)) {
- dev_err(gxp->dev,
- "Must allocate a virtual device to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Acquire a BLOCK wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
- if (!client->has_block_wakelock) {
- ret = gxp_wakelock_acquire(gxp);
- acquired_block_wakelock = true;
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
- ret);
- goto out;
- }
-
- client->has_block_wakelock = true;
- }
-
- /* Acquire a VIRTUAL_DEVICE wakelock if requested */
- if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
- ret = -EINVAL;
- goto out;
-
- }
-
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev,
- "Cannot acquire VIRTUAL_DEVICE wakelock on a broken virtual device\n");
- ret = -ENODEV;
- goto err_acquiring_vd_wl;
- }
-
- if (!client->has_vd_wakelock) {
- down_write(&gxp->vd_semaphore);
- if (client->vd->state == GXP_VD_OFF)
- ret = gxp_vd_start(client->vd);
- else
- ret = gxp_vd_resume(client->vd);
- up_write(&gxp->vd_semaphore);
- }
-
- if (ret) {
- dev_err(gxp->dev,
- "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
- ret);
- goto err_acquiring_vd_wl;
- }
-
- client->has_vd_wakelock = true;
- }
- requested_low_clkmux = (ibuf.flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
-
- gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state, client->requested_low_clkmux,
- aur_state_array[ibuf.gxp_power_state], requested_low_clkmux,
- client->requested_memory_power_state,
- aur_memory_state_array[ibuf.memory_power_state]);
- client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
- client->requested_low_clkmux = requested_low_clkmux;
- client->requested_memory_power_state =
- aur_memory_state_array[ibuf.memory_power_state];
-out:
- up_write(&client->semaphore);
-
- return ret;
-
-err_acquiring_vd_wl:
- /*
- * In a single call, if any wakelock acquisition fails, all of them do.
- * If the client was acquiring both wakelocks and failed to acquire the
- * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
- * wakelock, then release it before returning the error code.
- */
- if (acquired_block_wakelock) {
- gxp_wakelock_release(gxp);
- client->has_block_wakelock = false;
- }
-
- up_write(&client->semaphore);
-
- return ret;
-}
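For reference, the two wakelock handlers above are driven from user space with the same gxp_acquire_wakelock_ioctl structure and component mask. The sketch below is illustrative only and not part of this patch; the "gxp.h" UAPI header name is an assumption.

/*
 * Illustrative user-space sketch: acquire the BLOCK and VIRTUAL_DEVICE
 * wakelocks for a client that has already allocated a virtual device,
 * then release both in one call.
 */
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "gxp.h"	/* assumed UAPI header */

static int run_with_wakelocks(int fd)
{
	struct gxp_acquire_wakelock_ioctl arg = {
		.components_to_wake = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE,
		.gxp_power_state = GXP_POWER_STATE_UUD,
		.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
		.flags = 0,	/* optionally GXP_POWER_LOW_FREQ_CLKMUX */
	};
	__u32 release = WAKELOCK_VIRTUAL_DEVICE | WAKELOCK_BLOCK;

	if (ioctl(fd, GXP_ACQUIRE_WAKE_LOCK, &arg)) {
		perror("GXP_ACQUIRE_WAKE_LOCK");
		return -1;
	}
	/* ... run mailbox commands ... */
	return ioctl(fd, GXP_RELEASE_WAKE_LOCK, &release);
}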
-
-static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- u32 wakelock_components;
- int ret = 0;
-
- if (copy_from_user(&wakelock_components, argp,
- sizeof(wakelock_components)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (wakelock_components & WAKELOCK_VIRTUAL_DEVICE) {
- if (!client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "Client must hold a VIRTUAL_DEVICE wakelock to release one\n");
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * Currently VD state will not be GXP_VD_UNAVAILABLE if
- * has_vd_wakelock is true. Add this check just in case
- * GXP_VD_UNAVAILABLE will occur in more scenarios in the
- * future.
- */
- if (client->vd->state != GXP_VD_UNAVAILABLE) {
- down_write(&gxp->vd_semaphore);
- gxp_vd_suspend(client->vd);
- up_write(&gxp->vd_semaphore);
- }
-
- client->has_vd_wakelock = false;
- }
-
- if (wakelock_components & WAKELOCK_BLOCK) {
- if (client->has_vd_wakelock) {
- dev_err(gxp->dev,
- "Client cannot release BLOCK wakelock while holding a VD wakelock\n");
- ret = -EBUSY;
- goto out;
- }
-
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "Client must hold a BLOCK wakelock to release one\n");
- ret = -ENODEV;
- goto out;
- }
-
- gxp_wakelock_release(gxp);
- /*
- * Other clients may still be using the BLK_AUR, check if we need
- * to change the power state.
- */
- gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state,
- client->requested_low_clkmux, AUR_OFF, false,
- client->requested_memory_power_state,
- AUR_MEM_UNDEFINED);
- client->requested_power_state = AUR_OFF;
- client->requested_memory_power_state = AUR_MEM_UNDEFINED;
- client->requested_low_clkmux = false;
- client->has_block_wakelock = false;
- }
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_map_dmabuf(struct gxp_client *client,
- struct gxp_map_dmabuf_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_dmabuf_ioctl ibuf;
- struct gxp_mapping *mapping;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- if (ibuf.virtual_core_list == 0)
- return -EINVAL;
-
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAP_DMABUF")) {
- ret = -ENODEV;
- goto out_unlock;
- }
-
- mapping = gxp_dmabuf_map(gxp, client->vd, ibuf.virtual_core_list,
- ibuf.dmabuf_fd,
- /*gxp_dma_flags=*/0,
- mapping_flags_to_dma_dir(ibuf.flags));
- if (IS_ERR(mapping)) {
- ret = PTR_ERR(mapping);
- dev_err(gxp->dev, "Failed to map dma-buf (ret=%d)\n", ret);
- goto out_unlock;
- }
-
- ret = gxp_vd_mapping_store(client->vd, mapping);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to store mapping for dma-buf (ret=%d)\n", ret);
- goto out_put;
- }
-
- ibuf.device_address = mapping->device_address;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- /* If the IOCTL fails, the dma-buf must be unmapped */
- gxp_vd_mapping_remove(client->vd, mapping);
- ret = -EFAULT;
- }
-
-out_put:
- /*
- * Release the reference from creating the dmabuf mapping
- * If the mapping was not successfully stored in the owning virtual
- * device, this will unmap and cleanup the dmabuf.
- */
- gxp_mapping_put(mapping);
-
-out_unlock:
- up_read(&client->semaphore);
-
- return ret;
-}
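From user space, the dma-buf map/unmap pair above is exercised with the same gxp_map_dmabuf_ioctl structure; GXP_UNMAP_DMABUF keys only on the returned device_address. A hedged sketch (UAPI header name assumed):

#include <sys/ioctl.h>
#include "gxp.h"	/* assumed UAPI header */

/* Map a dma-buf to virtual cores 0 and 1, then unmap it by device address. */
static int map_then_unmap(int gxp_fd, int dmabuf_fd)
{
	struct gxp_map_dmabuf_ioctl ibuf = {
		.virtual_core_list = 0x3,
		.dmabuf_fd = dmabuf_fd,
		.flags = 0,	/* DMA direction flags per the UAPI */
	};

	if (ioctl(gxp_fd, GXP_MAP_DMABUF, &ibuf))
		return -1;
	/* ibuf.device_address now holds the IOVA seen by the cores. */
	return ioctl(gxp_fd, GXP_UNMAP_DMABUF, &ibuf);
}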
-
-static int gxp_unmap_dmabuf(struct gxp_client *client,
- struct gxp_map_dmabuf_ioctl __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- struct gxp_map_dmabuf_ioctl ibuf;
- struct gxp_mapping *mapping;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!client->vd) {
- dev_err(gxp->dev,
- "GXP_UNMAP_DMABUF requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * Fetch and remove the internal mapping records.
- * If host_address is not 0, the provided device_address belongs to a
- * non-dma-buf mapping.
- */
- mapping = gxp_vd_mapping_search(client->vd, ibuf.device_address);
- if (IS_ERR_OR_NULL(mapping) || mapping->host_address) {
- dev_warn(gxp->dev, "No dma-buf mapped for given IOVA\n");
- /*
- * If the device address belongs to a non-dma-buf mapping,
- * release the reference to it obtained via the search.
- */
- if (!IS_ERR_OR_NULL(mapping))
- gxp_mapping_put(mapping);
- ret = -EINVAL;
- goto out;
- }
-
- /* Remove the mapping from its VD, releasing the VD's reference */
- gxp_vd_mapping_remove(client->vd, mapping);
-
- /* Release the reference from gxp_vd_mapping_search() */
- gxp_mapping_put(mapping);
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_register_mailbox_eventfd(
- struct gxp_client *client,
- struct gxp_register_mailbox_eventfd_ioctl __user *argp)
-{
- struct gxp_register_mailbox_eventfd_ioctl ibuf;
- struct gxp_eventfd *eventfd;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_REGISTER_MAILBOX_EVENTFD")) {
- ret = -ENODEV;
- goto out;
- }
-
- if (ibuf.virtual_core_id >= client->vd->num_cores) {
- ret = -EINVAL;
- goto out;
- }
-
- /* Make sure the provided eventfd is valid */
- eventfd = gxp_eventfd_create(ibuf.eventfd);
- if (IS_ERR(eventfd)) {
- ret = PTR_ERR(eventfd);
- goto out;
- }
-
- /* Set the new eventfd, replacing any existing one */
- if (client->mb_eventfds[ibuf.virtual_core_id])
- gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
-
- client->mb_eventfds[ibuf.virtual_core_id] = eventfd;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int gxp_unregister_mailbox_eventfd(
- struct gxp_client *client,
- struct gxp_register_mailbox_eventfd_ioctl __user *argp)
-{
- struct gxp_register_mailbox_eventfd_ioctl ibuf;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_write(&client->semaphore);
-
- if (!client->vd) {
- dev_err(client->gxp->dev,
- "GXP_UNREGISTER_MAILBOX_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
- ret = -ENODEV;
- goto out;
- }
-
- if (ibuf.virtual_core_id >= client->vd->num_cores) {
- ret = -EINVAL;
- goto out;
- }
-
- if (client->mb_eventfds[ibuf.virtual_core_id])
- gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
-
- client->mb_eventfds[ibuf.virtual_core_id] = NULL;
-
-out:
- up_write(&client->semaphore);
-
- return ret;
-}
-
-static int
-gxp_get_interface_version(struct gxp_client *client,
- struct gxp_interface_version_ioctl __user *argp)
-{
- struct gxp_interface_version_ioctl ibuf;
- int ret;
-
- ibuf.version_major = GXP_INTERFACE_VERSION_MAJOR;
- ibuf.version_minor = GXP_INTERFACE_VERSION_MINOR;
- memset(ibuf.version_build, 0, GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE);
- ret = snprintf(ibuf.version_build,
- GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE - 1,
- GIT_REPO_TAG);
-
- if (ret < 0 || ret >= GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE) {
- dev_warn(
- client->gxp->dev,
- "Buffer size insufficient to hold GIT_REPO_TAG (size=%d)\n",
- ret);
- }
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
-
- return 0;
-}
-
-static int gxp_trigger_debug_dump(struct gxp_client *client,
- __u32 __user *argp)
-{
- struct gxp_dev *gxp = client->gxp;
- int phys_core, i;
- u32 core_bits;
- int ret = 0;
-
- if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
- return -EPERM;
-
- if (!gxp_debug_dump_is_enabled()) {
- dev_err(gxp->dev, "Debug dump functionality is disabled\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&core_bits, argp, sizeof(core_bits)))
- return -EFAULT;
-
- /* Caller must hold VIRTUAL_DEVICE wakelock */
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd_wakelock(client,
- "GXP_TRIGGER_DEBUG_DUMP")) {
- ret = -ENODEV;
- goto out_unlock_client_semaphore;
- }
-
- down_read(&gxp->vd_semaphore);
-
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if (!(core_bits & BIT(i)))
- continue;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, i);
- if (phys_core < 0) {
- dev_err(gxp->dev,
- "Trigger debug dump failed: Invalid virtual core id (%u)\n",
- i);
- ret = -EINVAL;
- continue;
- }
-
- if (gxp_is_fw_running(gxp, phys_core)) {
- gxp_notification_send(gxp, phys_core,
- CORE_NOTIF_GENERATE_DEBUG_DUMP);
- }
- }
-
- up_read(&gxp->vd_semaphore);
-out_unlock_client_semaphore:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct gxp_client *client = file->private_data;
- void __user *argp = (void __user *)arg;
- long ret;
-
- switch (cmd) {
- case GXP_MAP_BUFFER:
- ret = gxp_map_buffer(client, argp);
- break;
- case GXP_UNMAP_BUFFER:
- ret = gxp_unmap_buffer(client, argp);
- break;
- case GXP_SYNC_BUFFER:
- ret = gxp_sync_buffer(client, argp);
- break;
- case GXP_MAILBOX_COMMAND_COMPAT:
- ret = gxp_mailbox_command_compat(client, argp);
- break;
- case GXP_MAILBOX_RESPONSE:
- ret = gxp_mailbox_response(client, argp);
- break;
- case GXP_GET_SPECS:
- ret = gxp_get_specs(client, argp);
- break;
- case GXP_ALLOCATE_VIRTUAL_DEVICE:
- ret = gxp_allocate_vd(client, argp);
- break;
- case GXP_ETM_TRACE_START_COMMAND:
- ret = gxp_etm_trace_start_command(client, argp);
- break;
- case GXP_ETM_TRACE_SW_STOP_COMMAND:
- ret = gxp_etm_trace_sw_stop_command(client, argp);
- break;
- case GXP_ETM_TRACE_CLEANUP_COMMAND:
- ret = gxp_etm_trace_cleanup_command(client, argp);
- break;
- case GXP_ETM_GET_TRACE_INFO_COMMAND:
- ret = gxp_etm_get_trace_info_command(client, argp);
- break;
- case GXP_ENABLE_TELEMETRY:
- ret = gxp_enable_telemetry(client, argp);
- break;
- case GXP_DISABLE_TELEMETRY:
- ret = gxp_disable_telemetry(client, argp);
- break;
- case GXP_MAP_TPU_MBX_QUEUE:
- ret = gxp_map_tpu_mbx_queue(client, argp);
- break;
- case GXP_UNMAP_TPU_MBX_QUEUE:
- ret = gxp_unmap_tpu_mbx_queue(client, argp);
- break;
- case GXP_REGISTER_TELEMETRY_EVENTFD:
- ret = gxp_register_telemetry_eventfd(client, argp);
- break;
- case GXP_UNREGISTER_TELEMETRY_EVENTFD:
- ret = gxp_unregister_telemetry_eventfd(client, argp);
- break;
- case GXP_READ_GLOBAL_COUNTER:
- ret = gxp_read_global_counter(client, argp);
- break;
- case GXP_ACQUIRE_WAKE_LOCK_COMPAT:
- ret = gxp_acquire_wake_lock_compat(client, argp);
- break;
- case GXP_RELEASE_WAKE_LOCK:
- ret = gxp_release_wake_lock(client, argp);
- break;
- case GXP_MAP_DMABUF:
- ret = gxp_map_dmabuf(client, argp);
- break;
- case GXP_UNMAP_DMABUF:
- ret = gxp_unmap_dmabuf(client, argp);
- break;
- case GXP_MAILBOX_COMMAND:
- ret = gxp_mailbox_command(client, argp);
- break;
- case GXP_REGISTER_MAILBOX_EVENTFD:
- ret = gxp_register_mailbox_eventfd(client, argp);
- break;
- case GXP_UNREGISTER_MAILBOX_EVENTFD:
- ret = gxp_unregister_mailbox_eventfd(client, argp);
- break;
- case GXP_ACQUIRE_WAKE_LOCK:
- ret = gxp_acquire_wake_lock(client, argp);
- break;
- case GXP_GET_INTERFACE_VERSION:
- ret = gxp_get_interface_version(client, argp);
- break;
- case GXP_TRIGGER_DEBUG_DUMP:
- ret = gxp_trigger_debug_dump(client, argp);
- break;
- default:
- ret = -ENOTTY; /* unknown command */
- }
-
- return ret;
-}
-
-static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct gxp_client *client = file->private_data;
-
- if (!client)
- return -ENODEV;
-
- switch (vma->vm_pgoff << PAGE_SHIFT) {
- case GXP_MMAP_LOG_BUFFER_OFFSET:
- return gxp_telemetry_mmap_buffers(client->gxp,
- GXP_TELEMETRY_TYPE_LOGGING,
- vma);
- case GXP_MMAP_TRACE_BUFFER_OFFSET:
- return gxp_telemetry_mmap_buffers(client->gxp,
- GXP_TELEMETRY_TYPE_TRACING,
- vma);
- default:
- return -EINVAL;
- }
-}
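The removed gxp_mmap() handler dispatches on the mmap offset, and gxp_telemetry_mmap_buffers() requires the total length to divide evenly into one page-aligned buffer per core. A user-space sketch, illustrative only:

#include <sys/mman.h>
#include "gxp.h"	/* assumed UAPI header defining GXP_MMAP_LOG_BUFFER_OFFSET */

/* Map the per-core logging buffers; per_core_size must be page-aligned. */
static void *map_log_buffers(int fd, size_t per_core_size, unsigned int num_cores)
{
	return mmap(NULL, per_core_size * num_cores, PROT_READ, MAP_SHARED,
		    fd, GXP_MMAP_LOG_BUFFER_OFFSET);
}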
-
-static const struct file_operations gxp_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .mmap = gxp_mmap,
- .open = gxp_open,
- .release = gxp_release,
- .unlocked_ioctl = gxp_ioctl,
-};
-
static int gxp_platform_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct gxp_dev *gxp;
- struct resource *r;
- phys_addr_t offset, base_addr;
- struct device_node *np;
- struct platform_device *tpu_pdev;
- struct platform_device *gsa_pdev;
- int ret;
- int __maybe_unused i;
- bool __maybe_unused tpu_found;
- u64 prop;
-
- dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
+ struct gxp_dev *gxp =
+ devm_kzalloc(&pdev->dev, sizeof(*gxp), GFP_KERNEL);
- gxp = devm_kzalloc(dev, sizeof(*gxp), GFP_KERNEL);
if (!gxp)
return -ENOMEM;
- platform_set_drvdata(pdev, gxp);
- gxp->dev = dev;
-
- gxp->misc_dev.minor = MISC_DYNAMIC_MINOR;
- gxp->misc_dev.name = "gxp";
- gxp->misc_dev.fops = &gxp_fops;
-
- gxp_wakelock_init(gxp);
-
- ret = misc_register(&gxp->misc_dev);
- if (ret) {
- dev_err(dev, "Failed to register misc device (ret = %d)\n",
- ret);
- devm_kfree(dev, (void *)gxp);
- return ret;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR_OR_NULL(r)) {
- dev_err(dev, "Failed to get memory resource\n");
- ret = -ENODEV;
- goto err;
- }
-
- gxp->regs.paddr = r->start;
- gxp->regs.size = resource_size(r);
- gxp->regs.vaddr = devm_ioremap_resource(dev, r);
- if (IS_ERR_OR_NULL(gxp->regs.vaddr)) {
- dev_err(dev, "Failed to map registers\n");
- ret = -ENODEV;
- goto err;
- }
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu");
- if (!IS_ERR_OR_NULL(r)) {
- gxp->cmu.paddr = r->start;
- gxp->cmu.size = resource_size(r);
- gxp->cmu.vaddr = devm_ioremap_resource(dev, r);
- }
- /*
- * TODO (b/224685748): Remove this block after CMU CSR is supported
- * in device tree config.
- */
- if (IS_ERR_OR_NULL(r) || IS_ERR_OR_NULL(gxp->cmu.vaddr)) {
- gxp->cmu.paddr = gxp->regs.paddr - GXP_CMU_OFFSET;
- gxp->cmu.size = GXP_CMU_SIZE;
- gxp->cmu.vaddr = devm_ioremap(dev, gxp->cmu.paddr, gxp->cmu.size);
- if (IS_ERR_OR_NULL(gxp->cmu.vaddr))
- dev_warn(dev, "Failed to map CMU registers\n");
- }
-
- ret = gxp_pm_init(gxp);
- if (ret) {
- dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
- goto err;
- }
-
- for (i = 0; i < GXP_NUM_CORES; i++) {
- r = platform_get_resource(pdev, IORESOURCE_MEM, i + 1);
- if (IS_ERR_OR_NULL(r)) {
- dev_err(dev, "Failed to get mailbox%d resource\n", i);
- ret = -ENODEV;
- goto err_pm_destroy;
- }
-
- gxp->mbx[i].paddr = r->start;
- gxp->mbx[i].size = resource_size(r);
- gxp->mbx[i].vaddr = devm_ioremap_resource(dev, r);
- if (IS_ERR_OR_NULL(gxp->mbx[i].vaddr)) {
- dev_err(dev, "Failed to map mailbox%d registers\n", i);
- ret = -ENODEV;
- goto err_pm_destroy;
- }
- }
-
- tpu_found = true;
- /* Get TPU device from device tree */
- np = of_parse_phandle(dev->of_node, "tpu-device", 0);
- if (IS_ERR_OR_NULL(np)) {
- dev_warn(dev, "No tpu-device in device tree\n");
- tpu_found = false;
- }
- tpu_pdev = of_find_device_by_node(np);
- if (!tpu_pdev) {
- dev_err(dev, "TPU device not found\n");
- tpu_found = false;
- }
- /* get tpu mailbox register base */
- ret = of_property_read_u64_index(np, "reg", 0, &base_addr);
- of_node_put(np);
- if (ret) {
- dev_warn(dev, "Unable to get tpu-device base address\n");
- tpu_found = false;
- }
- /* get gxp-tpu mailbox register offset */
- ret = of_property_read_u64(dev->of_node, "gxp-tpu-mbx-offset",
- &offset);
- if (ret) {
- dev_warn(dev, "Unable to get tpu-device mailbox offset\n");
- tpu_found = false;
- }
- if (tpu_found) {
- gxp->tpu_dev.dev = &tpu_pdev->dev;
- get_device(gxp->tpu_dev.dev);
- gxp->tpu_dev.mbx_paddr = base_addr + offset;
- } else {
- dev_warn(dev, "TPU will not be available for interop\n");
- gxp->tpu_dev.mbx_paddr = 0;
- }
-
- ret = gxp_dma_init(gxp);
- if (ret) {
- dev_err(dev, "Failed to initialize GXP DMA interface\n");
- goto err_put_tpu_dev;
- }
-
- gxp->mailbox_mgr = gxp_mailbox_create_manager(gxp, GXP_NUM_CORES);
- if (IS_ERR_OR_NULL(gxp->mailbox_mgr)) {
- dev_err(dev, "Failed to create mailbox manager\n");
- ret = -ENOMEM;
- goto err_dma_exit;
- }
-
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- ret = gxp_debug_dump_init(gxp, &gxp_sscd_dev, &gxp_sscd_pdata);
-#else
- ret = gxp_debug_dump_init(gxp, NULL, NULL);
-#endif // !CONFIG_SUBSYSTEM_COREDUMP
- if (ret) {
- dev_err(dev, "Failed to initialize debug dump\n");
- gxp_debug_dump_exit(gxp);
- }
-
- mutex_init(&gxp->dsp_firmware_lock);
- mutex_init(&gxp->pin_user_pages_lock);
-
- gxp->domain_pool = kmalloc(sizeof(*gxp->domain_pool), GFP_KERNEL);
- if (!gxp->domain_pool) {
- ret = -ENOMEM;
- goto err_debug_dump_exit;
- }
- ret = gxp_domain_pool_init(gxp, gxp->domain_pool,
- GXP_NUM_PREALLOCATED_DOMAINS);
- if (ret) {
- dev_err(dev,
- "Failed to initialize IOMMU domain pool (ret=%d)\n",
- ret);
- goto err_free_domain_pool;
- }
- ret = gxp_vd_init(gxp);
- if (ret) {
- dev_err(dev,
- "Failed to initialize virtual device manager (ret=%d)\n",
- ret);
- goto err_domain_pool_destroy;
- }
- gxp_dma_init_default_resources(gxp);
-
- /* Get GSA device from device tree */
- np = of_parse_phandle(dev->of_node, "gsa-device", 0);
- if (!np) {
- dev_warn(
- dev,
- "No gsa-device in device tree. Firmware authentication not available\n");
- } else {
- gsa_pdev = of_find_device_by_node(np);
- if (!gsa_pdev) {
- dev_err(dev, "GSA device not found\n");
- of_node_put(np);
- ret = -ENODEV;
- goto err_vd_destroy;
- }
- gxp->gsa_dev = get_device(&gsa_pdev->dev);
- of_node_put(np);
- dev_info(
- dev,
- "GSA device found, Firmware authentication available\n");
- }
-
- ret = of_property_read_u64(dev->of_node, "gxp-memory-per-core",
- &prop);
- if (ret) {
- dev_err(dev, "Unable to get memory-per-core from device tree\n");
- gxp->memory_per_core = 0;
- } else {
- gxp->memory_per_core = (u32)prop;
- }
-
- gxp_fw_data_init(gxp);
- gxp_telemetry_init(gxp);
- gxp_create_debugfs(gxp);
- gxp->thermal_mgr = gxp_thermal_init(gxp);
- if (!gxp->thermal_mgr)
- dev_err(dev, "Failed to init thermal driver\n");
- dev_dbg(dev, "Probe finished\n");
-
- INIT_LIST_HEAD(&gxp->client_list);
- mutex_init(&gxp->client_list_lock);
-
- gxp_debug_pointer = gxp;
-
- return 0;
-err_vd_destroy:
- gxp_vd_destroy(gxp);
-err_domain_pool_destroy:
- gxp_domain_pool_destroy(gxp->domain_pool);
-err_free_domain_pool:
- kfree(gxp->domain_pool);
-err_debug_dump_exit:
- gxp_debug_dump_exit(gxp);
-err_dma_exit:
- gxp_dma_exit(gxp);
-err_put_tpu_dev:
- put_device(gxp->tpu_dev.dev);
-err_pm_destroy:
- gxp_pm_destroy(gxp);
-err:
- misc_deregister(&gxp->misc_dev);
- devm_kfree(dev, (void *)gxp);
- return ret;
+ return gxp_common_platform_probe(pdev, gxp);
}
static int gxp_platform_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct gxp_dev *gxp = platform_get_drvdata(pdev);
-
- gxp_remove_debugfs(gxp);
- gxp_fw_data_destroy(gxp);
- if (gxp->gsa_dev)
- put_device(gxp->gsa_dev);
- gxp_vd_destroy(gxp);
- gxp_domain_pool_destroy(gxp->domain_pool);
- kfree(gxp->domain_pool);
- gxp_debug_dump_exit(gxp);
- gxp_dma_exit(gxp);
- put_device(gxp->tpu_dev.dev);
- gxp_pm_destroy(gxp);
- misc_deregister(&gxp->misc_dev);
-
- devm_kfree(dev, (void *)gxp);
-
- gxp_debug_pointer = NULL;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_PM_SLEEP)
-
-static int gxp_platform_suspend(struct device *dev)
-{
- struct gxp_dev *gxp = dev_get_drvdata(dev);
-
- return gxp_wakelock_suspend(gxp);
+ return gxp_common_platform_remove(pdev);
}
-static int gxp_platform_resume(struct device *dev)
-{
- struct gxp_dev *gxp = dev_get_drvdata(dev);
-
- return gxp_wakelock_resume(gxp);
-}
-
-static const struct dev_pm_ops gxp_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(gxp_platform_suspend, gxp_platform_resume)
-};
-
-#endif /* IS_ENABLED(CONFIG_PM_SLEEP) */
-
#ifdef CONFIG_OF
static const struct of_device_id gxp_of_match[] = {
{ .compatible = "google,gxp", },
@@ -2317,21 +42,12 @@ static const struct of_device_id gxp_of_match[] = {
MODULE_DEVICE_TABLE(of, gxp_of_match);
#endif
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id gxp_acpi_match[] = {
- { "CXRP0001", 0 },
- { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(acpi, gxp_acpi_match);
-#endif
-
static struct platform_driver gxp_platform_driver = {
.probe = gxp_platform_probe,
.remove = gxp_platform_remove,
.driver = {
.name = GXP_DRIVER_NAME,
.of_match_table = of_match_ptr(gxp_of_match),
- .acpi_match_table = ACPI_PTR(gxp_acpi_match),
#if IS_ENABLED(CONFIG_PM_SLEEP)
.pm = &gxp_pm_ops,
#endif
@@ -2340,23 +56,24 @@ static struct platform_driver gxp_platform_driver = {
static int __init gxp_platform_init(void)
{
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- /* Registers SSCD platform device */
- if (gxp_debug_dump_is_enabled()) {
- if (platform_device_register(&gxp_sscd_dev))
- pr_err("Unable to register SSCD platform device\n");
- }
-#endif
+ gxp_common_platform_reg_sscd();
return platform_driver_register(&gxp_platform_driver);
}
static void __exit gxp_platform_exit(void)
{
platform_driver_unregister(&gxp_platform_driver);
-#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- if (gxp_debug_dump_is_enabled())
- platform_device_unregister(&gxp_sscd_dev);
-#endif
+ gxp_common_platform_unreg_sscd();
+}
+
+bool gxp_is_direct_mode(struct gxp_dev *gxp)
+{
+ return true;
+}
+
+enum gxp_chip_revision gxp_get_chip_revision(struct gxp_dev *gxp)
+{
+ return GXP_CHIP_ANY;
}
MODULE_DESCRIPTION("Google GXP platform driver");
diff --git a/gxp-pm.c b/gxp-pm.c
index 5ed9612..146e788 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -6,19 +6,27 @@
*/
#include <linux/acpm_dvfs.h>
+#include <linux/bits.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <soc/google/exynos_pm_qos.h>
+#include <gcip/gcip-pm.h>
+
#include "gxp-bpm.h"
#include "gxp-client.h"
+#include "gxp-config.h"
+#include "gxp-dma.h"
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-pm.h"
+#define SHUTDOWN_DELAY_US_MIN 200
+#define SHUTDOWN_DELAY_US_MAX 400
+
/*
* The order of this array decides the voting priority, should be increasing in
* frequencies.
@@ -32,17 +40,20 @@ static const uint aur_memory_state_array[] = {
AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
};
-/*
- * TODO(b/177692488): move frequency values into chip-specific config.
- * TODO(b/221168126): survey how these value are derived from. Below
- * values are copied from the implementation in TPU firmware for PRO,
- * i.e. google3/third_party/darwinn/firmware/janeiro/power_manager.cc.
- */
-static const s32 aur_memory_state2int_table[] = { 0, 0, 0, 200000,
- 332000, 465000, 533000 };
-static const s32 aur_memory_state2mif_table[] = { 0, 0, 0,
- 1014000, 1352000, 2028000,
- 3172000 };
+static const s32 aur_memory_state2int_table[] = { 0,
+ AUR_MEM_INT_MIN,
+ AUR_MEM_INT_VERY_LOW,
+ AUR_MEM_INT_LOW,
+ AUR_MEM_INT_HIGH,
+ AUR_MEM_INT_VERY_HIGH,
+ AUR_MEM_INT_MAX };
+static const s32 aur_memory_state2mif_table[] = { 0,
+ AUR_MEM_MIF_MIN,
+ AUR_MEM_MIF_VERY_LOW,
+ AUR_MEM_MIF_LOW,
+ AUR_MEM_MIF_HIGH,
+ AUR_MEM_MIF_VERY_HIGH,
+ AUR_MEM_MIF_MAX };
static struct gxp_pm_device_ops gxp_aur_ops = {
.pre_blk_powerup = NULL,
@@ -62,8 +73,9 @@ static int gxp_pm_blkpwr_up(struct gxp_dev *gxp)
*/
ret = pm_runtime_resume_and_get(gxp->dev);
if (ret)
- dev_err(gxp->dev, "%s: pm_runtime_resume_and_get returned %d\n",
- __func__, ret);
+ dev_err(gxp->dev,
+ "pm_runtime_resume_and_get returned %d during blk up\n",
+ ret);
return ret;
}
@@ -75,9 +87,7 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
* Need to put TOP LPM into active state before blk off
* b/189396709
*/
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_2, 0x0);
- if (!gxp_lpm_wait_state_eq(gxp, LPM_TOP_PSM, LPM_ACTIVE_STATE)) {
+ if (!gxp_lpm_wait_state_eq(gxp, LPM_PSM_TOP, LPM_ACTIVE_STATE)) {
dev_err(gxp->dev,
"failed to force TOP LPM to PS0 during blk down\n");
return -EAGAIN;
@@ -91,8 +101,9 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
* indicate the device is still in use somewhere. The only
* expected value here is 0, indicating no remaining users.
*/
- dev_err(gxp->dev, "%s: pm_runtime_put_sync returned %d\n",
- __func__, ret);
+ dev_err(gxp->dev,
+ "pm_runtime_put_sync returned %d during blk down\n",
+ ret);
/* Remove our vote for INT/MIF state (if any) */
exynos_pm_qos_update_request(&gxp->power_mgr->int_min, 0);
exynos_pm_qos_update_request(&gxp->power_mgr->mif_min, 0);
@@ -117,7 +128,7 @@ int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate)
{
int ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, rate);
- dev_dbg(gxp->dev, "%s: rate %lu, ret %d\n", __func__, rate, ret);
+ dev_dbg(gxp->dev, "set blk rate %lu, ret %d\n", rate, ret);
return ret;
}
@@ -199,35 +210,33 @@ out:
mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
}
+#define AUR_DVFS_DEBUG_REQ BIT(31)
+#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
+
int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp)
{
int ret = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, AUR_DEBUG_CORE_FREQ);
- dev_dbg(gxp->dev, "%s: state %d\n", __func__, ret);
+ dev_dbg(gxp->dev, "current blk state %d\n", ret);
return ret;
}
int gxp_pm_blk_on(struct gxp_dev *gxp)
{
- int ret = 0;
-
- if (WARN_ON(!gxp->power_mgr)) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
- }
+ int ret;
dev_info(gxp->dev, "Powering on BLK ...\n");
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_pm_blkpwr_up(gxp);
- if (!ret) {
- gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
- gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
- }
-
+ if (ret)
+ goto out;
+ gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
+ gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
+ gxp_iommu_setup_shareability(gxp);
/* Startup TOP's PSM */
gxp_lpm_init(gxp);
gxp->power_mgr->blk_switch_count++;
-
+out:
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
@@ -237,10 +246,6 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
{
int ret = 0;
- if (WARN_ON(!gxp->power_mgr)) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
- }
dev_info(gxp->dev, "Powering off BLK ...\n");
mutex_lock(&gxp->power_mgr->pm_lock);
/*
@@ -264,14 +269,54 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
return ret;
}
-int gxp_pm_get_blk_switch_count(struct gxp_dev *gxp)
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp, uint timeout_ms)
+{
+ int timeout_cnt = 0, max_delay_count;
+ int curr_state;
+
+ if (!gxp->power_mgr->aur_status)
+ return gxp->power_mgr->curr_state == AUR_OFF;
+
+ max_delay_count = (timeout_ms * 1000) / SHUTDOWN_DELAY_US_MIN;
+
+ do {
+		/* Delay 200~400us per retry until blk shutdown finishes */
+ usleep_range(SHUTDOWN_DELAY_US_MIN, SHUTDOWN_DELAY_US_MAX);
+ curr_state = readl(gxp->power_mgr->aur_status);
+ if (!curr_state)
+ return true;
+ timeout_cnt++;
+ } while (timeout_cnt < max_delay_count);
+
+ return false;
+}
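The retry loop above is roughly the open-coded form of the generic iopoll helper; a sketch of the equivalent formulation (the driver keeps the open-coded loop so the no-aur_status fallback stays explicit):

#include <linux/iopoll.h>

static bool gxp_pm_blk_down_poll(struct gxp_dev *gxp, uint timeout_ms)
{
	u32 status;

	/* Poll AUR_STATUS every SHUTDOWN_DELAY_US_MIN until it reads 0. */
	return !readl_poll_timeout(gxp->power_mgr->aur_status, status,
				   status == 0, SHUTDOWN_DELAY_US_MIN,
				   timeout_ms * 1000);
}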
+
+int gxp_pm_blk_reboot(struct gxp_dev *gxp, uint timeout_ms)
{
int ret;
- if (!gxp->power_mgr) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
+ ret = gxp_pm_blk_off(gxp);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to turn off BLK_AUR (ret=%d)\n", ret);
+ return ret;
}
+
+ if (!gxp_pm_is_blk_down(gxp, timeout_ms)) {
+		dev_err(gxp->dev, "BLK_AUR hasn't been turned off\n");
+ return -EBUSY;
+ }
+
+ ret = gxp_pm_blk_on(gxp);
+ if (ret)
+ dev_err(gxp->dev, "Failed to turn on BLK_AUR (ret=%d)\n", ret);
+
+ return ret;
+}
+
+int gxp_pm_get_blk_switch_count(struct gxp_dev *gxp)
+{
+ int ret;
+
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp->power_mgr->blk_switch_count;
mutex_unlock(&gxp->power_mgr->pm_lock);
@@ -283,10 +328,6 @@ int gxp_pm_get_blk_state(struct gxp_dev *gxp)
{
int ret;
- if (!gxp->power_mgr) {
- dev_err(gxp->dev, "%s: No PM found\n", __func__);
- return -ENODEV;
- }
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp->power_mgr->curr_state;
mutex_unlock(&gxp->power_mgr->pm_lock);
@@ -296,17 +337,17 @@ int gxp_pm_get_blk_state(struct gxp_dev *gxp)
int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose)
{
- int ret = 0;
+ int ret;
- /*
- * Check if TOP LPM is already on.
- */
- WARN_ON(!gxp_lpm_is_initialized(gxp, LPM_TOP_PSM));
+ if (!gxp_lpm_is_initialized(gxp, LPM_PSM_TOP)) {
+		dev_err(gxp->dev, "unable to power on core without TOP powered\n");
+ return -EINVAL;
+ }
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_lpm_up(gxp, core);
if (ret) {
- dev_err(gxp->dev, "%s: Core %d on fail\n", __func__, core);
+ dev_err(gxp->dev, "Core %d on fail\n", core);
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
@@ -314,22 +355,19 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose)
mutex_unlock(&gxp->power_mgr->pm_lock);
if (verbose)
- dev_notice(gxp->dev, "%s: Core %d up\n", __func__, core);
+ dev_notice(gxp->dev, "Core %d powered up\n", core);
return ret;
}
-int gxp_pm_core_off(struct gxp_dev *gxp, uint core)
+void gxp_pm_core_off(struct gxp_dev *gxp, uint core)
{
- /*
- * Check if TOP LPM is already on.
- */
- WARN_ON(!gxp_lpm_is_initialized(gxp, LPM_TOP_PSM));
+ if (!gxp_lpm_is_initialized(gxp, LPM_PSM_TOP))
+ return;
mutex_lock(&gxp->power_mgr->pm_lock);
gxp_lpm_down(gxp, core);
mutex_unlock(&gxp->power_mgr->pm_lock);
- dev_notice(gxp->dev, "%s: Core %d down\n", __func__, core);
- return 0;
+ dev_notice(gxp->dev, "Core %d powered down\n", core);
}
static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
@@ -662,39 +700,88 @@ static int gxp_pm_update_requested_memory_power_state(
}
int gxp_pm_update_requested_power_states(
- struct gxp_dev *gxp, enum aur_power_state origin_state,
- bool origin_requested_low_clkmux, enum aur_power_state requested_state,
- bool requested_low_clkmux, enum aur_memory_power_state origin_mem_state,
- enum aur_memory_power_state requested_mem_state)
+ struct gxp_dev *gxp, struct gxp_power_states origin_vote,
+ struct gxp_power_states requested_states)
{
int ret = 0;
mutex_lock(&gxp->power_mgr->pm_lock);
- if (origin_state != requested_state ||
- origin_requested_low_clkmux != requested_low_clkmux) {
+ if (origin_vote.power != requested_states.power ||
+ origin_vote.low_clkmux != requested_states.low_clkmux) {
ret = gxp_pm_update_requested_power_state(
- gxp, origin_state, origin_requested_low_clkmux,
- requested_state, requested_low_clkmux);
+ gxp, origin_vote.power, origin_vote.low_clkmux,
+ requested_states.power, requested_states.low_clkmux);
if (ret)
goto out;
}
- if (origin_mem_state != requested_mem_state)
+ if (origin_vote.memory != requested_states.memory)
ret = gxp_pm_update_requested_memory_power_state(
- gxp, origin_mem_state, requested_mem_state);
+ gxp, origin_vote.memory, requested_states.memory);
out:
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
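With this change a caller passes two gxp_power_states values instead of six scalars; the off_states constant added to gxp-pm.h further below covers the first-vote and final-release cases. A caller-side sketch (the function name is illustrative):

static void example_vote(struct gxp_dev *gxp)
{
	struct gxp_power_states requested = {
		.power = AUR_UUD,
		.memory = AUR_MEM_UNDEFINED,
		.low_clkmux = false,
	};

	/* First vote: the previous vote is the predefined off_states. */
	gxp_pm_update_requested_power_states(gxp, off_states, requested);

	/* Dropping the vote later reverses the two arguments. */
	gxp_pm_update_requested_power_states(gxp, requested, off_states);
}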
+int gxp_pm_update_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val)
+{
+ return gxp_pm_req_pm_qos(gxp, int_val, mif_val);
+}
+
+static int gxp_pm_power_up(void *data)
+{
+ struct gxp_dev *gxp = data;
+ int ret = gxp_pm_blk_on(gxp);
+
+ if (ret) {
+ dev_err(gxp->dev, "Failed to power on BLK_AUR (ret=%d)\n", ret);
+ return ret;
+ }
+
+ if (gxp->pm_after_blk_on) {
+ ret = gxp->pm_after_blk_on(gxp);
+ if (ret) {
+ gxp_pm_blk_off(gxp);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int gxp_pm_power_down(void *data)
+{
+ struct gxp_dev *gxp = data;
+
+ if (gxp->pm_before_blk_off)
+ gxp->pm_before_blk_off(gxp);
+ return gxp_pm_blk_off(gxp);
+}
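These two callbacks are handed to gcip_pm_create() in gxp_pm_init() below, so block power is then taken and released through the GCIP handle rather than by calling gxp_pm_blk_on()/gxp_pm_blk_off() directly. A sketch, assuming the gcip-pm.h interface exposes the usual refcounted gcip_pm_get()/gcip_pm_put() entry points (not shown in this hunk):

static int example_power_cycle(struct gxp_dev *gxp)
{
	/* First get powers the block up via gxp_pm_power_up(). */
	int ret = gcip_pm_get(gxp->power_mgr->pm);

	if (ret)
		return ret;
	/* ... use the block ... */
	/* Last put powers it down via gxp_pm_power_down(). */
	gcip_pm_put(gxp->power_mgr->pm);
	return 0;
}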
+
int gxp_pm_init(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
+ struct platform_device *pdev =
+ container_of(gxp->dev, struct platform_device, dev);
+ const struct gcip_pm_args args = {
+ .dev = gxp->dev,
+ .data = gxp,
+ .power_up = gxp_pm_power_up,
+ .power_down = gxp_pm_power_down,
+ };
+ struct resource *r;
uint i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
mgr->gxp = gxp;
+
+ mgr->pm = gcip_pm_create(&args);
+ if (IS_ERR(mgr->pm)) {
+ devm_kfree(gxp->dev, mgr);
+ return PTR_ERR(mgr->pm);
+ }
+
mutex_init(&mgr->pm_lock);
mgr->curr_state = AUR_OFF;
mgr->curr_memory_state = AUR_MEM_UNDEFINED;
@@ -719,6 +806,20 @@ int gxp_pm_init(struct gxp_dev *gxp)
gxp->power_mgr->force_mux_normal_count = 0;
gxp->power_mgr->blk_switch_count = 0l;
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pmu_aur_status");
+ if (!r) {
+ dev_warn(gxp->dev, "Failed to find PMU register base\n");
+ } else {
+ gxp->power_mgr->aur_status = devm_ioremap_resource(gxp->dev, r);
+ if (IS_ERR(gxp->power_mgr->aur_status)) {
+ dev_err(gxp->dev,
+ "Failed to map PMU register base, ret=%ld\n",
+ PTR_ERR(gxp->power_mgr->aur_status));
+ gxp->power_mgr->aur_status = NULL;
+ }
+ }
+
pm_runtime_enable(gxp->dev);
exynos_pm_qos_add_request(&mgr->int_min, PM_QOS_DEVICE_THROUGHPUT, 0);
exynos_pm_qos_add_request(&mgr->mif_min, PM_QOS_BUS_THROUGHPUT, 0);
@@ -728,9 +829,13 @@ int gxp_pm_init(struct gxp_dev *gxp)
int gxp_pm_destroy(struct gxp_dev *gxp)
{
- struct gxp_power_manager *mgr;
+ struct gxp_power_manager *mgr = gxp->power_mgr;
+
+ if (IS_GXP_TEST && !mgr)
+ return 0;
+
+ gcip_pm_destroy(mgr->pm);
- mgr = gxp->power_mgr;
exynos_pm_qos_remove_request(&mgr->mif_min);
exynos_pm_qos_remove_request(&mgr->int_min);
pm_runtime_disable(gxp->dev);
diff --git a/gxp-pm.h b/gxp-pm.h
index c214a8b..217c3df 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -9,20 +9,11 @@
#include <soc/google/exynos_pm_qos.h>
+#include <gcip/gcip-pm.h>
+
#include "gxp-internal.h"
-#define AUR_DVFS_MIN_RATE 178000
-static const uint aur_power_state2rate[] = {
- 0, /* AUR_OFF */
- 178000, /* AUR_UUD */
- 373000, /* AUR_SUD */
- 750000, /* AUR_UD */
- 1160000, /* AUR_NOM */
- 178000, /* AUR_READY */
- 268000, /* AUR_UUD_PLUS */
- 560000, /* AUR_SUD_PLUS */
- 975000, /* AUR_UD_PLUS */
-};
+#define AUR_DVFS_MIN_RATE AUR_UUD_RATE
enum aur_power_state {
AUR_OFF = 0,
@@ -36,6 +27,18 @@ enum aur_power_state {
AUR_UD_PLUS = 8,
};
+static const uint aur_power_state2rate[] = {
+ AUR_OFF_RATE,
+ AUR_UUD_RATE,
+ AUR_SUD_RATE,
+ AUR_UD_RATE,
+ AUR_NOM_RATE,
+ AUR_READY_RATE,
+ AUR_UUD_PLUS_RATE,
+ AUR_SUD_PLUS_RATE,
+ AUR_UD_PLUS_RATE,
+};
+
enum aur_memory_power_state {
AUR_MEM_UNDEFINED = 0,
AUR_MEM_MIN = 1,
@@ -91,8 +94,20 @@ struct gxp_req_pm_qos_work {
bool using;
};
+struct gxp_power_states {
+ enum aur_power_state power;
+ enum aur_memory_power_state memory;
+ bool low_clkmux;
+};
+
+static const struct gxp_power_states off_states = { AUR_OFF, AUR_MEM_UNDEFINED,
+ false };
+static const struct gxp_power_states uud_states = { AUR_UUD, AUR_MEM_UNDEFINED,
+ false };
+
struct gxp_power_manager {
struct gxp_dev *gxp;
+ struct gcip_pm *pm;
struct mutex pm_lock;
uint pwr_state_req_count[AUR_NUM_POWER_STATE];
uint low_clkmux_pwr_state_req_count[AUR_NUM_POWER_STATE];
@@ -107,7 +122,7 @@ struct gxp_power_manager {
/* Last requested clock mux state */
bool last_scheduled_low_clkmux;
int curr_state;
- int curr_memory_state;
+	int curr_memory_state; /* Note: this state is not maintained in MCU mode. */
struct gxp_pm_device_ops *ops;
struct gxp_set_acpm_state_work
set_acpm_state_work[AUR_NUM_POWER_STATE_WORKER];
@@ -126,15 +141,20 @@ struct gxp_power_manager {
/* Max frequency that the thermal driver/ACPM will allow in Hz */
unsigned long thermal_limit;
u64 blk_switch_count;
+	/* PMU AUR_STATUS base address for block status; may be NULL */
+ void __iomem *aur_status;
};
/**
* gxp_pm_blk_on() - Turn on the power for BLK_AUR
* @gxp: The GXP device to turn on
*
+ * Note: In most cases you should use gxp_acquire_wakelock() to ensure the
+ * device is ready to use, unless you really want to power on the block without
+ * setting up the device state.
+ *
* Return:
* * 0 - BLK ON successfully
- * * -ENODEV - Cannot find PM interface
*/
int gxp_pm_blk_on(struct gxp_dev *gxp);
@@ -144,12 +164,30 @@ int gxp_pm_blk_on(struct gxp_dev *gxp);
*
* Return:
* * 0 - BLK OFF successfully
- * * -ENODEV - Cannot find PM interface
- * * -EBUSY - Wakelock is held, blk is still busy
*/
int gxp_pm_blk_off(struct gxp_dev *gxp);
/**
+ * gxp_pm_is_blk_down() - Check whether the blk is turned off.
+ * @gxp: The GXP device to check
+ * @timeout_ms: Wait for the block to be turned off for this duration.
+ *
+ * Return:
+ * * true - blk is turned off.
+ * * false - blk did not turn off within @timeout_ms.
+ */
+bool gxp_pm_is_blk_down(struct gxp_dev *gxp, uint timeout_ms);
+
+/**
+ * gxp_pm_blk_reboot() - Reboot the blk.
+ * @gxp: The GXP device to reboot
+ * @timeout_ms: Wait for the block to be turned off for this duration.
+ *
+ * Return:
+ * * 0 - BLK rebooted successfully
+ */
+int gxp_pm_blk_reboot(struct gxp_dev *gxp, uint timeout_ms);
+
+/**
* gxp_pm_get_blk_state() - Get the blk power state
* @gxp: The GXP device to sample state
*
@@ -183,11 +221,8 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core, bool verbose);
* gxp_pm_core_off() - Turn off a core on GXP device
* @gxp: The GXP device to operate
* @core: The core ID to turn off
- *
- * Return:
- * * 0 - Core off process finished successfully
*/
-int gxp_pm_core_off(struct gxp_dev *gxp, uint core);
+void gxp_pm_core_off(struct gxp_dev *gxp, uint core);
/**
* gxp_pm_init() - API for initialize PM interface for GXP, should only be
@@ -239,28 +274,36 @@ int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
* gxp_pm_update_requested_power_states() - API for a GXP client to vote for a
* requested power state and a requested memory power state.
* @gxp: The GXP device to operate.
- * @origin_state: An existing old requested state, will be cleared. If this is
- * the first vote, pass AUR_OFF.
- * @origin_requested_low_clkmux: Specify whether the existing vote was requested with
- * low frequency CLKMUX flag.
- * @requested_state: The new requested state.
- * @requested_low_clkmux: Specify whether the new vote is requested with low frequency
- * CLKMUX flag. Will take no effect if the @requested state is
- * AUR_OFF.
- * @origin_mem_state: An existing old requested state, will be cleared. If this is
- * the first vote, pass AUR_MEM_UNDEFINED.
- * @requested_mem_state: The new requested state.
+ * @origin_states: The existing requested states, which will be cleared. If this
+ *                 is the first vote, pass AUR_OFF and AUR_MEM_UNDEFINED in the
+ *                 power and memory fields. The low_clkmux field takes no effect
+ *                 if the requested power state is AUR_OFF.
+ * @requested_states: The new requested states.
*
* Return:
* * 0 - Voting registered
* * -EINVAL - Invalid original state or requested state
*/
-int gxp_pm_update_requested_power_states(
- struct gxp_dev *gxp, enum aur_power_state origin_state,
- bool origin_requested_low_clkmux, enum aur_power_state requested_state,
- bool requested_low_clkmux, enum aur_memory_power_state origin_mem_state,
- enum aur_memory_power_state requested_mem_state);
+int gxp_pm_update_requested_power_states(struct gxp_dev *gxp,
+ struct gxp_power_states origin_states,
+ struct gxp_power_states requested_states);
+
+/**
+ * gxp_pm_update_pm_qos() - API for updating the memory power state by passing the INT and MIF
+ * frequency values directly. This function ignores the existing votes and updates the
+ * frequencies right away.
+ * @gxp: The GXP device to operate.
+ * @int_val: The value of INT frequency.
+ * @mif_val: The value of MIF frequency.
+ *
+ * Note: This function will not update the @curr_memory_state of gxp_power_manager.
+ *
+ * Return:
+ * * 0 - The memory power state has been changed
+ * * -EINVAL - Invalid requested state
+ */
+int gxp_pm_update_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val);
/*
* gxp_pm_force_clkmux_normal() - Force PLL_CON0_NOC_USER and PLL_CON0_PLL_AUR MUX
diff --git a/gxp-range-alloc.c b/gxp-range-alloc.c
deleted file mode 100644
index 73aa6af..0000000
--- a/gxp-range-alloc.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP ranged resource allocator.
- *
- * Copyright (C) 2021 Google LLC
- */
-
-#include "gxp-range-alloc.h"
-
-struct range_alloc *range_alloc_create(int start, int end)
-{
- struct range_alloc *ra;
- int count;
- int size;
-
- count = end - start;
- if (count <= 0)
- return ERR_PTR(-EINVAL);
-
- size = sizeof(struct range_alloc) + count * sizeof(int);
- ra = kzalloc(size, GFP_KERNEL);
- if (!ra)
- return ERR_PTR(-ENOMEM);
-
- ra->total_count = count;
- ra->free_count = count;
- ra->start_index = start;
- mutex_init(&ra->lock);
-
- return ra;
-}
-
-int range_alloc_get(struct range_alloc *r, int element)
-{
- int index = element - r->start_index;
-
- mutex_lock(&r->lock);
- if (index < 0 || index >= r->total_count) {
- mutex_unlock(&r->lock);
- return -EINVAL;
- }
-
- if (r->elements[index]) {
- mutex_unlock(&r->lock);
- return -EBUSY;
- }
-
- r->elements[index] = 1;
- r->free_count--;
-
- mutex_unlock(&r->lock);
- return 0;
-}
-
-int range_alloc_get_any(struct range_alloc *r, int *element)
-{
- int i;
-
- mutex_lock(&r->lock);
- if (!r->free_count) {
- mutex_unlock(&r->lock);
- return -ENOMEM;
- }
-
- for (i = 0; i < r->total_count; i++) {
- if (r->elements[i] == 0) {
- r->elements[i] = 1;
- r->free_count--;
- *element = i + r->start_index;
- mutex_unlock(&r->lock);
- return 0;
- }
- }
- mutex_unlock(&r->lock);
- return -ENOMEM;
-}
-
-int range_alloc_put(struct range_alloc *r, int element)
-{
- int index = element - r->start_index;
-
- mutex_lock(&r->lock);
- if (index < 0 || index >= r->total_count) {
- mutex_unlock(&r->lock);
- return -EINVAL;
- }
-
- if (r->elements[index] == 0) {
- mutex_unlock(&r->lock);
- return -EBUSY;
- }
-
- r->elements[index] = 0;
- r->free_count++;
-
- mutex_unlock(&r->lock);
- return 0;
-}
-
-int range_alloc_num_free(struct range_alloc *r)
-{
- int free_count;
-
- mutex_lock(&r->lock);
- free_count = r->free_count;
- mutex_unlock(&r->lock);
-
- return free_count;
-}
-
-int range_alloc_destroy(struct range_alloc *r)
-{
- if (!r)
- return -EFAULT;
- kfree(r);
-
- return 0;
-}
diff --git a/gxp-range-alloc.h b/gxp-range-alloc.h
deleted file mode 100644
index ed8c2f0..0000000
--- a/gxp-range-alloc.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * GXP ranged resource allocator.
- *
- * Copyright (C) 2021 Google LLC
- */
-#ifndef __GXP_RANGE_ALLOC_H__
-#define __GXP_RANGE_ALLOC_H__
-
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-struct range_alloc {
- int total_count;
- int free_count;
- int start_index;
- struct mutex lock;
- int elements[];
-};
-
-/**
- * range_alloc_create() - Creates a range allocator starting at the specified
- * start (inclusive) and ends at the specified end
- * (exclusive).
- * @start: The start of the range (inclusive).
- * @end: The end of the range (exclusive)
- *
- * Return:
- * ptr - A pointer of the newly created allocator handle on success, an
- * error pointer (PTR_ERR) otherwise.
- * -EINVAL - Invalid start/end combination
- * -ENOMEM - Insufficient memory to create the allocator
- */
-struct range_alloc *range_alloc_create(int start, int end);
-
-/**
- * range_alloc_get() - Gets the specified element from the range.
- * @r: The range allocator
- * @element: The element to acquire from the range
- *
- * The @element argument should be within the allocator's range and has not been
- * allocated before.
- *
- * Return:
- * 0 - Successfully reserved @element
- * -EINVAL - Invalid element index (negative or outside allocator range)
- * -EBUSY - Element is already allocated
- */
-int range_alloc_get(struct range_alloc *r, int element);
-
-/**
- * range_alloc_get_any() - Gets any free element in the range.
- * @r: The range allocator
- * @element: A pointer to use to store the allocated element
- *
- * Return:
- * 0 - Successful reservation
- * -ENOMEM - No elements left in the range to allocate
- */
-int range_alloc_get_any(struct range_alloc *r, int *element);
-
-/**
- * range_alloc_put() - Puts an element back into the range.
- * @r: The range allocator
- * @element: The element to put back into the range
- *
- * Return:
- * 0 - Successful placement back into the range
- * -EINVAL - Invalid element index (negative or outside allocator range)
- * -EBUSY - The element is still present in the range
- */
-int range_alloc_put(struct range_alloc *r, int element);
-
-/**
- * range_alloc_num_free() - Returns the number of free elements in the range.
- * @r: The range allocator
- *
- * Return: the number of free elements in the range
- */
-int range_alloc_num_free(struct range_alloc *r);
-
-/**
- * range_alloc_destroy() - Destroys the range allocator
- * @r: The range allocator to destroy
- *
- * The destruction does not validate that the range is empty.
- *
- * Return:
- * 0 - Successfully destroyed range allocator
- * -EFAULT - Invalid allocator address
- */
-int range_alloc_destroy(struct range_alloc *r);
-
-#endif /* __GXP_RANGE_ALLOC_H__ */
diff --git a/gxp-ssmt.c b/gxp-ssmt.c
new file mode 100644
index 0000000..403da5d
--- /dev/null
+++ b/gxp-ssmt.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GXP SSMT driver.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/platform_device.h>
+
+#include "gxp-config.h"
+#include "gxp-internal.h"
+#include "gxp-ssmt.h"
+
+static inline void ssmt_set_vid_for_idx(void __iomem *ssmt, uint vid, uint idx)
+{
+ /* NS_READ_STREAM_VID_<sid> */
+ writel(vid, ssmt + 0x1000u + 0x4u * idx);
+ /* NS_WRITE_STREAM_VID_<sid> */
+ writel(vid, ssmt + 0x1200u + 0x4u * idx);
+}
+
+int gxp_ssmt_init(struct gxp_dev *gxp, struct gxp_ssmt *ssmt)
+{
+ struct platform_device *pdev =
+ container_of(gxp->dev, struct platform_device, dev);
+ struct resource *r;
+
+ ssmt->gxp = gxp;
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ssmt_idma");
+ if (!r) {
+ dev_err(gxp->dev, "Failed to find IDMA SSMT register base\n");
+ return -EINVAL;
+ }
+
+ ssmt->idma_ssmt_base = devm_ioremap_resource(gxp->dev, r);
+ if (IS_ERR(ssmt->idma_ssmt_base)) {
+ dev_err(gxp->dev,
+ "Failed to map IDMA SSMT register base (%ld)\n",
+ PTR_ERR(ssmt->idma_ssmt_base));
+ return PTR_ERR(ssmt->idma_ssmt_base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ssmt_inst_data");
+ if (!r) {
+ dev_err(gxp->dev,
+ "Failed to find instruction/data SSMT register base\n");
+ return -EINVAL;
+ }
+
+ ssmt->inst_data_ssmt_base = devm_ioremap_resource(gxp->dev, r);
+ if (IS_ERR(ssmt->inst_data_ssmt_base)) {
+ dev_err(gxp->dev,
+ "Failed to map instruction/data SSMT register base (%ld)\n",
+ PTR_ERR(ssmt->inst_data_ssmt_base));
+ return PTR_ERR(ssmt->inst_data_ssmt_base);
+ }
+
+ return 0;
+}
+
+void gxp_ssmt_set_core_vid(struct gxp_ssmt *ssmt, uint core, uint vid)
+{
+ const u8 sids[] = {
+ INST_SID_FOR_CORE(core),
+ DATA_SID_FOR_CORE(core),
+ IDMA_SID_FOR_CORE(core),
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sids); i++) {
+ ssmt_set_vid_for_idx(ssmt->idma_ssmt_base, vid, sids[i]);
+ ssmt_set_vid_for_idx(ssmt->inst_data_ssmt_base, vid, sids[i]);
+ }
+}
+
+void gxp_ssmt_set_bypass(struct gxp_ssmt *ssmt)
+{
+ u32 mode;
+ uint core, i;
+
+ mode = readl(ssmt->idma_ssmt_base + SSMT_CFG_OFFSET);
+ if (mode == SSMT_MODE_CLIENT) {
+ for (i = 0; i < MAX_NUM_CONTEXTS; i++) {
+ ssmt_set_vid_for_idx(ssmt->idma_ssmt_base, i, i);
+ ssmt_set_vid_for_idx(ssmt->inst_data_ssmt_base, i, i);
+ }
+ } else {
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ gxp_ssmt_set_core_vid(ssmt, core,
+ SSMT_CLAMP_MODE_BYPASS);
+ }
+}
diff --git a/gxp-ssmt.h b/gxp-ssmt.h
new file mode 100644
index 0000000..b35829d
--- /dev/null
+++ b/gxp-ssmt.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * GXP SSMT driver.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_SSMT_H__
+#define __GXP_SSMT_H__
+
+#include "gxp-internal.h"
+
+#define SSMT_CFG_OFFSET (0x0004)
+#define SSMT_MODE_CLAMPED (0x0u)
+#define SSMT_MODE_CLIENT (0x1u)
+
+#define SSMT_CLAMP_MODE_BYPASS (1u << 31)
+#define MAX_NUM_CONTEXTS 8
+
+struct gxp_ssmt {
+ struct gxp_dev *gxp;
+ void __iomem *idma_ssmt_base;
+ void __iomem *inst_data_ssmt_base;
+};
+
+/*
+ * Initializes @ssmt structure.
+ *
+ * Resources allocated in this function are all device-managed.
+ *
+ * Returns 0 on success, -errno otherwise.
+ */
+int gxp_ssmt_init(struct gxp_dev *gxp, struct gxp_ssmt *ssmt);
+
+/*
+ * Programs SSMT to have @core (0 ~ GXP_NUM_CORES - 1) issue transactions
+ * with VID = @vid.
+ */
+void gxp_ssmt_set_core_vid(struct gxp_ssmt *ssmt, uint core, uint vid);
+
+/*
+ * Programs SSMT to always use SCIDs as VIDs.
+ * Supports both client-driven and clamped modes.
+ */
+void gxp_ssmt_set_bypass(struct gxp_ssmt *ssmt);
+
+#endif /* __GXP_SSMT_H__ */
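A minimal caller sketch for the new SSMT helpers (illustrative only; the per-core VID choice and where the gxp_ssmt object lives are assumptions, not part of this patch):

static int example_ssmt_setup(struct gxp_dev *gxp, struct gxp_ssmt *ssmt)
{
	uint core;
	int ret = gxp_ssmt_init(gxp, ssmt);	/* maps both SSMT register banks */

	if (ret)
		return ret;
	/* Assign each core a distinct VID; the mapping here is illustrative. */
	for (core = 0; core < GXP_NUM_CORES; core++)
		gxp_ssmt_set_core_vid(ssmt, core, core);
	return 0;
}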
diff --git a/gxp-telemetry.c b/gxp-telemetry.c
deleted file mode 100644
index 7eb18cb..0000000
--- a/gxp-telemetry.c
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP telemetry support
- *
- * Copyright (C) 2021 Google LLC
- */
-
-#include <linux/slab.h>
-#include <linux/wait.h>
-
-#include "gxp-config.h"
-#include "gxp-dma.h"
-#include "gxp-firmware.h"
-#include "gxp-firmware-data.h"
-#include "gxp-host-device-structs.h"
-#include "gxp-notification.h"
-#include "gxp-telemetry.h"
-#include "gxp-vd.h"
-
-static inline bool is_core_telemetry_enabled(struct gxp_dev *gxp, uint core,
- u8 type)
-{
- u32 device_status =
- gxp_fw_data_get_telemetry_device_status(gxp, core, type);
-
- return device_status & GXP_TELEMETRY_DEVICE_STATUS_ENABLED;
-}
-
-static void telemetry_status_notification_work(struct work_struct *work)
-{
- struct gxp_telemetry_work *telem_work =
- container_of(work, struct gxp_telemetry_work, work);
- struct gxp_dev *gxp = telem_work->gxp;
- uint core = telem_work->core;
- struct gxp_telemetry_manager *mgr = telem_work->gxp->telemetry_mgr;
-
- /* Wake any threads waiting on an telemetry disable ACK */
- wake_up(&mgr->waitq);
-
- /* Signal the appropriate eventfd for any active telemetry types */
- mutex_lock(&mgr->lock);
-
- if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
- mgr->logging_efd)
- eventfd_signal(mgr->logging_efd, 1);
-
- if (is_core_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
- mgr->tracing_efd)
- eventfd_signal(mgr->tracing_efd, 1);
-
- mutex_unlock(&mgr->lock);
-}
-
-int gxp_telemetry_init(struct gxp_dev *gxp)
-{
- struct gxp_telemetry_manager *mgr;
- uint i;
-
- mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
-
- mutex_init(&mgr->lock);
- for (i = 0; i < GXP_NUM_CORES; i++) {
- INIT_WORK(&mgr->notification_works[i].work,
- telemetry_status_notification_work);
- mgr->notification_works[i].gxp = gxp;
- mgr->notification_works[i].core = i;
-
- }
- init_waitqueue_head(&mgr->waitq);
-
- gxp->telemetry_mgr = mgr;
-
- return 0;
-}
-
-/* Wrapper struct to be used by the telemetry vma_ops. */
-struct telemetry_vma_data {
- struct gxp_dev *gxp;
- struct buffer_data *buff_data;
- u8 type;
- refcount_t ref_count;
-};
-
-static void gxp_telemetry_vma_open(struct vm_area_struct *vma)
-{
- struct telemetry_vma_data *vma_data =
- (struct telemetry_vma_data *)vma->vm_private_data;
- struct gxp_dev *gxp = vma_data->gxp;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- refcount_inc(&vma_data->ref_count);
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
-}
-
-static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
-
-static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
-{
- struct telemetry_vma_data *vma_data =
- (struct telemetry_vma_data *)vma->vm_private_data;
- struct gxp_dev *gxp = vma_data->gxp;
- struct buffer_data *buff_data = vma_data->buff_data;
- u8 type = vma_data->type;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- if (!refcount_dec_and_test(&vma_data->ref_count))
- goto out;
-
- /*
- * Free the telemetry buffers if they are no longer in use.
- *
- * If a client enabled telemetry, then closed their VMA without
- * disabling it, firmware will still be expecting those buffers to be
- * mapped. If this is the case, telemetry will be disabled, and the
- * buffers freed, when the client is closed.
- *
- * We cannot disable telemetry here, since attempting to lock the
- * `vd_semaphore` while holding the mmap lock can lead to deadlocks.
- */
- if (refcount_dec_and_test(&buff_data->ref_count)) {
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->telemetry_mgr->logging_buff_data = NULL;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- gxp->telemetry_mgr->tracing_buff_data = NULL;
- break;
- default:
- dev_warn(gxp->dev, "%s called with invalid type %u\n",
- __func__, type);
- }
- free_telemetry_buffers(gxp, buff_data);
- }
-
- kfree(vma_data);
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
-}
-
-static const struct vm_operations_struct gxp_telemetry_vma_ops = {
- .open = gxp_telemetry_vma_open,
- .close = gxp_telemetry_vma_close,
-};
-
-/**
- * check_telemetry_type_availability() - Checks if @type is valid and whether
- * buffers of that type already exists.
- * @gxp: The GXP device to check availability for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Caller must hold the telemetry_manager's lock.
- *
- * Return:
- * * 0 - @type is valid and can have new buffers created
- * * -EBUSY - Buffers already exist for @type
- * * -EINVAL - @type is not a valid telemetry type
- */
-static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
-{
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- if (gxp->telemetry_mgr->logging_buff_data)
- return -EBUSY;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- if (gxp->telemetry_mgr->tracing_buff_data)
- return -EBUSY;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * allocate_telemetry_buffers() - Allocate and populate a `struct buffer_data`,
- * including allocating and mapping one coherent
- * buffer of @size bytes per core.
- * @gxp: The GXP device to allocate the buffers for
- * @size: The size of buffer to allocate for each core
- *
- * Caller must hold the telemetry_manager's lock.
- *
- * Return: A pointer to the `struct buffer_data` if successful, NULL otherwise
- */
-static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
- size_t size)
-{
- struct buffer_data *data;
- int i;
- void *buf;
- dma_addr_t daddr;
-
- size = size < PAGE_SIZE ? PAGE_SIZE : size;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
-
- /* Allocate cache-coherent buffers for logging/tracing to */
- for (i = 0; i < GXP_NUM_CORES; i++) {
- /* Allocate a coherent buffer in the default domain */
- buf = dma_alloc_coherent(gxp->dev, size, &daddr, GFP_KERNEL);
- if (!buf) {
- dev_err(gxp->dev,
- "Failed to allocate coherent buffer\n");
- goto err_alloc;
- }
- data->buffers[i] = buf;
- data->buffer_daddrs[i] = daddr;
- }
- data->size = size;
- refcount_set(&data->ref_count, 1);
- data->is_enabled = false;
-
- return data;
-
-err_alloc:
- while (i--)
- dma_free_coherent(gxp->dev, size, data->buffers[i],
- data->buffer_daddrs[i]);
- kfree(data);
-
- return NULL;
-}
-
-/**
- * free_telemetry_buffers() - Unmap and free a `struct buffer_data`
- * @gxp: The GXP device the buffers were allocated for
- * @data: The descriptor of the buffers to unmap and free
- *
- * Caller must hold the telemetry_manager's lock.
- */
-static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data)
-{
- int i;
-
- lockdep_assert_held(&gxp->telemetry_mgr->lock);
-
- for (i = 0; i < GXP_NUM_CORES; i++)
- dma_free_coherent(gxp->dev, data->size, data->buffers[i],
- data->buffer_daddrs[i]);
-
- kfree(data);
-}
-
-/**
- * remap_telemetry_buffers() - Remaps a set of telemetry buffers into a
- * user-space vm_area.
- * @gxp: The GXP device the buffers were allocated for
- * @vma: A vm area to remap the buffers into
- * @buff_data: The data describing a set of telemetry buffers to remap
- *
- * Caller must hold the telemetry_manager's lock.
- *
- * Return:
- * * 0 - Success
- * * otherwise - Error returned by `remap_pfn_range()`
- */
-static int remap_telemetry_buffers(struct gxp_dev *gxp,
- struct vm_area_struct *vma,
- struct buffer_data *buff_data)
-{
- unsigned long orig_pgoff = vma->vm_pgoff;
- int i;
- unsigned long offset;
- phys_addr_t phys;
- int ret = 0;
-
- /* mmap the buffers */
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_pgoff = 0;
-
- for (i = 0; i < GXP_NUM_CORES; i++) {
- /*
- * Remap each core's buffer a page at a time, in case it is not
- * physically contiguous.
- */
- for (offset = 0; offset < buff_data->size; offset += PAGE_SIZE) {
- /*
- * `virt_to_phys()` does not work on memory allocated
- * by `dma_alloc_coherent()`, so we have to use
- * `iommu_iova_to_phys()` instead. Since all buffers
- * are mapped to the default domain as well as any per-
- * core domains, we can use it here to get the physical
- * address of any valid IOVA, regardless of its core.
- */
- phys = iommu_iova_to_phys(
- iommu_get_domain_for_dev(gxp->dev),
- buff_data->buffer_daddrs[i] + offset);
- ret = remap_pfn_range(
- vma,
- vma->vm_start + buff_data->size * i + offset,
- phys >> PAGE_SHIFT, PAGE_SIZE,
- vma->vm_page_prot);
- if (ret)
- goto out;
- }
- }
-
-out:
- vma->vm_pgoff = orig_pgoff;
- vma->vm_ops = &gxp_telemetry_vma_ops;
-
- return ret;
-}
-
-int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma)
-{
- int ret = 0;
- struct telemetry_vma_data *vma_data;
- size_t total_size = vma->vm_end - vma->vm_start;
- size_t size = total_size / GXP_NUM_CORES;
- struct buffer_data *buff_data;
- int i;
-
- if (!gxp->telemetry_mgr)
- return -ENODEV;
-
- /* Total size must divide evenly into 1 page-aligned buffer per core */
- if (!total_size || total_size % (PAGE_SIZE * GXP_NUM_CORES))
- return -EINVAL;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- ret = check_telemetry_type_availability(gxp, type);
- if (ret)
- goto err;
-
- vma_data = kmalloc(sizeof(*vma_data), GFP_KERNEL);
- if (!vma_data) {
- ret = -ENOMEM;
- goto err;
- }
-
- buff_data = allocate_telemetry_buffers(gxp, size);
- if (!buff_data) {
- ret = -ENOMEM;
- goto err_free_vma_data;
- }
-
- ret = remap_telemetry_buffers(gxp, vma, buff_data);
- if (ret)
- goto err_free_buffers;
-
- vma_data->gxp = gxp;
- vma_data->buff_data = buff_data;
- vma_data->type = type;
- refcount_set(&vma_data->ref_count, 1);
- vma->vm_private_data = vma_data;
-
- /* Save book-keeping on the buffers in the telemetry manager */
- if (type == GXP_TELEMETRY_TYPE_LOGGING)
- gxp->telemetry_mgr->logging_buff_data = buff_data;
- else /* type == GXP_TELEMETRY_TYPE_TRACING */
- gxp->telemetry_mgr->tracing_buff_data = buff_data;
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
-
- return 0;
-
-err_free_buffers:
- for (i = 0; i < GXP_NUM_CORES; i++)
- dma_free_coherent(gxp->dev, buff_data->size,
- buff_data->buffers[i],
- buff_data->buffer_daddrs[i]);
- kfree(buff_data);
-
-err_free_vma_data:
- kfree(vma_data);
-
-err:
- mutex_unlock(&gxp->telemetry_mgr->lock);
- return ret;
-}
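For reference, the remap loop above places one equally sized buffer per core back to back inside the user VMA. A minimal sketch of the resulting layout, assuming only the mapping base, its total length, and the core count (no driver UAPI is used or implied):

#include <stddef.h>

/*
 * Sketch only: locate core @core's window inside a telemetry mapping of
 * @map_len bytes shared evenly by @num_cores cores.
 */
static inline void *telemetry_core_buffer(void *map_base, size_t map_len,
                                          unsigned int num_cores,
                                          unsigned int core)
{
	size_t per_core = map_len / num_cores;

	return (char *)map_base + (size_t)core * per_core;
}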
-
-int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
-{
- struct buffer_data *data;
- int ret = 0;
- uint core, virt_core;
- struct gxp_virtual_device *vd;
-
- /*
- * `vd_semaphore` cannot be acquired while holding the telemetry lock,
- * so acquire it here before locking the telemetry lock.
- */
- down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->telemetry_mgr->logging_buff_data;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->telemetry_mgr->tracing_buff_data;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (!data) {
- ret = -ENXIO;
- goto out;
- }
-
- /* Map the buffers for any cores already running */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- vd = gxp->core_to_vd[core];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
- ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, data->buffers[core], vd, BIT(virt_core),
- data->size, data->buffer_daddrs[core], 0);
- if (ret)
- goto err;
- }
- }
-
- /* Populate the buffer fields in firmware-data */
- data->host_status |= GXP_TELEMETRY_HOST_STATUS_ENABLED;
- gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
- data->buffer_daddrs, data->size);
-
- /* Notify any running cores that firmware-data was updated */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp_is_fw_running(gxp, core))
- gxp_notification_send(gxp, core,
- CORE_NOTIF_TELEMETRY_STATUS);
- }
-
- refcount_inc(&data->ref_count);
- data->is_enabled = true;
-
- goto out;
-err:
- while (core--) {
- vd = gxp->core_to_vd[core];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), data->size,
- data->buffer_daddrs[core]);
- }
- }
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
- up_read(&gxp->vd_semaphore);
-
- return ret;
-}
-
-/**
- * notify_core_and_wait_for_disable() - Notify a core that telemetry state has
- * been changed by the host and wait for
- * the core to stop using telemetry.
- * @gxp: The GXP device telemetry is changing for
- * @core: The core in @gxp to notify of the telemetry state change
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Caller must hold `telemetry_mgr->lock`.
- * Caller must hold @gxp's virtual device lock
- *
- * Return:
- * * 0 - Firmware on @core is no longer using telemetry of @type
- * * -ENXIO - Firmware on @core is unresponsive
- */
-static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
- u8 type)
-{
- uint retries_left = 50;
-
- gxp_notification_send(gxp, core, CORE_NOTIF_TELEMETRY_STATUS);
-
- /* Wait for ACK from firmware */
- while (is_core_telemetry_enabled(gxp, core, type) &&
- gxp_is_fw_running(gxp, core) && retries_left) {
- /* Release vd_semaphore while waiting */
- up_read(&gxp->vd_semaphore);
-
- /*
- * The VD lock must be held to check if firmware is running, so
- * the wait condition is only whether the firmware data has been
- * updated to show the core disabling telemetry.
- *
- * If a core does stop running firmware while this function is
- * asleep, it will be seen at the next timeout.
- */
- wait_event_timeout(gxp->telemetry_mgr->waitq,
- !is_core_telemetry_enabled(gxp, core, type),
- msecs_to_jiffies(10));
- retries_left--;
-
- /*
- * No function may attempt to acquire the `vd_semaphore` while
- * holding the telemetry lock, so it must be released, then
- * re-acquired once the `vd_semaphore` is held.
- */
- mutex_unlock(&gxp->telemetry_mgr->lock);
- down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
- }
-
- /*
- * If firmware has stopped running altogether, that is sufficient to be
- * considered disabled. If firmware is started on this core again, it
- * is responsible for clearing its status.
- */
- if (unlikely(is_core_telemetry_enabled(gxp, core, type) &&
- gxp_is_fw_running(gxp, core)))
- return -ENXIO;
-
- return 0;
-}
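The wait loop above works around the lock ordering (`vd_semaphore` must be taken before the telemetry lock) by dropping and re-taking the locks around each sleep. A generic sketch of that pattern, using placeholder names rather than the driver's own symbols:

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

struct example_ctx {
	struct rw_semaphore outer_lock;	/* must be taken first */
	struct mutex inner_lock;	/* must be taken second */
	wait_queue_head_t waitq;
	bool done;
};

/* Called with both locks held; returns with both locks held. */
static void example_wait_for_done(struct example_ctx *ctx)
{
	unsigned int retries = 50;

	while (!READ_ONCE(ctx->done) && retries--) {
		/* Drop the outer lock so the other side can make progress. */
		up_read(&ctx->outer_lock);

		wait_event_timeout(ctx->waitq, READ_ONCE(ctx->done),
				   msecs_to_jiffies(10));

		/*
		 * The outer lock may not be acquired while the inner one is
		 * held, so release the inner lock and re-take both in order.
		 */
		mutex_unlock(&ctx->inner_lock);
		down_read(&ctx->outer_lock);
		mutex_lock(&ctx->inner_lock);
	}
}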
-
-/**
- * telemetry_disable_locked() - Helper function to break out the actual
- * process of disabling telemetry so that it
- * can be invoked by internal functions that are
- * already holding the telemetry lock.
- * @gxp: The GXP device to disable either logging or tracing for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Caller must hold `telemetry_mgr->lock`.
- * Caller must hold `gxp->vd_semaphore` for reading.
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- * * -ENXIO - Buffers for @type have not been created/mapped yet
- */
-static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
-{
- struct buffer_data *data;
- int ret = 0;
- dma_addr_t null_daddrs[GXP_NUM_CORES] = {0};
- uint core, virt_core;
- struct gxp_virtual_device *vd;
-
- /* Cleanup telemetry manager's book-keeping */
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->telemetry_mgr->logging_buff_data;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->telemetry_mgr->tracing_buff_data;
- break;
- default:
- return -EINVAL;
- }
-
- if (!data)
- return -ENXIO;
-
- if (!(data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED))
- return 0;
-
- data->is_enabled = false;
-
- /* Clear the log buffer fields in firmware-data */
- data->host_status &= ~GXP_TELEMETRY_HOST_STATUS_ENABLED;
- gxp_fw_data_set_telemetry_descriptors(gxp, type, data->host_status,
- null_daddrs, 0);
-
- /* Notify any running cores that firmware-data was updated */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp_is_fw_running(gxp, core)) {
- ret = notify_core_and_wait_for_disable(gxp, core, type);
- if (ret)
- dev_warn(
- gxp->dev,
- "%s: core%u failed to disable telemetry (type=%u, ret=%d)\n",
- __func__, core, type, ret);
- }
- vd = gxp->core_to_vd[core];
- if (vd != NULL) {
- virt_core = gxp_vd_phys_core_to_virt_core(vd, core);
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), data->size,
- data->buffer_daddrs[core]);
- }
- }
-
- if (refcount_dec_and_test(&data->ref_count)) {
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->telemetry_mgr->logging_buff_data = NULL;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- gxp->telemetry_mgr->tracing_buff_data = NULL;
- break;
- default:
- /* NO-OP, we returned above if `type` was invalid */
- break;
- }
- free_telemetry_buffers(gxp, data);
- }
-
- return 0;
-}
-
-int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type)
-{
- int ret;
-
- /*
- * `vd_semaphore` cannot be acquired while holding the telemetry lock,
- * so acquire it here before locking the telemetry lock.
- */
- down_read(&gxp->vd_semaphore);
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- ret = telemetry_disable_locked(gxp, type);
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
- up_read(&gxp->vd_semaphore);
-
- return ret;
-}
-
-int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
-{
- struct eventfd_ctx *new_ctx;
- struct eventfd_ctx **ctx_to_set = NULL;
- int ret = 0;
-
- new_ctx = eventfd_ctx_fdget(fd);
- if (IS_ERR(new_ctx))
- return PTR_ERR(new_ctx);
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- ctx_to_set = &gxp->telemetry_mgr->logging_efd;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- ctx_to_set = &gxp->telemetry_mgr->tracing_efd;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (*ctx_to_set) {
- dev_warn(gxp->dev,
- "Replacing existing telemetry eventfd (type=%u)\n",
- type);
- eventfd_ctx_put(*ctx_to_set);
- }
-
- *ctx_to_set = new_ctx;
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
- return ret;
-}
-
-int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
-{
- int ret = 0;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- switch (type) {
- case GXP_TELEMETRY_TYPE_LOGGING:
- if (gxp->telemetry_mgr->logging_efd)
- eventfd_ctx_put(gxp->telemetry_mgr->logging_efd);
- gxp->telemetry_mgr->logging_efd = NULL;
- break;
- case GXP_TELEMETRY_TYPE_TRACING:
- if (gxp->telemetry_mgr->tracing_efd)
- eventfd_ctx_put(gxp->telemetry_mgr->tracing_efd);
- gxp->telemetry_mgr->tracing_efd = NULL;
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
-
- return ret;
-}
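The register/unregister pair above follows the usual eventfd lifecycle: take a reference with `eventfd_ctx_fdget()`, drop any previously registered context, and release the reference on unregistration. A minimal sketch of that lifecycle with the stock kernel eventfd API (the signaling helper is illustrative, not this driver's notify path):

#include <linux/err.h>
#include <linux/eventfd.h>

/* Register @fd into @slot, replacing any previous registration. */
static int example_register_eventfd(struct eventfd_ctx **slot, int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* -EBADF, or -EINVAL if not an eventfd */
	if (*slot)
		eventfd_ctx_put(*slot);	/* drop the old reference */
	*slot = ctx;
	return 0;
}

/* Signal the registered eventfd, if any. */
static void example_signal_eventfd(struct eventfd_ctx *ctx)
{
	if (ctx)
		eventfd_signal(ctx, 1);
}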
-
-struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
- uint core)
-{
- struct gxp_telemetry_manager *mgr = gxp->telemetry_mgr;
-
- if (!mgr || core >= GXP_NUM_CORES)
- return NULL;
-
- return &mgr->notification_works[core].work;
-}
diff --git a/gxp-telemetry.h b/gxp-telemetry.h
deleted file mode 100644
index d2e63de..0000000
--- a/gxp-telemetry.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * GXP telemetry support
- *
- * Copyright (C) 2021 Google LLC
- */
-#ifndef __GXP_TELEMETRY_H__
-#define __GXP_TELEMETRY_H__
-
-#include <linux/eventfd.h>
-#include <linux/refcount.h>
-#include <linux/types.h>
-
-#include "gxp-internal.h"
-#include "gxp.h"
-
-struct gxp_telemetry_work {
- struct work_struct work;
- struct gxp_dev *gxp;
- uint core;
-};
-
-struct gxp_telemetry_manager {
- struct buffer_data {
- u32 host_status;
- void *buffers[GXP_NUM_CORES];
- dma_addr_t buffer_daddrs[GXP_NUM_CORES];
- u32 size;
- refcount_t ref_count;
- bool is_enabled;
- } *logging_buff_data, *tracing_buff_data;
- /* Protects logging_buff_data and tracing_buff_data */
- struct mutex lock;
- struct gxp_telemetry_work notification_works[GXP_NUM_CORES];
- wait_queue_head_t waitq;
- struct eventfd_ctx *logging_efd;
- struct eventfd_ctx *tracing_efd;
-};
-
-/**
- * gxp_telemetry_init() - Initialize telemetry support
- * @gxp: The GXP device to initialize telemetry support for
- *
- * Return:
- * * 0 - Success
- * * -ENOMEM - Insufficient memory is available to initialize support
- */
-int gxp_telemetry_init(struct gxp_dev *gxp);
-
-/**
- * gxp_telemetry_mmap_buffers() - Allocate a telemetry buffer for each core and
- * map them to their core and the user-space vma
- * @gxp: The GXP device to create the buffers for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- * @vma: The vma from user-space which all cores' buffers will be mapped into
- *
- * Return:
- * * 0 - Success
- * * -ENODEV - Telemetry support has not been initialized. Must explicitly
- * check this, since this function is called based on user-input.
- * * -EBUSY - The requested telemetry @type is already in use
- * * -EINVAL - Either the vma size is not aligned or @type is not valid
- * * -ENOMEM - Insufficient memory is available to allocate and map the buffers
- */
-int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma);
-
-/**
- * gxp_telemetry_enable() - Enable logging or tracing for all DSP cores
- * @gxp: The GXP device to enable either logging or tracing for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- * * -ENXIO - Buffers for @type have not been created/mapped yet
- */
-int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type);
-
-/**
- * gxp_telemetry_disable() - Disable logging or tracing for all DSP cores
- * @gxp: The GXP device to disable either logging or tracing for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- * * -ENXIO - Buffers for @type have not been created/mapped yet
- */
-int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type);
-
-/**
- * gxp_telemetry_register_eventfd() - Register an eventfd to be signaled when
- * telemetry notifications arrive while the
- * specified @type of telemetry is enabled
- * @gxp: The GXP device to register the eventfd for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- * @fd: A file descriptor for an eventfd from user-space
- *
- * If another eventfd has already been registered for the given @type, the old
- * eventfd will be unregistered and replaced.
- *
- * Return:
- * * 0 - Success
- * * -EBADF - @fd is not a valid file descriptor (via `eventfd_ctx_fdget()`)
- * * -EINVAL - Invalid @type or @fd is not an eventfd
- */
-int gxp_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd);
-
-/**
- * gxp_telemetry_unregister_eventfd() - Unregister and release a reference to
- * a previously registered eventfd
- * @gxp: The GXP device to unregister the eventfd for
- * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
- *
- * Return:
- * * 0 - Success
- * * -EINVAL - The @type provided is not valid
- */
-int gxp_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type);
-
-/**
- * gxp_telemetry_get_notification_handler() - Get the notification handler work
- * for the specified core
- * @gxp: The GXP device to obtain the handler for
- * @core: The physical core number to obtain the handler
- *
- * Return: A pointer to the work_struct for the @core's notification handler if
- * successful. NULL if telemetry has not been initialized or @core is
- * invalid.
- */
-struct work_struct *gxp_telemetry_get_notification_handler(struct gxp_dev *gxp,
- uint core);
-
-#endif /* __GXP_TELEMETRY_H__ */
diff --git a/gxp-thermal.c b/gxp-thermal.c
index ae6049d..671d140 100644
--- a/gxp-thermal.c
+++ b/gxp-thermal.c
@@ -2,322 +2,93 @@
/*
* Platform thermal driver for GXP.
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2023 Google LLC
*/
#include <linux/acpm_dvfs.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-#include <linux/version.h>
+#include <linux/minmax.h>
+#include <gcip/gcip-pm.h>
+#include <gcip/gcip-thermal.h>
+
+#include "gxp-config.h"
#include "gxp-internal.h"
#include "gxp-pm.h"
#include "gxp-thermal.h"
-#include "gxp-lpm.h"
+#if GXP_HAS_MCU
+#include "gxp-kci.h"
+#include "gxp-mcu.h"
+#endif /* GXP_HAS_MCU */
-/*
- * Value comes from internal measurement
- * b/229623553
- */
-static struct gxp_state_pwr state_pwr_map[] = {
- {1155000, 78},
- {975000, 58},
- {750000, 40},
- {560000, 27},
- {373000, 20},
- {268000, 16},
- {178000, 13},
-};
-
-static int gxp_get_max_state(struct thermal_cooling_device *cdev,
- unsigned long *state)
+static int gxp_thermal_get_rate(void *data, unsigned long *rate)
{
- struct gxp_thermal_manager *thermal = cdev->devdata;
-
- if (!thermal->gxp_num_states)
- return -EIO;
+ *rate = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, 0);
- *state = thermal->gxp_num_states - 1;
return 0;
}
-/*
- * Set cooling state.
- */
-static int gxp_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long cooling_state)
+static int gxp_thermal_set_rate(void *data, unsigned long rate)
{
+ struct gxp_dev *gxp = data;
int ret = 0;
- struct gxp_thermal_manager *thermal = cdev->devdata;
- struct device *dev = thermal->gxp->dev;
- unsigned long pwr_state;
- if (cooling_state >= thermal->gxp_num_states) {
- dev_err(dev, "%s: invalid cooling state %lu\n", __func__,
- cooling_state);
- return -EINVAL;
- }
+ if (!gxp_is_direct_mode(gxp)) {
+#if GXP_HAS_MCU
+ struct gxp_mcu *mcu = gxp_mcu_of(gxp);
- mutex_lock(&thermal->lock);
- cooling_state = max(thermal->sysfs_req, cooling_state);
- if (cooling_state >= ARRAY_SIZE(state_pwr_map)) {
- dev_err(dev, "Unsupported cooling state: %lu\n", cooling_state);
- ret = -EINVAL;
- goto out;
- }
- pwr_state = state_pwr_map[cooling_state].state;
- dev_dbg(dev, "setting policy %ld\n", pwr_state);
- if (cooling_state != thermal->cooling_state) {
-#ifdef CONFIG_GXP_CLOUDRIPPER
- ret = exynos_acpm_set_policy(AUR_DVFS_DOMAIN,
- pwr_state < aur_power_state2rate[AUR_UUD] ?
- aur_power_state2rate[AUR_UUD] :
- pwr_state);
-#endif
- if (ret) {
- dev_err(dev,
- "error setting gxp cooling policy: %d\n", ret);
- goto out;
- }
- thermal->cooling_state = cooling_state;
- gxp_pm_set_thermal_limit(thermal->gxp, pwr_state);
+ ret = gxp_kci_notify_throttling(&mcu->kci, rate);
+#endif /* GXP_HAS_MCU */
} else {
- ret = -EALREADY;
+ rate = max_t(unsigned long, rate,
+ aur_power_state2rate[AUR_UUD]);
+ ret = gxp_pm_blk_set_rate_acpm(gxp, rate);
}
-out:
- mutex_unlock(&thermal->lock);
- return ret;
-}
-
-static int gxp_get_cur_state(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- int ret = 0;
- struct gxp_thermal_manager *thermal = cdev->devdata;
-
- mutex_lock(&thermal->lock);
- *state = thermal->cooling_state;
- if (*state >= thermal->gxp_num_states) {
- dev_err(thermal->gxp->dev,
- "Unknown cooling state: %lu, resetting\n", *state);
- ret = -EINVAL;
- goto out;
- }
-out:
- mutex_unlock(&thermal->lock);
- return ret;
-}
-
-static int gxp_state2power_internal(unsigned long state, u32 *power,
- struct gxp_thermal_manager *thermal)
-{
- int i;
-
- for (i = 0; i < thermal->gxp_num_states; i++) {
- if (state == state_pwr_map[i].state) {
- *power = state_pwr_map[i].power;
- return 0;
- }
- }
- dev_err(thermal->gxp->dev, "Unknown state req for: %lu\n", state);
- *power = 0;
- return -EINVAL;
-}
-
-static int gxp_get_requested_power(struct thermal_cooling_device *cdev,
- u32 *power)
-{
- unsigned long power_state;
- struct gxp_thermal_manager *cooling = cdev->devdata;
-
- power_state = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, 0);
- return gxp_state2power_internal(power_state, power, cooling);
-}
-
-/* TODO(b/213272324): Move state2power table to dts */
-static int gxp_state2power(struct thermal_cooling_device *cdev,
- unsigned long state, u32 *power)
-{
- struct gxp_thermal_manager *thermal = cdev->devdata;
-
- if (state >= thermal->gxp_num_states) {
- dev_err(thermal->gxp->dev, "%s: invalid state: %lu\n", __func__,
- state);
- return -EINVAL;
+ if (ret) {
+ dev_err(gxp->dev, "error setting gxp cooling state: %d\n", ret);
+ return ret;
}
- return gxp_state2power_internal(state_pwr_map[state].state, power,
- thermal);
-}
-
-static int gxp_power2state(struct thermal_cooling_device *cdev,
- u32 power, unsigned long *state)
-{
- int i, penultimate_throttle_state;
- struct gxp_thermal_manager *thermal = cdev->devdata;
-
- *state = 0;
- /* Less than 2 state means we cannot really throttle */
- if (thermal->gxp_num_states < 2)
- return thermal->gxp_num_states == 1 ? 0 : -EIO;
+ gxp_pm_set_thermal_limit(gxp, rate);
- penultimate_throttle_state = thermal->gxp_num_states - 2;
- /*
- * argument "power" is the maximum allowed power consumption in mW as
- * defined by the PID control loop. Check for the first state that is
- * less than or equal to the current allowed power. state_pwr_map is
- * descending, so lowest power consumption is last value in the array
- * return lowest state even if it consumes more power than allowed as
- * not all platforms can handle throttling below an active state
- */
- for (i = penultimate_throttle_state; i >= 0; --i) {
- if (power < state_pwr_map[i].power) {
- *state = i + 1;
- break;
- }
- }
return 0;
}
-static struct thermal_cooling_device_ops gxp_cooling_ops = {
- .get_max_state = gxp_get_max_state,
- .get_cur_state = gxp_get_cur_state,
- .set_cur_state = gxp_set_cur_state,
- .get_requested_power = gxp_get_requested_power,
- .state2power = gxp_state2power,
- .power2state = gxp_power2state,
-};
-
-static void gxp_thermal_exit(struct gxp_thermal_manager *thermal)
+static int gxp_thermal_control(void *data, bool enable)
{
- if (!IS_ERR_OR_NULL(thermal->cdev))
- thermal_cooling_device_unregister(thermal->cdev);
+ return -EOPNOTSUPP;
}
-static void devm_gxp_thermal_release(struct device *dev, void *res)
+int gxp_thermal_init(struct gxp_dev *gxp)
{
- struct gxp_thermal_manager *thermal = res;
-
- gxp_thermal_exit(thermal);
-}
-
-static ssize_t
-user_vote_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct thermal_cooling_device *cdev =
- container_of(dev, struct thermal_cooling_device,
- device);
- struct gxp_thermal_manager *cooling = cdev->devdata;
-
- if (!cooling)
- return -ENODEV;
-
- return sysfs_emit(buf, "%lu\n", cooling->sysfs_req);
-}
-
-static ssize_t user_vote_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_cooling_device *cdev =
- container_of(dev, struct thermal_cooling_device,
- device);
- struct gxp_thermal_manager *cooling = cdev->devdata;
- int ret;
- unsigned long state;
-
- if (!cooling)
- return -ENODEV;
-
- ret = kstrtoul(buf, 0, &state);
- if (ret)
- return ret;
-
- if (state >= cooling->gxp_num_states)
- return -EINVAL;
-
- mutex_lock(&cdev->lock);
- cooling->sysfs_req = state;
- cdev->updated = false;
- mutex_unlock(&cdev->lock);
- thermal_cdev_update(cdev);
- return count;
-}
+ const struct gcip_thermal_args args = {
+ .dev = gxp->dev,
+ .pm = gxp->power_mgr->pm,
+ .dentry = gxp->d_entry,
+ .node_name = GXP_COOLING_NAME,
+ .type = GXP_COOLING_NAME,
+ .data = gxp,
+ .get_rate = gxp_thermal_get_rate,
+ .set_rate = gxp_thermal_set_rate,
+ .control = gxp_thermal_control,
+ };
+ struct gcip_thermal *thermal;
+
+ if (gxp->thermal)
+ return -EEXIST;
+
+ thermal = gcip_thermal_create(&args);
+ if (IS_ERR(thermal))
+ return PTR_ERR(thermal);
+
+ gxp->thermal = thermal;
-static DEVICE_ATTR_RW(user_vote);
-
-static int
-gxp_thermal_cooling_register(struct gxp_thermal_manager *thermal, char *type)
-{
- struct device_node *cooling_node = NULL;
-
- thermal->op_data = NULL;
- thermal->gxp_num_states = ARRAY_SIZE(state_pwr_map);
-
- mutex_init(&thermal->lock);
- cooling_node = of_find_node_by_name(NULL, GXP_COOLING_NAME);
-
- /* TODO: Change this to fatal error once dts change is merged */
- if (!cooling_node)
- dev_warn(thermal->gxp->dev, "failed to find cooling node\n");
- /* Initialize the cooling state as 0, means "no cooling" */
- thermal->cooling_state = 0;
- thermal->cdev = thermal_of_cooling_device_register(
- cooling_node, type, thermal, &gxp_cooling_ops);
- if (IS_ERR(thermal->cdev))
- return PTR_ERR(thermal->cdev);
-
- return device_create_file(&thermal->cdev->device, &dev_attr_user_vote);
-}
-
-static int cooling_init(struct gxp_thermal_manager *thermal, struct device *dev)
-{
- int err;
- struct dentry *d;
-
- d = debugfs_create_dir("cooling", thermal->gxp->d_entry);
- /* don't let debugfs creation failure abort the init procedure */
- if (IS_ERR_OR_NULL(d))
- dev_warn(dev, "failed to create debug fs for cooling");
- thermal->cooling_root = d;
-
- err = gxp_thermal_cooling_register(thermal, GXP_COOLING_NAME);
- if (err) {
- dev_err(dev, "failed to initialize external cooling\n");
- gxp_thermal_exit(thermal);
- return err;
- }
return 0;
}
-struct gxp_thermal_manager
-*gxp_thermal_init(struct gxp_dev *gxp)
+void gxp_thermal_exit(struct gxp_dev *gxp)
{
- struct device *dev = gxp->dev;
- struct gxp_thermal_manager *thermal;
- int err;
-
- thermal = devres_alloc(devm_gxp_thermal_release, sizeof(*thermal),
- GFP_KERNEL);
- if (!thermal)
- return ERR_PTR(-ENOMEM);
-
- thermal->gxp = gxp;
- err = cooling_init(thermal, dev);
- if (err) {
- devres_free(thermal);
- return ERR_PTR(err);
- }
-
- devres_add(dev, thermal);
- return thermal;
+ gcip_thermal_destroy(gxp->thermal);
+ gxp->thermal = NULL;
}
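For context, a minimal sketch of how these init/exit hooks would typically be wired into the device probe/remove flow; the call sites and function names below are assumptions, since the common platform code is not part of this hunk:

#include "gxp-thermal.h"

/* Placeholder probe/remove hooks; not taken from this commit. */
static int example_platform_after_probe(struct gxp_dev *gxp)
{
	return gxp_thermal_init(gxp);
}

static void example_platform_before_remove(struct gxp_dev *gxp)
{
	gxp_thermal_exit(gxp);
}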
diff --git a/gxp-thermal.h b/gxp-thermal.h
index c1939ef..aa4fe3a 100644
--- a/gxp-thermal.h
+++ b/gxp-thermal.h
@@ -2,43 +2,18 @@
/*
* Platform thermal driver for GXP.
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2023 Google LLC
*/
#ifndef __GXP_THERMAL_H__
#define __GXP_THERMAL_H__
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/thermal.h>
+#include <gcip/gcip-thermal.h>
#include "gxp-internal.h"
-#include "gxp-pm.h"
#define GXP_COOLING_NAME "gxp-cooling"
-struct gxp_thermal_manager {
- struct dentry *cooling_root;
- struct thermal_cooling_device *cdev;
- struct mutex lock;
- void *op_data;
- unsigned long cooling_state;
- unsigned long sysfs_req;
- unsigned int gxp_num_states;
- struct gxp_dev *gxp;
- bool thermal_suspended; /* GXP thermal suspended state */
-};
-
-/*
- * Internal structure to do the state/pwr mapping
- * state: kHz that AUR is running
- * power: mW that the state consume
- */
-struct gxp_state_pwr {
- unsigned long state;
- u32 power;
-};
-
-struct gxp_thermal_manager *gxp_thermal_init(struct gxp_dev *gxp);
+int gxp_thermal_init(struct gxp_dev *gxp);
+void gxp_thermal_exit(struct gxp_dev *gxp);
#endif /* __GXP_THERMAL_H__ */
diff --git a/gxp-vd.c b/gxp-vd.c
index ae07455..37d8ab5 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -5,53 +5,656 @@
* Copyright (C) 2021 Google LLC
*/
+#include <linux/atomic.h>
#include <linux/bitops.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <gcip/gcip-alloc-helper.h>
+#include <gcip/gcip-image-config.h>
+
+#include "gxp-config.h"
+#include "gxp-core-telemetry.h"
#include "gxp-debug-dump.h"
#include "gxp-dma.h"
#include "gxp-domain-pool.h"
-#include "gxp-firmware.h"
+#include "gxp-doorbell.h"
+#include "gxp-eventfd.h"
#include "gxp-firmware-data.h"
+#include "gxp-firmware-loader.h"
+#include "gxp-firmware.h"
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-mailbox.h"
#include "gxp-notification.h"
#include "gxp-pm.h"
-#include "gxp-telemetry.h"
#include "gxp-vd.h"
-#include "gxp-wakelock.h"
static inline void hold_core_in_reset(struct gxp_dev *gxp, uint core)
{
- gxp_write_32_core(gxp, core, GXP_REG_ETM_PWRCTL,
- 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
+ gxp_write_32(gxp, GXP_CORE_REG_ETM_PWRCTL(core),
+ BIT(GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT));
}
-int gxp_vd_init(struct gxp_dev *gxp)
+void gxp_vd_init(struct gxp_dev *gxp)
{
uint core;
- int ret;
init_rwsem(&gxp->vd_semaphore);
/* All cores start as free */
for (core = 0; core < GXP_NUM_CORES; core++)
gxp->core_to_vd[core] = NULL;
+ atomic_set(&gxp->next_vdid, 0);
+ ida_init(&gxp->shared_slice_idp);
+}
+
+void gxp_vd_destroy(struct gxp_dev *gxp)
+{
+ ida_destroy(&gxp->shared_slice_idp);
+}
+
+/* Allocates an SGT and map @daddr to it. */
+static int map_ns_region(struct gxp_virtual_device *vd, dma_addr_t daddr,
+ size_t size)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct sg_table *sgt;
+ size_t idx;
+ const size_t n_reg = ARRAY_SIZE(vd->ns_regions);
+ int ret;
+
+ for (idx = 0; idx < n_reg; idx++) {
+ if (!vd->ns_regions[idx].sgt)
+ break;
+ }
+ if (idx == n_reg) {
+ dev_err(gxp->dev, "NS regions array %zx is full", n_reg);
+ return -ENOSPC;
+ }
+ sgt = gcip_alloc_noncontiguous(gxp->dev, size, GFP_KERNEL);
+ if (!sgt)
+ return -ENOMEM;
+
+ ret = gxp_dma_map_iova_sgt(gxp, vd->domain, daddr, sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ dev_err(gxp->dev, "NS map %pad with size %#zx failed", &daddr,
+ size);
+ gcip_free_noncontiguous(sgt);
+ return ret;
+ }
+ vd->ns_regions[idx].daddr = daddr;
+ vd->ns_regions[idx].sgt = sgt;
+
+ return 0;
+}
+
+static void unmap_ns_region(struct gxp_virtual_device *vd, dma_addr_t daddr)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct sg_table *sgt;
+ size_t idx;
+ const size_t n_reg = ARRAY_SIZE(vd->ns_regions);
+
+ for (idx = 0; idx < n_reg; idx++) {
+ if (daddr == vd->ns_regions[idx].daddr)
+ break;
+ }
+ if (idx == n_reg) {
+ dev_warn(gxp->dev, "unable to find NS mapping @ %pad", &daddr);
+ return;
+ }
+
+ sgt = vd->ns_regions[idx].sgt;
+ vd->ns_regions[idx].sgt = NULL;
+ vd->ns_regions[idx].daddr = 0;
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain, daddr, sgt);
+ gcip_free_noncontiguous(sgt);
+}
+
+/* Maps the shared buffer region to @vd->domain. */
+static int map_core_shared_buffer(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ const size_t shared_size = GXP_SHARED_SLICE_SIZE;
+
+ if (!gxp->shared_buf.paddr)
+ return 0;
+ return gxp_iommu_map(gxp, vd->domain, gxp->shared_buf.daddr,
+ gxp->shared_buf.paddr +
+ shared_size * vd->slice_index,
+ shared_size, IOMMU_READ | IOMMU_WRITE);
+}
+
+/* Reverts map_core_shared_buffer. */
+static void unmap_core_shared_buffer(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ const size_t shared_size = GXP_SHARED_SLICE_SIZE;
+
+ if (!gxp->shared_buf.paddr)
+ return;
+ gxp_iommu_unmap(gxp, vd->domain, gxp->shared_buf.daddr, shared_size);
+}
+
+/* Maps @res->daddr to @res->paddr to @vd->domain. */
+static int map_resource(struct gxp_virtual_device *vd,
+ struct gxp_mapped_resource *res)
+{
+ if (res->daddr == 0)
+ return 0;
+ return gxp_iommu_map(vd->gxp, vd->domain, res->daddr, res->paddr,
+ res->size, IOMMU_READ | IOMMU_WRITE);
+}
+
+/* Reverts map_resource. */
+static void unmap_resource(struct gxp_virtual_device *vd,
+ struct gxp_mapped_resource *res)
+{
+ if (res->daddr == 0)
+ return;
+ gxp_iommu_unmap(vd->gxp, vd->domain, res->daddr, res->size);
+}
+
+/*
+ * Assigns @res's IOVA, size from image config.
+ */
+static void assign_resource(struct gxp_mapped_resource *res,
+ struct gcip_image_config *img_cfg,
+ enum gxp_imgcfg_idx idx)
+{
+ res->daddr = img_cfg->iommu_mappings[idx].virt_address;
+ res->size = gcip_config_to_size(
+ img_cfg->iommu_mappings[idx].image_config_value);
+}
+
+/*
+ * This function does the following:
+ * - Get CORE_CFG, VD_CFG, SYS_CFG's IOVAs and sizes from image config.
+ * - Map above regions with this layout:
+ * Pool
+ * +------------------------------------+
+ * | SLICE_0: CORE_CFG |
+ * | SLICE_0: VD_CFG |
+ * | <padding to GXP_SHARED_SLICE_SIZE> |
+ * +------------------------------------+
+ * | SLICE_1: CORE_CFG |
+ * | SLICE_1: VD_CFG |
+ * | <padding to GXP_SHARED_SLICE_SIZE> |
+ * +------------------------------------+
+ * | ... SLICE_N |
+ * +------------------------------------+
+ * | <padding> |
+ * +------------------------------------+
+ * | SYS_CFG |
+ * +------------------------------------+
+ *
+ * To keep compatibility, if mappings [0] and [1] are not both present, this
+ * function falls back to mapping the MCU-core shared region with a hard-coded
+ * IOVA and size.
+ */
+static int map_cfg_regions(struct gxp_virtual_device *vd,
+ struct gcip_image_config *img_cfg)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ struct gxp_mapped_resource pool;
+ struct gxp_mapped_resource res;
+ size_t offset;
+ int ret;
+
+ if (img_cfg->num_iommu_mappings < 3)
+ return map_core_shared_buffer(vd);
+ pool = gxp_fw_data_resource(gxp);
+
+ assign_resource(&res, img_cfg, CORE_CFG_REGION_IDX);
+ offset = vd->slice_index * GXP_SHARED_SLICE_SIZE;
+ res.vaddr = pool.vaddr + offset;
+ res.paddr = pool.paddr + offset;
+ ret = map_resource(vd, &res);
+ if (ret) {
+ dev_err(gxp->dev, "map core config %pad -> offset %#zx failed",
+ &res.daddr, offset);
+ return ret;
+ }
+ vd->core_cfg = res;
+
+ assign_resource(&res, img_cfg, VD_CFG_REGION_IDX);
+ offset += vd->core_cfg.size;
+ res.vaddr = pool.vaddr + offset;
+ res.paddr = pool.paddr + offset;
+ ret = map_resource(vd, &res);
+ if (ret) {
+ dev_err(gxp->dev, "map VD config %pad -> offset %#zx failed",
+ &res.daddr, offset);
+ goto err_unmap_core;
+ }
+ vd->vd_cfg = res;
+ /* image config correctness check */
+ if (vd->core_cfg.size + vd->vd_cfg.size > GXP_SHARED_SLICE_SIZE) {
+ dev_err(gxp->dev,
+ "Core CFG (%#llx) + VD CFG (%#llx) exceeds %#x",
+ vd->core_cfg.size, vd->vd_cfg.size,
+ GXP_SHARED_SLICE_SIZE);
+ ret = -ENOSPC;
+ goto err_unmap_vd;
+ }
+ assign_resource(&res, img_cfg, SYS_CFG_REGION_IDX);
+ if (res.size != GXP_FW_DATA_SYSCFG_SIZE) {
+ dev_err(gxp->dev, "invalid system cfg size: %#llx", res.size);
+ ret = -EINVAL;
+ goto err_unmap_vd;
+ }
+ res.vaddr = gxp_fw_data_system_cfg(gxp);
+ offset = res.vaddr - pool.vaddr;
+ res.paddr = pool.paddr + offset;
+ ret = map_resource(vd, &res);
+ if (ret) {
+ dev_err(gxp->dev, "map sys config %pad -> offset %#zx failed",
+ &res.daddr, offset);
+ goto err_unmap_vd;
+ }
+ vd->sys_cfg = res;
- ret = gxp_fw_init(gxp);
+ return 0;
+err_unmap_vd:
+ unmap_resource(vd, &vd->vd_cfg);
+ vd->vd_cfg.daddr = 0;
+err_unmap_core:
+ unmap_resource(vd, &vd->core_cfg);
+ vd->core_cfg.daddr = 0;
return ret;
}
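A worked example of the slice arithmetic performed above; the sizes here are illustrative assumptions, not values taken from a real image config:

/*
 * Example: with a slice size of 0x9000, a CORE_CFG region of 0x2000 and a
 * VD_CFG region of 0x1000, slice N starts at N * 0x9000 into the pool,
 * CORE_CFG occupies [N * 0x9000, N * 0x9000 + 0x2000), VD_CFG occupies the
 * following 0x1000 bytes, and the remainder of the slice is padding.
 * SYS_CFG is mapped once for the whole device, at the offset that
 * gxp_fw_data_system_cfg() reports within the same pool.
 */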
-void gxp_vd_destroy(struct gxp_dev *gxp)
+static void unmap_cfg_regions(struct gxp_virtual_device *vd)
{
- down_write(&gxp->vd_semaphore);
+ if (vd->core_cfg.daddr == 0)
+ return unmap_core_shared_buffer(vd);
- gxp_fw_destroy(gxp);
+ unmap_resource(vd, &vd->sys_cfg);
+ unmap_resource(vd, &vd->vd_cfg);
+ unmap_resource(vd, &vd->core_cfg);
+}
- up_write(&gxp->vd_semaphore);
+static int gxp_vd_imgcfg_map(void *data, dma_addr_t daddr, phys_addr_t paddr,
+ size_t size, unsigned int flags)
+{
+ struct gxp_virtual_device *vd = data;
+
+ if (flags & GCIP_IMAGE_CONFIG_FLAGS_SECURE)
+ return 0;
+
+ return map_ns_region(vd, daddr, size);
+}
+
+static void gxp_vd_imgcfg_unmap(void *data, dma_addr_t daddr, size_t size,
+ unsigned int flags)
+{
+ struct gxp_virtual_device *vd = data;
+
+ if (flags & GCIP_IMAGE_CONFIG_FLAGS_SECURE)
+ return;
+
+ unmap_ns_region(vd, daddr);
+}
+
+static int
+map_fw_image_config(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ struct gxp_firmware_loader_manager *fw_loader_mgr)
+{
+ int ret;
+ struct gcip_image_config *cfg;
+ static const struct gcip_image_config_ops gxp_vd_imgcfg_ops = {
+ .map = gxp_vd_imgcfg_map,
+ .unmap = gxp_vd_imgcfg_unmap,
+ };
+
+ cfg = &fw_loader_mgr->core_img_cfg;
+ ret = gcip_image_config_parser_init(&vd->cfg_parser, &gxp_vd_imgcfg_ops,
+ gxp->dev, vd);
+ /* parser_init() never fails unless we pass invalid OPs. */
+ if (unlikely(ret))
+ return ret;
+ ret = gcip_image_config_parse(&vd->cfg_parser, cfg);
+ if (ret) {
+ dev_err(gxp->dev, "Image config mapping failed");
+ return ret;
+ }
+ ret = map_cfg_regions(vd, cfg);
+ if (ret) {
+ dev_err(gxp->dev, "Config regions mapping failed");
+ gcip_image_config_clear(&vd->cfg_parser);
+ return ret;
+ }
+ vd->fw_ro_size = cfg->firmware_size;
+ /*
+ * To be compatible with image configs that do not set firmware_size,
+ * fall back to mapping the whole region to the carveout.
+ */
+ if (vd->fw_ro_size == 0)
+ vd->fw_ro_size = gxp->fwbufs[0].size;
+
+ return 0;
+}
+
+static void unmap_fw_image_config(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ unmap_cfg_regions(vd);
+ gcip_image_config_clear(&vd->cfg_parser);
+}
+
+/*
+ * For each core,
+ * - fw_rw_size = fwbufs[core].size - fw_ro_size
+ * - allocates rwdata_sgt[core] with size fw_rw_size
+ * - maps fwbufs[core].daddr -> fwbufs[core].paddr with size fw_ro_size
+ * - maps fwbufs[core].daddr + fw_ro_size -> rwdata_sgt[core]
+ */
+static int alloc_and_map_fw_image(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ size_t ro_size = vd->fw_ro_size, rw_size;
+ struct gxp_iommu_domain *gdomain = vd->domain;
+ int i, ret;
+
+ /* Maps all FW regions together and no rwdata_sgt in this case. */
+ if (ro_size == gxp->fwbufs[0].size)
+ return gxp_iommu_map(gxp, gdomain, gxp->fwbufs[0].daddr,
+ gxp->fwbufs[0].paddr,
+ ro_size * GXP_NUM_CORES,
+ IOMMU_READ | IOMMU_WRITE);
+
+ dev_info(gxp->dev, "mapping firmware RO size %#zx", ro_size);
+ rw_size = gxp->fwbufs[0].size - ro_size;
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ vd->rwdata_sgt[i] =
+ gcip_alloc_noncontiguous(gxp->dev, rw_size, GFP_KERNEL);
+ if (!vd->rwdata_sgt[i]) {
+ dev_err(gxp->dev,
+ "allocate firmware data for core %d failed", i);
+ ret = -ENOMEM;
+ goto err_free_sgt;
+ }
+ }
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ ret = gxp_iommu_map(gxp, gdomain, gxp->fwbufs[i].daddr,
+ gxp->fwbufs[i].paddr, ro_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ dev_err(gxp->dev, "map firmware RO for core %d failed",
+ i);
+ goto err_unmap;
+ }
+ ret = gxp_dma_map_iova_sgt(gxp, vd->domain,
+ gxp->fwbufs[i].daddr + ro_size,
+ vd->rwdata_sgt[i],
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ dev_err(gxp->dev, "map firmware RW for core %d failed",
+ i);
+ gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[i].daddr,
+ ro_size);
+ goto err_unmap;
+ }
+ }
+ return 0;
+
+err_unmap:
+ while (i--) {
+ gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[i].daddr, ro_size);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain,
+ gxp->fwbufs[i].daddr + ro_size,
+ vd->rwdata_sgt[i]);
+ }
+err_free_sgt:
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (vd->rwdata_sgt[i])
+ gcip_free_noncontiguous(vd->rwdata_sgt[i]);
+ vd->rwdata_sgt[i] = NULL;
+ }
+ return ret;
+}
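A sketch of the per-core IOVA layout the function above produces when `firmware_size` is set; the proportions are illustrative and depend on the image config:

/*
 *   fwbufs[i].daddr                        fwbufs[i].daddr + fw_ro_size
 *   |-- RO image, backed by the carveout --|-- RW data, backed by the ------|
 *   |   at fwbufs[i].paddr                 |   pages of vd->rwdata_sgt[i]   |
 *   <------------ fw_ro_size -------------><-- fwbufs[i].size - fw_ro_size ->
 */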
+
+static void unmap_and_free_fw_image(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ size_t ro_size = vd->fw_ro_size;
+ struct gxp_iommu_domain *gdomain = vd->domain;
+ int i;
+
+ if (ro_size == gxp->fwbufs[0].size) {
+ gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[0].daddr,
+ ro_size * GXP_NUM_CORES);
+ return;
+ }
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ gxp_iommu_unmap(gxp, gdomain, gxp->fwbufs[i].daddr, ro_size);
+ gxp_dma_unmap_iova_sgt(gxp, vd->domain,
+ gxp->fwbufs[i].daddr + ro_size,
+ vd->rwdata_sgt[i]);
+ }
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (vd->rwdata_sgt[i])
+ gcip_free_noncontiguous(vd->rwdata_sgt[i]);
+ vd->rwdata_sgt[i] = NULL;
+ }
+}
+
+static int map_core_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core_list)
+{
+ struct buffer_data *data[2];
+ int i, core, ret;
+
+ if (!gxp->core_telemetry_mgr)
+ return 0;
+
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ if (!data[i] || !data[i]->is_enabled)
+ continue;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ ret = gxp_dma_map_allocated_coherent_buffer(
+ gxp, &data[i]->buffers[core], vd->domain, 0);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Mapping core telemetry buffer to core %d failed",
+ core);
+ goto error;
+ }
+ }
+ }
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return 0;
+error:
+ while (core--) {
+ if (!(BIT(core) & core_list))
+ continue;
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data[i]->buffers[core]);
+ }
+ while (i--) {
+ if (!data[i] || !data[i]->is_enabled)
+ continue;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data[i]->buffers[core]);
+ }
+ }
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+ return ret;
+}
+
+static void unmap_core_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint core_list)
+{
+ struct buffer_data *data[2];
+ int i, core;
+
+ if (!gxp->core_telemetry_mgr)
+ return;
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ if (!data[i] || !data[i]->is_enabled)
+ continue;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd->domain, &data[i]->buffers[core]);
+ }
+ }
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+}
+
+static int map_debug_dump_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ if (!gxp->debug_dump_mgr)
+ return 0;
+
+ return gxp_dma_map_allocated_coherent_buffer(
+ gxp, &gxp->debug_dump_mgr->buf, vd->domain, 0);
+}
+
+static void unmap_debug_dump_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ if (!gxp->debug_dump_mgr)
+ return;
+
+ gxp_dma_unmap_allocated_coherent_buffer(gxp, vd->domain,
+ &gxp->debug_dump_mgr->buf);
+}
+
+static int assign_cores(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint core;
+ uint available_cores = 0;
+
+ if (!gxp_core_boot(gxp)) {
+ /* We don't do core assignment when cores are managed by MCU. */
+ vd->core_list = BIT(GXP_NUM_CORES) - 1;
+ return 0;
+ }
+ vd->core_list = 0;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == NULL) {
+ if (available_cores < vd->num_cores)
+ vd->core_list |= BIT(core);
+ available_cores++;
+ }
+ }
+ if (available_cores < vd->num_cores) {
+ dev_err(gxp->dev,
+ "Insufficient available cores. Available: %u. Requested: %u\n",
+ available_cores, vd->num_cores);
+ return -EBUSY;
+ }
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ if (vd->core_list & BIT(core))
+ gxp->core_to_vd[core] = vd;
+ return 0;
+}
+
+static void unassign_cores(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint core;
+
+ if (!gxp_core_boot(gxp))
+ return;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd)
+ gxp->core_to_vd[core] = NULL;
+ }
+}
+
+/* Saves the state of this VD's doorbells and clears them. */
+static void vd_save_doorbells(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint base_doorbell;
+ uint i;
+
+ if (!gxp_fw_data_use_per_vd_config(vd))
+ return;
+ base_doorbell = GXP_DOORBELLS_START +
+ gxp_vd_hw_slot_id(vd) * GXP_NUM_DOORBELLS_PER_VD;
+ for (i = 0; i < ARRAY_SIZE(vd->doorbells_state); i++) {
+ vd->doorbells_state[i] =
+ gxp_doorbell_status(gxp, base_doorbell + i);
+ gxp_doorbell_clear(gxp, base_doorbell + i);
+ }
+}
+
+/* Restores the state of this VD's doorbells. */
+static void vd_restore_doorbells(struct gxp_virtual_device *vd)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint base_doorbell;
+ uint i;
+
+ if (!gxp_fw_data_use_per_vd_config(vd))
+ return;
+ base_doorbell = GXP_DOORBELLS_START +
+ gxp_vd_hw_slot_id(vd) * GXP_NUM_DOORBELLS_PER_VD;
+ for (i = 0; i < ARRAY_SIZE(vd->doorbells_state); i++)
+ if (vd->doorbells_state[i])
+ gxp_doorbell_set(gxp, base_doorbell + i);
+ else
+ gxp_doorbell_clear(gxp, base_doorbell + i);
+}
+
+static void set_config_version(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd)
+{
+ vd->config_version = gxp->fw_loader_mgr->core_img_cfg.config_version;
+ /*
+ * Let gxp_dma_map_core_resources() map this region only when using the
+ * legacy protocol.
+ *
+ * TODO(b/265748027): remove this
+ */
+ if (gxp_fw_data_use_per_vd_config(vd))
+ gxp->fwdatabuf.daddr = 0;
+}
+
+static void debug_dump_lock(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
+{
+ if (!mutex_trylock(&vd->debug_dump_lock)) {
+ /*
+ * Release @gxp->vd_semaphore to let other virtual devices proceed
+ * with their work, and wait for the debug dump to finish.
+ */
+ up_write(&gxp->vd_semaphore);
+ mutex_lock(&vd->debug_dump_lock);
+ down_write(&gxp->vd_semaphore);
+ }
+}
+
+static inline void debug_dump_unlock(struct gxp_virtual_device *vd)
+{
+ mutex_unlock(&vd->debug_dump_lock);
}
struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
@@ -61,6 +664,7 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
int i;
int err;
+ lockdep_assert_held_write(&gxp->vd_semaphore);
/* Assumes 0 < requested_cores <= GXP_NUM_CORES */
if (requested_cores == 0 || requested_cores > GXP_NUM_CORES)
return ERR_PTR(-EINVAL);
@@ -72,30 +676,41 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->gxp = gxp;
vd->num_cores = requested_cores;
vd->state = GXP_VD_OFF;
-
- vd->core_domains =
- kcalloc(requested_cores, sizeof(*vd->core_domains), GFP_KERNEL);
- if (!vd->core_domains) {
- err = -ENOMEM;
+ vd->slice_index = -1;
+ vd->client_id = -1;
+ vd->tpu_client_id = -1;
+ spin_lock_init(&vd->credit_lock);
+ refcount_set(&vd->refcount, 1);
+ vd->credit = GXP_COMMAND_CREDIT_PER_VD;
+ vd->first_open = true;
+ vd->vdid = atomic_inc_return(&gxp->next_vdid);
+ mutex_init(&vd->fence_list_lock);
+ INIT_LIST_HEAD(&vd->gxp_fence_list);
+ mutex_init(&vd->debug_dump_lock);
+
+ vd->domain = gxp_domain_pool_alloc(gxp->domain_pool);
+ if (!vd->domain) {
+ err = -EBUSY;
goto error_free_vd;
}
- for (i = 0; i < requested_cores; i++) {
- vd->core_domains[i] = gxp_domain_pool_alloc(gxp->domain_pool);
- if (!vd->core_domains[i]) {
- err = -EBUSY;
- goto error_free_domains;
- }
+
+ vd->slice_index = ida_alloc_max(&gxp->shared_slice_idp,
+ GXP_NUM_SHARED_SLICES - 1, GFP_KERNEL);
+ if (vd->slice_index < 0) {
+ err = vd->slice_index;
+ goto error_free_domain;
}
vd->mailbox_resp_queues = kcalloc(
vd->num_cores, sizeof(*vd->mailbox_resp_queues), GFP_KERNEL);
if (!vd->mailbox_resp_queues) {
err = -ENOMEM;
- goto error_free_domains;
+ goto error_free_slice_index;
}
for (i = 0; i < vd->num_cores; i++) {
- INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].queue);
+ INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].wait_queue);
+ INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].dest_queue);
spin_lock_init(&vd->mailbox_resp_queues[i].lock);
init_waitqueue_head(&vd->mailbox_resp_queues[i].waitq);
}
@@ -103,12 +718,54 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
vd->mappings_root = RB_ROOT;
init_rwsem(&vd->mappings_semaphore);
+ err = assign_cores(vd);
+ if (err)
+ goto error_free_resp_queues;
+
+ /*
+ * This assumes the firmware is requested before allocating a VD, which
+ * holds because we request the firmware on the first GXP device open.
+ */
+ err = map_fw_image_config(gxp, vd, gxp->fw_loader_mgr);
+ if (err)
+ goto error_unassign_cores;
+
+ set_config_version(gxp, vd);
+ /* After map_fw_image_config because it needs vd->vd/core_cfg. */
+ gxp_fw_data_populate_vd_cfg(gxp, vd);
+ err = gxp_dma_map_core_resources(gxp, vd->domain, vd->core_list,
+ vd->slice_index);
+ if (err)
+ goto error_unmap_imgcfg;
+ err = alloc_and_map_fw_image(gxp, vd);
+ if (err)
+ goto error_unmap_core_resources;
+ err = map_core_telemetry_buffers(gxp, vd, vd->core_list);
+ if (err)
+ goto error_unmap_fw_data;
+ err = map_debug_dump_buffer(gxp, vd);
+ if (err)
+ goto error_unmap_core_telemetry_buffer;
+
return vd;
-error_free_domains:
- for (i -= 1; i >= 0; i--)
- gxp_domain_pool_free(gxp->domain_pool, vd->core_domains[i]);
- kfree(vd->core_domains);
+error_unmap_core_telemetry_buffer:
+ unmap_core_telemetry_buffers(gxp, vd, vd->core_list);
+error_unmap_fw_data:
+ unmap_and_free_fw_image(gxp, vd);
+error_unmap_core_resources:
+ gxp_dma_unmap_core_resources(gxp, vd->domain, vd->core_list);
+error_unmap_imgcfg:
+ unmap_fw_image_config(gxp, vd);
+error_unassign_cores:
+ unassign_cores(vd);
+error_free_resp_queues:
+ kfree(vd->mailbox_resp_queues);
+error_free_slice_index:
+ if (vd->slice_index >= 0)
+ ida_free(&gxp->shared_slice_idp, vd->slice_index);
+error_free_domain:
+ gxp_domain_pool_free(gxp->domain_pool, vd->domain);
error_free_vd:
kfree(vd);
@@ -117,28 +774,29 @@ error_free_vd:
void gxp_vd_release(struct gxp_virtual_device *vd)
{
- struct gxp_async_response *cur, *nxt;
- int i;
- unsigned long flags;
struct rb_node *node;
struct gxp_mapping *mapping;
+ struct gxp_dev *gxp = vd->gxp;
+ uint core_list = vd->core_list;
- /* Cleanup any unconsumed responses */
- for (i = 0; i < vd->num_cores; i++) {
- /*
- * Since VD is releasing, it is not necessary to lock here.
- * Do it anyway for consistency.
- */
- spin_lock_irqsave(&vd->mailbox_resp_queues[i].lock, flags);
- list_for_each_entry_safe(cur, nxt,
- &vd->mailbox_resp_queues[i].queue,
- list_entry) {
- list_del(&cur->list_entry);
- kfree(cur);
- }
- spin_unlock_irqrestore(&vd->mailbox_resp_queues[i].lock, flags);
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+ debug_dump_lock(gxp, vd);
+
+ if (vd->is_secure) {
+ mutex_lock(&gxp->secure_vd_lock);
+ gxp->secure_vd = NULL;
+ mutex_unlock(&gxp->secure_vd_lock);
}
+ unmap_debug_dump_buffer(gxp, vd);
+ unmap_core_telemetry_buffers(gxp, vd, core_list);
+ unmap_and_free_fw_image(gxp, vd);
+ gxp_dma_unmap_core_resources(gxp, vd->domain, core_list);
+ unmap_fw_image_config(gxp, vd);
+ unassign_cores(vd);
+
+ vd->gxp->mailbox_mgr->release_unconsumed_async_resps(vd);
+
/*
* Release any un-mapped mappings
* Once again, it's not necessary to lock the mappings_semaphore here
@@ -152,317 +810,319 @@ void gxp_vd_release(struct gxp_virtual_device *vd)
}
up_write(&vd->mappings_semaphore);
- for (i = 0; i < vd->num_cores; i++)
- gxp_domain_pool_free(vd->gxp->domain_pool, vd->core_domains[i]);
- kfree(vd->core_domains);
kfree(vd->mailbox_resp_queues);
- kfree(vd);
+ if (vd->slice_index >= 0)
+ ida_free(&vd->gxp->shared_slice_idp, vd->slice_index);
+ gxp_domain_pool_free(vd->gxp->domain_pool, vd->domain);
+
+ if (vd->invalidate_eventfd)
+ gxp_eventfd_put(vd->invalidate_eventfd);
+ vd->invalidate_eventfd = NULL;
+
+ vd->state = GXP_VD_RELEASED;
+ debug_dump_unlock(vd);
+ gxp_vd_put(vd);
}
-static int map_telemetry_buffers(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+int gxp_vd_block_ready(struct gxp_virtual_device *vd)
{
- int ret = 0;
- struct buffer_data *buff_data;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
+ struct gxp_dev *gxp = vd->gxp;
+ enum gxp_virtual_device_state orig_state;
+ int ret;
- /* Map logging buffers if logging is enabled */
- buff_data = gxp->telemetry_mgr->logging_buff_data;
- if (buff_data && buff_data->is_enabled) {
- ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, buff_data->buffers[core], vd, BIT(virt_core),
- buff_data->size, buff_data->buffer_daddrs[core], 0);
- /* Don't bother checking tracing if logging fails */
- if (ret)
- goto out;
- }
+ lockdep_assert_held_write(&gxp->vd_semaphore);
- /* Map tracing buffers if tracing is enabled */
- buff_data = gxp->telemetry_mgr->tracing_buff_data;
- if (buff_data && buff_data->is_enabled) {
- ret = gxp_dma_map_allocated_coherent_buffer(
- gxp, buff_data->buffers[core], vd, BIT(virt_core),
- buff_data->size, buff_data->buffer_daddrs[core], 0);
- /* If tracing fails, unmap logging if it was enabled */
+ orig_state = vd->state;
+ if (orig_state != GXP_VD_OFF && orig_state != GXP_VD_SUSPENDED)
+ return -EINVAL;
+ ret = gxp_dma_domain_attach_device(gxp, vd->domain, vd->core_list);
+ if (ret)
+ return ret;
+ if (orig_state == GXP_VD_OFF)
+ vd->state = GXP_VD_READY;
+ if (gxp->after_vd_block_ready) {
+ ret = gxp->after_vd_block_ready(gxp, vd);
if (ret) {
- buff_data = gxp->telemetry_mgr->logging_buff_data;
- if (buff_data && buff_data->is_enabled)
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core),
- buff_data->size,
- buff_data->buffer_daddrs[core]);
+ gxp_dma_domain_detach_device(gxp, vd->domain);
+ vd->state = orig_state;
+ return ret;
}
}
-
-out:
- mutex_unlock(&gxp->telemetry_mgr->lock);
-
- return ret;
-}
-
-static void unmap_telemetry_buffers(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core, uint core)
-{
- struct buffer_data *buff_data;
-
- mutex_lock(&gxp->telemetry_mgr->lock);
-
- buff_data = gxp->telemetry_mgr->logging_buff_data;
- if (buff_data && buff_data->is_enabled)
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), buff_data->size,
- buff_data->buffer_daddrs[core]);
-
- buff_data = gxp->telemetry_mgr->tracing_buff_data;
- if (buff_data && buff_data->is_enabled)
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), buff_data->size,
- buff_data->buffer_daddrs[core]);
-
- mutex_unlock(&gxp->telemetry_mgr->lock);
+ return 0;
}
-static int map_debug_dump_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd, uint virt_core,
- uint core)
+void gxp_vd_block_unready(struct gxp_virtual_device *vd)
{
- /* If debug-dump is not enabled, nothing to map */
- if (!gxp->debug_dump_mgr)
- return 0;
-
- return gxp_dma_map_allocated_coherent_buffer(
- gxp, gxp->debug_dump_mgr->buf.vaddr, vd, BIT(virt_core),
- gxp->debug_dump_mgr->buf.size, gxp->debug_dump_mgr->buf.daddr,
- 0);
-}
+ struct gxp_dev *gxp = vd->gxp;
-static void unmap_debug_dump_buffer(struct gxp_dev *gxp,
- struct gxp_virtual_device *vd,
- uint virt_core, uint core)
-{
- if (!gxp->debug_dump_mgr)
- return;
+ lockdep_assert_held_write(&gxp->vd_semaphore);
- gxp_dma_unmap_allocated_coherent_buffer(
- gxp, vd, BIT(virt_core), gxp->debug_dump_mgr->buf.size,
- gxp->debug_dump_mgr->buf.daddr);
+ if (gxp->before_vd_block_unready)
+ gxp->before_vd_block_unready(gxp, vd);
+ gxp_dma_domain_detach_device(gxp, vd->domain);
}
-/* Caller must hold gxp->vd_semaphore for writing */
-int gxp_vd_start(struct gxp_virtual_device *vd)
+int gxp_vd_run(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
- uint core;
- uint available_cores = 0;
- uint cores_remaining = vd->num_cores;
- uint core_list = 0;
- uint virt_core = 0;
- int ret = 0;
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == NULL) {
- if (available_cores < vd->num_cores)
- core_list |= BIT(core);
- available_cores++;
- }
- }
-
- if (available_cores < vd->num_cores) {
- dev_err(gxp->dev, "Insufficient available cores. Available: %u. Requested: %u\n",
- available_cores, vd->num_cores);
- return -EBUSY;
- }
-
- vd->fw_app = gxp_fw_data_create_app(gxp, core_list);
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (cores_remaining == 0)
- break;
+ int ret;
+ enum gxp_virtual_device_state orig_state = vd->state;
- if (core_list & BIT(core)) {
- gxp->core_to_vd[core] = vd;
- cores_remaining--;
- ret = gxp_dma_domain_attach_device(gxp, vd, virt_core,
- core);
- if (ret)
- goto err_clean_all_cores;
- ret = gxp_dma_map_core_resources(gxp, vd, virt_core,
- core);
- if (ret)
- goto err_detach_domain;
- ret = map_telemetry_buffers(gxp, vd, virt_core, core);
- if (ret)
- goto err_unmap_res;
- ret = map_debug_dump_buffer(gxp, vd, virt_core, core);
- if (ret)
- goto err_unmap_telem;
- virt_core++;
- }
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+ if (orig_state != GXP_VD_READY && orig_state != GXP_VD_OFF)
+ return -EINVAL;
+ if (orig_state == GXP_VD_OFF) {
+ ret = gxp_vd_block_ready(vd);
+ /*
+		 * A failure of `gxp_vd_block_ready` means one of the following two things:
+		 *
+		 * 1. The MCU firmware is not working for some reason. If it crashed,
+		 *    @vd->state would normally be set to UNAVAILABLE by the crash handler, but
+		 *    if this function wins the race and takes @gxp->vd_semaphore before that
+		 *    handler, it is reasonable to set @vd->state to UNAVAILABLE from here.
+		 *
+		 * 2. Some information of the vd (or client), such as client_id or slice_index,
+		 *    is incorrect or not allowed by the MCU firmware, so `allocate_vmbox` or
+		 *    `link_offload_vmbox` has failed. In this case, setting @vd->state to
+		 *    UNAVAILABLE and letting the runtime close its fd and reallocate a vd is
+		 *    better than setting @vd->state to OFF.
+		 *
+		 * Therefore, set @vd->state to UNAVAILABLE if it returns an error.
+ */
+ if (ret)
+ goto err_vd_unavailable;
}
- ret = gxp_firmware_run(gxp, vd, core_list);
+ debug_dump_lock(gxp, vd);
+ /* Clear all doorbells */
+ vd_restore_doorbells(vd);
+ ret = gxp_firmware_run(gxp, vd, vd->core_list);
if (ret)
- goto err_clean_all_cores;
-
+ goto err_vd_block_unready;
vd->state = GXP_VD_RUNNING;
- return ret;
+ debug_dump_unlock(vd);
-err_unmap_telem:
- unmap_telemetry_buffers(gxp, vd, virt_core, core);
-err_unmap_res:
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
-err_detach_domain:
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
-err_clean_all_cores:
- gxp->core_to_vd[core] = NULL;
- virt_core--;
- while (core > 0) {
- core--;
- if (core_list & BIT(core)) {
- unmap_debug_dump_buffer(gxp, vd, virt_core, core);
- unmap_telemetry_buffers(gxp, vd, virt_core, core);
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
- gxp->core_to_vd[core] = NULL;
- virt_core--;
- }
- }
- gxp_fw_data_destroy_app(gxp, vd->fw_app);
+ return 0;
+err_vd_block_unready:
+ debug_dump_unlock(vd);
+ /* Run this only when gxp_vd_block_ready was executed. */
+ if (orig_state == GXP_VD_OFF)
+ gxp_vd_block_unready(vd);
+err_vd_unavailable:
+ vd->state = GXP_VD_UNAVAILABLE;
return ret;
}
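/*
 * Illustrative sketch (not part of this patch): a minimal caller of
 * gxp_vd_run() following the locking rule asserted above. The wrapper name
 * is hypothetical; the lock and the error behaviour mirror this file.
 */
static int example_start_vd(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
{
	int ret;

	down_write(&gxp->vd_semaphore);
	/* On failure, gxp_vd_run() leaves vd->state == GXP_VD_UNAVAILABLE. */
	ret = gxp_vd_run(vd);
	up_write(&gxp->vd_semaphore);

	return ret;
}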
-/* Caller must hold gxp->vd_semaphore for writing */
+/*
+ * Caller must hold gxp->vd_semaphore.
+ *
+ * This function will be called from the `gxp_client_destroy` function if @vd->state is not
+ * GXP_VD_OFF.
+ *
+ * Note for the case where the MCU firmware crashes:
+ *
+ * In the MCU mode, the `gxp_vd_suspend` function will redirect to this function, but that will not
+ * happen when @vd->state is GXP_VD_UNAVAILABLE. Therefore, if the MCU firmware crashes,
+ * @vd->state will be changed to GXP_VD_UNAVAILABLE and this function will not be called even
+ * though the runtime is going to release the vd wakelock.
+ *
+ * That means @vd->state will not be changed to GXP_VD_OFF when the vd wakelock is released (i.e.,
+ * the state will be kept as GXP_VD_UNAVAILABLE), and when the `gxp_vd_block_unready` function is
+ * called on release of the block wakelock, it will not send the `release_vmbox` and
+ * `unlink_offload_vmbox` KCI commands to the crashed MCU firmware. This function will finally be
+ * called when the runtime closes the fd of the device file.
+ */
void gxp_vd_stop(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
- uint core, core_list = 0;
- uint virt_core = 0;
+ uint phys_core;
+ uint core_list = vd->core_list;
uint lpm_state;
- if ((vd->state == GXP_VD_OFF || vd->state == GXP_VD_RUNNING) &&
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+ debug_dump_lock(gxp, vd);
+
+ if (gxp_core_boot(gxp) &&
+ (vd->state == GXP_VD_OFF || vd->state == GXP_VD_READY ||
+ vd->state == GXP_VD_RUNNING) &&
gxp_pm_get_blk_state(gxp) != AUR_OFF) {
- /*
- * Put all cores in the VD into reset so they can not wake each other up
- */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- lpm_state = gxp_lpm_get_state(gxp, core);
- if (lpm_state != LPM_PG_STATE)
- hold_core_in_reset(gxp, core);
+
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (core_list & BIT(phys_core)) {
+
+ lpm_state = gxp_lpm_get_state(gxp, CORE_TO_PSM(phys_core));
+
+ if (lpm_state == LPM_ACTIVE_STATE) {
+ /*
+ * If the core is in PS0 (not idle), it should
+ * be held in reset before attempting SW PG.
+ */
+ hold_core_in_reset(gxp, phys_core);
+ } else {
+ /*
+					 * If the core is idle and has already transitioned to PS1,
+ * we can attempt HW PG. In this case, we should ensure
+ * that the core doesn't get awakened by an external
+ * interrupt source before we attempt to HW PG the core.
+ */
+ gxp_firmware_disable_ext_interrupts(gxp, phys_core);
+ }
}
}
}
- for (core = 0; core < GXP_NUM_CORES; core++)
- if (gxp->core_to_vd[core] == vd)
- core_list |= BIT(core);
-
gxp_firmware_stop(gxp, vd, core_list);
+ if (vd->state != GXP_VD_UNAVAILABLE)
+ vd->state = GXP_VD_OFF;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- unmap_debug_dump_buffer(gxp, vd, virt_core, core);
- unmap_telemetry_buffers(gxp, vd, virt_core, core);
- gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
- if (vd->state == GXP_VD_RUNNING)
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
- gxp->core_to_vd[core] = NULL;
- virt_core++;
- }
+ debug_dump_unlock(vd);
+}
+
+static inline uint select_core(struct gxp_virtual_device *vd, uint virt_core,
+ uint phys_core)
+{
+ return gxp_fw_data_use_per_vd_config(vd) ? virt_core : phys_core;
+}
+
+static bool boot_state_is_suspend(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 *boot_state)
+{
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ *boot_state = gxp_firmware_get_boot_status(gxp, vd, core);
+ return *boot_state == GXP_BOOT_STATUS_SUSPENDED;
}
- if (!IS_ERR_OR_NULL(vd->fw_app)) {
- gxp_fw_data_destroy_app(gxp, vd->fw_app);
- vd->fw_app = NULL;
+ *boot_state = gxp_firmware_get_boot_mode(gxp, vd, core);
+ return *boot_state == GXP_BOOT_MODE_STATUS_SUSPEND_COMPLETED;
+}
+
+static bool boot_state_is_active(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core,
+ u32 *boot_state)
+{
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ *boot_state = gxp_firmware_get_boot_status(gxp, vd, core);
+ return *boot_state == GXP_BOOT_STATUS_ACTIVE;
}
+
+ *boot_state = gxp_firmware_get_boot_mode(gxp, vd, core);
+ return *boot_state == GXP_BOOT_MODE_STATUS_RESUME_COMPLETED;
}
/*
* Caller must have locked `gxp->vd_semaphore` for writing.
+ *
+ * This function will be called from the `gxp_client_release_vd_wakelock` function when the runtime
+ * is going to release the vd wakelock, but only if @vd->state is not GXP_VD_UNAVAILABLE.
+ *
+ * In the MCU mode, this function will redirect to the `gxp_vd_stop` function.
*/
void gxp_vd_suspend(struct gxp_virtual_device *vd)
{
- uint core;
+ uint virt_core, phys_core;
struct gxp_dev *gxp = vd->gxp;
+ uint core_list = vd->core_list;
u32 boot_state;
uint failed_cores = 0;
- uint virt_core;
+ if (!gxp_is_direct_mode(gxp) && gxp_core_boot(gxp))
+ return gxp_vd_stop(vd);
lockdep_assert_held_write(&gxp->vd_semaphore);
- dev_info(gxp->dev, "Suspending VD ...\n");
+ debug_dump_lock(gxp, vd);
+
+ dev_info(gxp->dev, "Suspending VD vdid=%d client_id=%d...\n", vd->vdid,
+ vd->client_id);
if (vd->state == GXP_VD_SUSPENDED) {
dev_err(gxp->dev,
"Attempt to suspend a virtual device twice\n");
- return;
+ goto out;
+ }
+ if (!gxp_core_boot(gxp)) {
+ vd->state = GXP_VD_SUSPENDED;
+ goto out;
}
gxp_pm_force_clkmux_normal(gxp);
/*
* Start the suspend process for all of this VD's cores without waiting
* for completion.
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- if (!gxp_lpm_wait_state_ne(gxp, core, LPM_ACTIVE_STATE)) {
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- hold_core_in_reset(gxp, core);
- dev_err(gxp->dev, "Core %u stuck at LPM_ACTIVE_STATE", core);
- continue;
- }
- /* Mark the boot mode as a suspend event */
- gxp_firmware_set_boot_mode(gxp, core,
- GXP_BOOT_MODE_REQUEST_SUSPEND);
- /*
- * Request a suspend event by sending a mailbox
- * notification.
- */
- gxp_notification_send(gxp, core,
- CORE_NOTIF_SUSPEND_REQUEST);
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ if (!gxp_lpm_wait_state_ne(gxp, CORE_TO_PSM(phys_core),
+ LPM_ACTIVE_STATE)) {
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
+ hold_core_in_reset(gxp, phys_core);
+ dev_err(gxp->dev, "Core %u stuck at LPM_ACTIVE_STATE",
+ phys_core);
+ continue;
}
+ /* Mark the boot mode as a suspend event */
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ gxp_firmware_set_boot_status(gxp, vd, core,
+ GXP_BOOT_STATUS_NONE);
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_SUSPEND);
+ } else {
+ gxp_firmware_set_boot_mode(
+ gxp, vd, core, GXP_BOOT_MODE_REQUEST_SUSPEND);
+ }
+ /*
+ * Request a suspend event by sending a mailbox
+ * notification.
+ */
+ gxp_notification_send(gxp, phys_core,
+ CORE_NOTIF_SUSPEND_REQUEST);
+ virt_core++;
}
- virt_core = 0;
/* Wait for all cores to complete core suspension. */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- if (!(failed_cores & BIT(core))) {
- if (!gxp_lpm_wait_state_eq(gxp, core,
- LPM_PG_STATE)) {
- boot_state = gxp_firmware_get_boot_mode(
- gxp, core);
- if (boot_state !=
- GXP_BOOT_MODE_STATUS_SUSPEND_COMPLETED) {
- dev_err(gxp->dev,
- "Suspension request on core %u failed (status: %u)",
- core, boot_state);
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- hold_core_in_reset(gxp, core);
- }
- } else {
- /* Re-set PS1 as the default low power state. */
- gxp_lpm_enable_state(gxp, core,
- LPM_CG_STATE);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ virt_core++;
+ if (failed_cores & BIT(phys_core))
+ continue;
+ if (!gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(phys_core),
+ LPM_PG_STATE)) {
+ if (!boot_state_is_suspend(gxp, vd, core,
+ &boot_state)) {
+ dev_err(gxp->dev,
+ "Suspension request on core %u failed (status: %u)",
+ phys_core, boot_state);
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
+ hold_core_in_reset(gxp, phys_core);
}
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
- virt_core++;
+ } else {
+ /* Re-set PS1 as the default low power state. */
+ gxp_lpm_enable_state(gxp, CORE_TO_PSM(phys_core),
+ LPM_CG_STATE);
}
}
if (vd->state == GXP_VD_UNAVAILABLE) {
/* shutdown all cores if virtual device is unavailable */
- for (core = 0; core < GXP_NUM_CORES; core++)
- if (gxp->core_to_vd[core] == vd)
- gxp_pm_core_off(gxp, core);
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++)
+ if (core_list & BIT(phys_core))
+ gxp_pm_core_off(gxp, phys_core);
} else {
+ /* Save and clear all doorbells. */
+ vd_save_doorbells(vd);
vd->blk_switch_count_when_suspended =
gxp_pm_get_blk_switch_count(gxp);
vd->state = GXP_VD_SUSPENDED;
}
gxp_pm_resume_clkmux(gxp);
+out:
+ debug_dump_unlock(vd);
}
/*
@@ -471,8 +1131,8 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
int gxp_vd_resume(struct gxp_virtual_device *vd)
{
int ret = 0;
- uint core;
- uint virt_core = 0;
+ uint phys_core, virt_core;
+ uint core_list = vd->core_list;
uint timeout;
u32 boot_state;
struct gxp_dev *gxp = vd->gxp;
@@ -480,90 +1140,112 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
uint failed_cores = 0;
lockdep_assert_held_write(&gxp->vd_semaphore);
- dev_info(gxp->dev, "Resuming VD ...\n");
+ debug_dump_lock(gxp, vd);
+ dev_info(gxp->dev, "Resuming VD vdid=%d client_id=%d...\n", vd->vdid,
+ vd->client_id);
if (vd->state != GXP_VD_SUSPENDED) {
dev_err(gxp->dev,
"Attempt to resume a virtual device which was not suspended\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
+ }
+ if (!gxp_core_boot(gxp)) {
+ vd->state = GXP_VD_RUNNING;
+ goto out;
}
gxp_pm_force_clkmux_normal(gxp);
curr_blk_switch_count = gxp_pm_get_blk_switch_count(gxp);
+
+ /* Restore the doorbells state for this VD. */
+ vd_restore_doorbells(vd);
+
/*
* Start the resume process for all of this VD's cores without waiting
* for completion.
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- gxp_dma_domain_attach_device(gxp, vd, virt_core, core);
- /*
- * The comparison is to check if blk_switch_count is
- * changed. If it's changed, it means the block is rebooted and
- * therefore we need to set up the hardware again.
- */
- if (vd->blk_switch_count_when_suspended != curr_blk_switch_count) {
- ret = gxp_firmware_setup_hw_after_block_off(
- gxp, core, /*verbose=*/false);
- if (ret) {
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- virt_core++;
- dev_err(gxp->dev, "Failed to power up core %u\n", core);
- continue;
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+ /*
+ * The comparison is to check if blk_switch_count is
+ * changed. If it's changed, it means the block is rebooted and
+ * therefore we need to set up the hardware again.
+ */
+ if (vd->blk_switch_count_when_suspended !=
+ curr_blk_switch_count) {
+ ret = gxp_firmware_setup_hw_after_block_off(
+ gxp, core, phys_core,
+ /*verbose=*/false);
+ if (ret) {
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
+ dev_err(gxp->dev,
+ "Failed to power up core %u\n",
+ phys_core);
+ continue;
}
- /* Mark this as a resume power-up event. */
- gxp_firmware_set_boot_mode(gxp, core,
- GXP_BOOT_MODE_REQUEST_RESUME);
- /*
- * Power on the core by explicitly switching its PSM to
- * PS0 (LPM_ACTIVE_STATE).
- */
- gxp_lpm_set_state(gxp, core, LPM_ACTIVE_STATE,
- /*verbose=*/false);
- virt_core++;
}
+ /* Mark this as a resume power-up event. */
+ if (gxp_fw_data_use_per_vd_config(vd)) {
+ gxp_firmware_set_boot_status(gxp, vd, core,
+ GXP_BOOT_STATUS_NONE);
+ gxp_firmware_set_boot_mode(gxp, vd, core,
+ GXP_BOOT_MODE_RESUME);
+ } else {
+ gxp_firmware_set_boot_mode(
+ gxp, vd, core, GXP_BOOT_MODE_REQUEST_RESUME);
+ }
+ /*
+ * Power on the core by explicitly switching its PSM to
+ * PS0 (LPM_ACTIVE_STATE).
+ */
+ gxp_lpm_set_state(gxp, CORE_TO_PSM(phys_core), LPM_ACTIVE_STATE,
+ /*verbose=*/false);
+ virt_core++;
}
/* Wait for all cores to complete core resumption. */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- if (!(failed_cores & BIT(core))) {
- /* in microseconds */
- timeout = 1000000;
- while (--timeout) {
- boot_state = gxp_firmware_get_boot_mode(
- gxp, core);
- if (boot_state ==
- GXP_BOOT_MODE_STATUS_RESUME_COMPLETED)
- break;
- udelay(1 * GXP_TIME_DELAY_FACTOR);
- }
- if (timeout == 0 &&
- boot_state !=
- GXP_BOOT_MODE_STATUS_RESUME_COMPLETED) {
- dev_err(gxp->dev,
- "Resume request on core %u failed (status: %u)",
- core, boot_state);
- ret = -EBUSY;
- vd->state = GXP_VD_UNAVAILABLE;
- failed_cores |= BIT(core);
- }
+ virt_core = 0;
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ uint core = select_core(vd, virt_core, phys_core);
+
+ if (!(core_list & BIT(phys_core)))
+ continue;
+
+ if (!(failed_cores & BIT(phys_core))) {
+ /* in microseconds */
+ timeout = 1000000;
+ while (--timeout) {
+ if (boot_state_is_active(gxp, vd, core,
+ &boot_state))
+ break;
+ udelay(1 * GXP_TIME_DELAY_FACTOR);
+ }
+ if (timeout == 0) {
+ dev_err(gxp->dev,
+ "Resume request on core %u failed (status: %u)",
+ phys_core, boot_state);
+ ret = -EBUSY;
+ vd->state = GXP_VD_UNAVAILABLE;
+ failed_cores |= BIT(phys_core);
}
}
+ virt_core++;
}
if (vd->state == GXP_VD_UNAVAILABLE) {
/* shutdown all cores if virtual device is unavailable */
- virt_core = 0;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_vd[core] == vd) {
- gxp_dma_domain_detach_device(gxp, vd, virt_core);
- gxp_pm_core_off(gxp, core);
- virt_core++;
- }
+ for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
+ if (core_list & BIT(phys_core))
+ gxp_pm_core_off(gxp, phys_core);
}
} else {
vd->state = GXP_VD_RUNNING;
}
gxp_pm_resume_clkmux(gxp);
+out:
+ debug_dump_unlock(vd);
return ret;
}
@@ -575,11 +1257,9 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
uint virt_core_index = 0;
for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
- if (gxp->core_to_vd[phys_core] == vd) {
- if (virt_core_index == virt_core) {
- /* Found virtual core */
+ if (vd->core_list & BIT(phys_core)) {
+ if (virt_core_index == virt_core)
return phys_core;
- }
virt_core_index++;
}
@@ -589,64 +1269,7 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
return -EINVAL;
}
-/* Caller must have locked `gxp->vd_semaphore` for reading */
-uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
- u16 virt_core_list)
-{
- uint phys_core_list = 0;
- uint virt_core = 0;
- int phys_core;
-
- while (virt_core_list) {
- /*
- * Get the next virt core by finding the index of the first
- * set bit in the core list.
- *
- * Subtract 1 since `ffs()` returns a 1-based index. Since
- * virt_core_list cannot be 0 at this point, no need to worry
- * about wrap-around.
- */
- virt_core = ffs(virt_core_list) - 1;
-
- /* Any invalid virt cores invalidate the whole list */
- phys_core = gxp_vd_virt_core_to_phys_core(vd, virt_core);
- if (phys_core < 0)
- return 0;
-
- phys_core_list |= BIT(phys_core);
- virt_core_list &= ~BIT(virt_core);
- }
-
- return phys_core_list;
-}
-
-/* Caller must have locked `gxp->vd_semaphore` for reading */
-int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd,
- u16 phys_core)
-{
- struct gxp_dev *gxp = vd->gxp;
- int virt_core = 0;
- uint core;
-
- if (gxp->core_to_vd[phys_core] != vd) {
- virt_core = -EINVAL;
- goto out;
- }
-
- /*
- * A core's virtual core ID == the number of physical cores in the same
- * virtual device with a lower physical core ID than its own.
- */
- for (core = 0; core < phys_core; core++) {
- if (gxp->core_to_vd[core] == vd)
- virt_core++;
- }
-out:
- return virt_core;
-}
-
-int gxp_vd_mapping_store(struct gxp_virtual_device *vd,
- struct gxp_mapping *map)
+int gxp_vd_mapping_store(struct gxp_virtual_device *vd, struct gxp_mapping *map)
{
struct rb_node **link;
struct rb_node *parent = NULL;
@@ -785,3 +1408,154 @@ struct gxp_mapping *gxp_vd_mapping_search_host(struct gxp_virtual_device *vd,
return NULL;
}
+
+bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd)
+{
+ bool ret = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd->credit_lock, flags);
+ if (vd->credit == 0)
+ ret = false;
+ else
+ vd->credit--;
+ spin_unlock_irqrestore(&vd->credit_lock, flags);
+
+ return ret;
+}
+
+void gxp_vd_release_credit(struct gxp_virtual_device *vd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd->credit_lock, flags);
+ if (unlikely(vd->credit >= GXP_COMMAND_CREDIT_PER_VD))
+ dev_err(vd->gxp->dev, "unbalanced VD credit");
+ else
+ vd->credit++;
+ spin_unlock_irqrestore(&vd->credit_lock, flags);
+}
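/*
 * Illustrative sketch (not part of this patch): the intended pairing of the
 * credit helpers around a mailbox command in MCU mode, per the @credit
 * documentation in gxp-vd.h. The dispatch function is a hypothetical
 * stand-in for the real mailbox send path.
 */
static int example_dispatch_to_mailbox(struct gxp_virtual_device *vd, void *cmd)
{
	return 0;	/* stand-in; the real path queues @cmd to the MCU */
}

static int example_send_command(struct gxp_virtual_device *vd, void *cmd)
{
	int ret;

	if (!gxp_vd_has_and_use_credit(vd))
		return -EBUSY;	/* too many commands in flight for this VD */

	ret = example_dispatch_to_mailbox(vd, cmd);
	if (ret)
		gxp_vd_release_credit(vd);	/* command never left; return the credit */

	/* On success, the credit is released when the response arrives. */
	return ret;
}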
+
+void gxp_vd_put(struct gxp_virtual_device *vd)
+{
+ if (!vd)
+ return;
+ if (refcount_dec_and_test(&vd->refcount))
+ kfree(vd);
+}
+
+void gxp_vd_invalidate_with_client_id(struct gxp_dev *gxp, int client_id,
+ uint core_list)
+{
+ struct gxp_client *client = NULL, *c;
+ uint core;
+
+ /*
+	 * Prevent @gxp->client_list from being changed while handling the crash.
+ * The user cannot open or close an FD until this function releases the lock.
+ */
+ mutex_lock(&gxp->client_list_lock);
+
+ /*
+	 * Find the vd corresponding to @client_id.
+ * If it holds a block wakelock, we should discard all pending/unconsumed UCI responses
+ * and change the state of the vd to GXP_VD_UNAVAILABLE.
+ */
+ list_for_each_entry (c, &gxp->client_list, list_entry) {
+ down_write(&c->semaphore);
+ down_write(&gxp->vd_semaphore);
+ if (c->vd && c->vd->client_id == client_id) {
+ client = c;
+ break;
+ }
+ up_write(&gxp->vd_semaphore);
+ up_write(&c->semaphore);
+ }
+
+ mutex_unlock(&gxp->client_list_lock);
+
+ if (!client) {
+ dev_err(gxp->dev, "Failed to find a VD, client_id=%d",
+ client_id);
+ /*
+ * Invalidate all debug dump segments if debug dump
+ * is enabled and core_list is not empty.
+ */
+ if (!gxp_debug_dump_is_enabled() || !core_list)
+ return;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(BIT(core) & core_list))
+ continue;
+ mutex_lock(&gxp->debug_dump_mgr->debug_dump_lock);
+ gxp_debug_dump_invalidate_segments(gxp, core);
+ mutex_unlock(&gxp->debug_dump_mgr->debug_dump_lock);
+ }
+ return;
+ }
+
+ gxp_vd_invalidate(gxp, client->vd);
+ gxp_vd_generate_debug_dump(gxp, client->vd, core_list);
+
+ up_write(&gxp->vd_semaphore);
+ up_write(&client->semaphore);
+}
+
+void gxp_vd_invalidate(struct gxp_dev *gxp, struct gxp_virtual_device *vd)
+{
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+
+ dev_err(gxp->dev, "Invalidate a VD, VDID=%d, client_id=%d", vd->vdid,
+ vd->client_id);
+
+ if (vd->state != GXP_VD_UNAVAILABLE) {
+ if (gxp->mailbox_mgr->release_unconsumed_async_resps)
+ gxp->mailbox_mgr->release_unconsumed_async_resps(vd);
+
+ vd->state = GXP_VD_UNAVAILABLE;
+
+ if (vd->invalidate_eventfd)
+ gxp_eventfd_signal(vd->invalidate_eventfd);
+ } else {
+ dev_dbg(gxp->dev, "This VD is already invalidated");
+ }
+}
+
+void gxp_vd_generate_debug_dump(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core_list)
+{
+ int ret;
+
+ if (!gxp_debug_dump_is_enabled() || !core_list)
+ return;
+
+ lockdep_assert_held_write(&gxp->vd_semaphore);
+
+ /*
+ * We should increase the refcount of @vd because @gxp->vd_semaphore will be
+	 * released below and the client can release @vd asynchronously.
+ */
+ vd = gxp_vd_get(vd);
+
+ /*
+	 * Release @gxp->vd_semaphore before generating the debug dump and take it
+	 * again after the dump completes, so that other virtual devices are not
+	 * blocked from proceeding with their work.
+ */
+ up_write(&gxp->vd_semaphore);
+ mutex_lock(&vd->debug_dump_lock);
+
+ /*
+	 * Process the debug dump if it is enabled and core_list is not empty.
+	 * Keep holding the client lock while processing the dumps; the vd
+	 * lock will be taken and released inside the debug dump
+	 * implementation logic below.
+ */
+ ret = gxp_debug_dump_process_dump_mcu_mode(gxp, core_list, vd);
+ if (ret)
+ dev_err(gxp->dev, "debug dump processing failed (ret=%d).\n",
+ ret);
+
+ mutex_unlock(&vd->debug_dump_lock);
+ down_write(&gxp->vd_semaphore);
+ gxp_vd_put(vd);
+}
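/*
 * Illustrative sketch (not part of this patch): the get/put pattern used by
 * gxp_vd_generate_debug_dump() above, for any caller that must drop
 * @gxp->vd_semaphore while still relying on @vd staying alive. The worker
 * name is hypothetical.
 */
static void example_long_operation(struct gxp_dev *gxp,
				   struct gxp_virtual_device *vd)
{
	lockdep_assert_held_write(&gxp->vd_semaphore);

	vd = gxp_vd_get(vd);		/* pin @vd before dropping the lock */
	up_write(&gxp->vd_semaphore);

	/* ... long-running work that must not block other VDs ... */

	down_write(&gxp->vd_semaphore);
	gxp_vd_put(vd);			/* may free @vd if this was the last reference */
}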
diff --git a/gxp-vd.h b/gxp-vd.h
index feab79f..e0bd5ce 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -2,51 +2,80 @@
/*
* GXP virtual device manager.
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021-2022 Google LLC
*/
+
#ifndef __GXP_VD_H__
#define __GXP_VD_H__
#include <linux/iommu.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/rwsem.h>
+#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <gcip/gcip-image-config.h>
+
+#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-mapping.h"
+/* TODO(b/259192112): set to 8 once the runtime has added the credit limit. */
+#define GXP_COMMAND_CREDIT_PER_VD 256
+
+/* A special client ID for secure workloads pre-agreed with MCU firmware. */
+#define SECURE_CLIENT_ID (3 << 10)
+
struct mailbox_resp_queue {
- /* Queue of `struct gxp_async_response`s */
- struct list_head queue;
+ /* Queue of waiting async responses */
+ struct list_head wait_queue;
+ /* Queue of arrived async responses */
+ struct list_head dest_queue;
/* Lock protecting access to the `queue` */
spinlock_t lock;
/* Waitqueue to wait on if the queue is empty */
wait_queue_head_t waitq;
+ /*
+ * If true, the user cannot send requests anymore.
+ * This must be protected by @lock.
+ */
+ bool wait_queue_closed;
};
enum gxp_virtual_device_state {
- GXP_VD_OFF = 0,
- GXP_VD_RUNNING = 1,
- GXP_VD_SUSPENDED = 2,
+ GXP_VD_OFF,
+ GXP_VD_READY,
+ GXP_VD_RUNNING,
+ GXP_VD_SUSPENDED,
/*
* If the virtual device is in the unavailable state, it won't be changed
* back no matter what we do.
* Note: this state will only be set on suspend/resume failure.
*/
- GXP_VD_UNAVAILABLE = 3,
+ GXP_VD_UNAVAILABLE,
+ /*
+	 * gxp_vd_release() has been called. A VD in this state is waiting
+	 * for the last reference to be put(). All fields in the VD are
+	 * invalid in this state.
+ */
+ GXP_VD_RELEASED,
};
struct gxp_virtual_device {
struct gxp_dev *gxp;
uint num_cores;
void *fw_app;
- struct iommu_domain **core_domains;
+ struct gxp_iommu_domain *domain;
struct mailbox_resp_queue *mailbox_resp_queues;
struct rb_root mappings_root;
struct rw_semaphore mappings_semaphore;
+ /* Used to save doorbell state on VD resume. */
+ uint doorbells_state[GXP_NUM_DOORBELLS_PER_VD];
enum gxp_virtual_device_state state;
/*
* Record the gxp->power_mgr->blk_switch_count when the vd was
@@ -55,18 +84,87 @@ struct gxp_virtual_device {
* process.
*/
u64 blk_switch_count_when_suspended;
+ /*
+	 * The @domain of each virtual device maps a slice of the shared buffer. This field
+	 * stores which slice index is used by this VD.
+ */
+ int slice_index;
+ /*
+ * The SG table that holds the firmware RW data region.
+ */
+ struct sg_table *rwdata_sgt[GXP_NUM_CORES];
+ /*
+ * The SG table that holds the regions specified in the image config's
+ * non-secure IOMMU mappings.
+ */
+ struct {
+ dma_addr_t daddr;
+ struct sg_table *sgt;
+ } ns_regions[GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS];
+ /* The firmware size specified in image config. */
+ u32 fw_ro_size;
+ /*
+ * The config regions specified in image config.
+ * core_cfg's size should be a multiple of GXP_NUM_CORES.
+ */
+ struct gxp_mapped_resource core_cfg, vd_cfg, sys_cfg;
+ uint core_list;
+ /*
+ * The ID of DSP client. -1 if it is not allocated.
+ * This is allocated by the DSP kernel driver, but will be set to this variable only when
+ * the client of this vd acquires the block wakelock successfully. (i.e, after the kernel
+ * driver allocates a virtual mailbox with the firmware side successfully by sending the
+ * `allocate_vmbox` KCI command.)
+ */
+ int client_id;
+ /*
+ * The ID of TPU client. -1 if it is not allocated.
+ * This ID will be fetched from the TPU kernel driver.
+ */
+ int tpu_client_id;
+ /*
+ * Protects credit. Use a spin lock because the critical section of
+ * using @credit is pretty small.
+ */
+ spinlock_t credit_lock;
+ /*
+ * Credits for sending mailbox commands. It's initialized as
+ * GXP_COMMAND_CREDIT_PER_VD. The value is decreased on sending
+ * mailbox commands; increased on receiving mailbox responses.
+ * Mailbox command requests are rejected when this value reaches 0.
+ *
+ * Only used in MCU mode.
+ */
+ uint credit;
+ /* Whether it's the first time allocating a VMBox for this VD. */
+ bool first_open;
+ bool is_secure;
+ refcount_t refcount;
+ /* A constant ID assigned after VD is allocated. For debug only. */
+ int vdid;
+ struct gcip_image_config_parser cfg_parser;
+ /* The config version specified in firmware's image config. */
+ u32 config_version;
+ /* Protects @dma_fence_list. */
+ struct mutex fence_list_lock;
+ /* List of GXP DMA fences owned by this VD. */
+ struct list_head gxp_fence_list;
+ /* Protects changing the state of vd while generating a debug dump. */
+ struct mutex debug_dump_lock;
+ /* An eventfd which will be triggered when this vd is invalidated. */
+ struct gxp_eventfd *invalidate_eventfd;
+ /*
+	 * If true, the MCU FW communicating with this VD has crashed, and this VD must not
+	 * work with any MCU FW anymore regardless of its state.
+ */
+ bool mcu_crashed;
};
/*
- * TODO(b/193180931) cleanup the relationship between the internal GXP modules.
- * For example, whether or not gxp_vd owns the gxp_fw module, and if so, if
- * other modules are expected to access the gxp_fw directly or only via gxp_vd.
- */
-/*
* Initializes the device management subsystem and allocates resources for it.
* This is expected to be called once per driver lifecycle.
*/
-int gxp_vd_init(struct gxp_dev *gxp);
+void gxp_vd_init(struct gxp_dev *gxp);
/*
* Tears down the device management subsystem.
@@ -79,37 +177,56 @@ void gxp_vd_destroy(struct gxp_dev *gxp);
* @gxp: The GXP device the virtual device will belong to
* @requested_cores: The number of cores the virtual device will have
*
+ * The state of VD is initialized to GXP_VD_OFF.
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
* Return: The virtual address of the virtual device or an ERR_PTR on failure
* * -EINVAL - The number of requested cores was invalid
* * -ENOMEM - Unable to allocate the virtual device
- * * -EBUSY - Not enough iommu domains available
+ * * -EBUSY - Not enough iommu domains available or insufficient physical
+ * cores to be assigned to @vd
+ * * -ENOSPC   - No more shared slices are available
*/
-struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, u16 requested_cores);
+struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
+ u16 requested_cores);
/**
- * gxp_vd_release() - Cleanup and free a struct gxp_virtual_device
+ * gxp_vd_release() - Cleanup a struct gxp_virtual_device
* @vd: The virtual device to be released
*
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
* A virtual device must be stopped before it can be released.
+ *
+ * If @vd's reference count is 1 before this call, this function frees @vd.
+ * Otherwise @vd's state is set to GXP_VD_RELEASED.
*/
void gxp_vd_release(struct gxp_virtual_device *vd);
/**
- * gxp_vd_start() - Run a virtual device on physical cores
- * @vd: The virtual device to start
+ * gxp_vd_run() - Run a virtual device on physical cores
+ * @vd: The virtual device to run
+ *
+ * The state of @vd should be GXP_VD_OFF or GXP_VD_READY before calling this
+ * function. If this function runs successfully, the state becomes
+ * GXP_VD_RUNNING. Otherwise, it would be GXP_VD_UNAVAILABLE.
*
* The caller must have locked gxp->vd_semaphore for writing.
*
* Return:
- * * 0 - Success
- * * -EBUSY - Insufficient physical cores were free to start @vd
+ * * 0 - Success
+ * * -EINVAL    - The VD is not in the GXP_VD_OFF or GXP_VD_READY state
+ * * Otherwise - Errno returned by firmware running
*/
-int gxp_vd_start(struct gxp_virtual_device *vd);
+int gxp_vd_run(struct gxp_virtual_device *vd);
/**
- * gxp_vd_stop() - Stop a running virtual device and free up physical cores
+ * gxp_vd_stop() - Stop a running virtual device
* @vd: The virtual device to stop
*
+ * The state of @vd will be GXP_VD_OFF.
+ *
* The caller must have locked gxp->vd_semaphore for writing.
*/
void gxp_vd_stop(struct gxp_virtual_device *vd);
@@ -123,25 +240,6 @@ void gxp_vd_stop(struct gxp_virtual_device *vd);
*/
int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core);
-/*
- * Converts a bitfield of virtual core IDs to a bitfield of physical core IDs.
- *
- * If the virtual list contains any invalid IDs, the entire physical ID list
- * will be considered invalid and this function will return 0.
- *
- * The caller must have locked gxp->vd_semaphore for reading.
- */
-uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
- u16 virt_core_list);
-
-/*
- * Returns the virtual core number assigned the phys_core, inside of this
- * virtual device or -EINVAL if this core is not part of this virtual device.
- *
- * The caller must have locked gxp->vd_semaphore for reading.
- */
-int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core);
-
/**
* gxp_vd_mapping_store() - Store a mapping in a virtual device's records
* @vd: The virtual device @map was created for and will be stored in
@@ -211,6 +309,10 @@ struct gxp_mapping *gxp_vd_mapping_search_host(struct gxp_virtual_device *vd,
* gxp_vd_suspend() - Suspend a running virtual device
* @vd: The virtual device to suspend
*
+ * The state of @vd should be GXP_VD_RUNNING before calling this function.
+ * If the suspension runs successfully on all cores, the state becomes
+ * GXP_VD_SUSPENDED. Otherwise, it would be GXP_VD_UNAVAILABLE.
+ *
* The caller must have locked gxp->vd_semaphore for writing.
*/
void gxp_vd_suspend(struct gxp_virtual_device *vd);
@@ -219,6 +321,10 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd);
* gxp_vd_resume() - Resume a suspended virtual device
* @vd: The virtual device to resume
*
+ * The state of @vd should be GXP_VD_SUSPENDED before calling this function.
+ * If the resumption runs successfully on all cores, the state becomes
+ * GXP_VD_RUNNING. Otherwise, it would be GXP_VD_UNAVAILABLE.
+ *
* The caller must have locked gxp->vd_semaphore for writing.
*
* Return:
@@ -227,4 +333,116 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd);
*/
int gxp_vd_resume(struct gxp_virtual_device *vd);
+/**
+ * gxp_vd_block_ready() - This is called after the block wakelock is acquired.
+ * Does required setup for serving VD such as attaching its IOMMU domain.
+ *
+ * @vd: The virtual device to prepare the resources
+ *
+ * The state of @vd should be GXP_VD_OFF or GXP_VD_SUSPENDED before calling this function.
+ * If it was GXP_VD_OFF and this function runs successfully, the state becomes GXP_VD_READY.
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL      - The VD is not in the GXP_VD_OFF or GXP_VD_SUSPENDED state
+ * * Otherwise - Errno returned by IOMMU domain attachment
+ */
+int gxp_vd_block_ready(struct gxp_virtual_device *vd);
+
+/**
+ * gxp_vd_block_unready() - This is called before the block wakelock is going to be released.
+ *
+ * @vd: The virtual device to release the resources
+ *
+ * This function must be called only when the client holds the block wakelock and allocated a
+ * virtual device. It does not depend on the state of @vd and does not change the
+ * state in normal situations. However, if an unexpected error happens, the state can be changed
+ * to GXP_VD_UNAVAILABLE.
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ */
+void gxp_vd_block_unready(struct gxp_virtual_device *vd);
+
+/*
+ * Checks whether the virtual device has a positive credit, and uses 1 credit
+ * if so.
+ *
+ * Returns true when there is enough credit, false otherwise.
+ */
+bool gxp_vd_has_and_use_credit(struct gxp_virtual_device *vd);
+/*
+ * Releases the credit.
+ */
+void gxp_vd_release_credit(struct gxp_virtual_device *vd);
+
+/* Increases reference count of @vd by one and returns @vd. */
+static inline struct gxp_virtual_device *
+gxp_vd_get(struct gxp_virtual_device *vd)
+{
+ WARN_ON_ONCE(!refcount_inc_not_zero(&vd->refcount));
+ return vd;
+}
+
+/*
+ * Decreases reference count of @vd by one.
+ *
+ * If @vd->refcount becomes 0, @vd will be freed.
+ */
+void gxp_vd_put(struct gxp_virtual_device *vd);
+
+/*
+ * Change the status of the vd of @client_id to GXP_VD_UNAVAILABLE.
+ * Internally, it will discard all pending/unconsumed user commands and call the
+ * `gxp_vd_block_unready` function.
+ *
+ * This function will be called when the `CLIENT_FATAL_ERROR_NOTIFY` RKCI has been sent from the
+ * firmware side.
+ *
+ * @gxp: The GXP device to obtain the handler for
+ * @client_id: client_id of the crashed vd.
+ * @core_list: A bitfield enumerating the physical cores on which crash is reported from firmware.
+ */
+void gxp_vd_invalidate_with_client_id(struct gxp_dev *gxp, int client_id,
+ uint core_list);
+
+/*
+ * Changes the status of the @vd to GXP_VD_UNAVAILABLE.
+ * Internally, it will discard all pending/unconsumed user commands.
+ *
+ * This function will be called when some unexpected errors happened and cannot proceed requests
+ * anymore with this @vd.
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
+ * @gxp: The GXP device to obtain the handler for.
+ * @vd: The virtual device to be invalidated.
+ */
+void gxp_vd_invalidate(struct gxp_dev *gxp, struct gxp_virtual_device *vd);
+
+/*
+ * Generates a debug dump of @vd which utilizes @core_list cores.
+ *
+ * This function is usually called in the MCU mode, where the kernel driver cannot decide which
+ * cores will be used by @vd.
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
+ * @gxp: The GXP device to obtain the handler for.
+ * @vd: The virtual device to be dumped.
+ * @core_list: A bitfield enumerating the physical cores on which crash is reported from firmware.
+ */
+void gxp_vd_generate_debug_dump(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint core_list);
+
+/*
+ * Returns an ID between 0 and GXP_NUM_CORES-1 that is unique to each VD.
+ * Only used in direct mode.
+ */
+static inline uint gxp_vd_hw_slot_id(struct gxp_virtual_device *vd)
+{
+ return ffs(vd->core_list) - 1;
+}
+
#endif /* __GXP_VD_H__ */
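/*
 * Illustrative sketch (not part of this patch): the VD lifecycle implied by
 * the declarations above, in the order a direct-mode client would drive it.
 * The wrapper is hypothetical and error handling is abbreviated.
 */
static int example_vd_lifecycle(struct gxp_dev *gxp)
{
	struct gxp_virtual_device *vd;
	int ret;

	down_write(&gxp->vd_semaphore);
	vd = gxp_vd_allocate(gxp, 1);		/* state: GXP_VD_OFF */
	if (IS_ERR(vd)) {
		up_write(&gxp->vd_semaphore);
		return PTR_ERR(vd);
	}

	ret = gxp_vd_block_ready(vd);		/* state: GXP_VD_READY */
	if (!ret)
		ret = gxp_vd_run(vd);		/* state: GXP_VD_RUNNING */

	if (!ret) {
		gxp_vd_suspend(vd);		/* state: GXP_VD_SUSPENDED */
		ret = gxp_vd_resume(vd);	/* state: GXP_VD_RUNNING */
	}

	gxp_vd_stop(vd);			/* state: GXP_VD_OFF */
	gxp_vd_block_unready(vd);
	gxp_vd_release(vd);
	up_write(&gxp->vd_semaphore);

	return ret;
}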
diff --git a/gxp-wakelock.c b/gxp-wakelock.c
deleted file mode 100644
index 9110a3d..0000000
--- a/gxp-wakelock.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP wakelock support
- *
- * Copyright (C) 2022 Google LLC
- */
-
-#include "gxp-client.h"
-#include "gxp-dma.h"
-#include "gxp-pm.h"
-#include "gxp-wakelock.h"
-
-int gxp_wakelock_init(struct gxp_dev *gxp)
-{
- struct gxp_wakelock_manager *mgr;
-
- mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
-
- mutex_init(&mgr->lock);
-
- gxp->wakelock_mgr = mgr;
-
- return 0;
-}
-
-int gxp_wakelock_acquire(struct gxp_dev *gxp)
-{
- struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
- int ret = 0;
-
- mutex_lock(&mgr->lock);
-
- if (mgr->suspended) {
- /*
- * Don't allow a new client to obtain a wakelock, powering up
- * BLK_AUR, when the system is going to sleep.
- */
- dev_warn(gxp->dev,
- "Attempt to obtain wakelock while suspending.\n");
- ret = -EAGAIN;
- goto out;
- }
-
- if (!mgr->count++) {
- ret = gxp_pm_blk_on(gxp);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to power on BLK_AUR (ret=%d, client count=%u)\n",
- ret, mgr->count);
- goto err_blk_on;
- }
- }
-
-out:
- mutex_unlock(&mgr->lock);
-
- return ret;
-
-err_blk_on:
- mgr->count--;
- mutex_unlock(&mgr->lock);
- return ret;
-}
-
-void gxp_wakelock_release(struct gxp_dev *gxp)
-{
- struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
- int ret = 0;
-
- mutex_lock(&mgr->lock);
-
- if (!mgr->count) {
- dev_err(gxp->dev,
- "Attempt to release wakelock with none held.\n");
- goto out;
- }
-
- if (!--mgr->count) {
- ret = gxp_pm_blk_off(gxp);
- if (ret)
- dev_err(gxp->dev,
- "Failed to power down BLK_AUR (ret=%d, client count=%u)\n",
- ret, mgr->count);
- }
-
-out:
- mutex_unlock(&mgr->lock);
-}
-
-int gxp_wakelock_suspend(struct gxp_dev *gxp)
-{
- struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
- int ret;
- struct gxp_client *client;
-
- if (!mutex_trylock(&mgr->lock))
- return -EAGAIN;
-
- /* Can't suspend if there are any active clients */
- mgr->suspended = mgr->count == 0;
- ret = mgr->suspended ? 0 : -EAGAIN;
-
- /* Suspend successful. Can exit now. */
- if (!ret)
- goto out;
-
- /* Log clients currently holding a wakelock */
- if (!mutex_trylock(&gxp->client_list_lock)) {
- dev_warn_ratelimited(
- gxp->dev,
- "Unable to get client list lock on suspend failure\n");
- goto out;
- }
-
- list_for_each_entry(client, &gxp->client_list, list_entry) {
- if (!down_read_trylock(&client->semaphore)) {
- dev_warn_ratelimited(
- gxp->dev,
- "Unable to acquire client lock (tgid=%d pid=%d)\n",
- client->tgid, client->pid);
- continue;
- }
-
- if (client->has_block_wakelock)
- dev_warn_ratelimited(
- gxp->dev,
- "Cannot suspend with client holding wakelock (tgid=%d pid=%d)\n",
- client->tgid, client->pid);
-
- up_read(&client->semaphore);
- }
-
- mutex_unlock(&gxp->client_list_lock);
-
-out:
- mutex_unlock(&mgr->lock);
-
- return ret;
-}
-
-int gxp_wakelock_resume(struct gxp_dev *gxp)
-{
- struct gxp_wakelock_manager *mgr = gxp->wakelock_mgr;
-
- mutex_lock(&mgr->lock);
-
- mgr->suspended = false;
-
- mutex_unlock(&mgr->lock);
-
- return 0;
-}
diff --git a/gxp-wakelock.h b/gxp-wakelock.h
deleted file mode 100644
index ff76325..0000000
--- a/gxp-wakelock.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * GXP wakelock support
- *
- * Copyright (C) 2022 Google LLC
- */
-#ifndef __GXP_WAKELOCK_H__
-#define __GXP_WAKELOCK_H__
-
-#include "gxp-internal.h"
-#include "gxp.h"
-
-struct gxp_wakelock_manager {
- /* Protects count and suspended */
- struct mutex lock;
- uint count;
- bool suspended;
-};
-
-/**
- * gxp_telemetry_init() - Initialize wakelock support
- * @gxp: The GXP device to initialize wakelock support for
- *
- * Return:
- * * 0 - Success
- * * -ENOMEM - Insufficient memory is available to initialize support
- */
-int gxp_wakelock_init(struct gxp_dev *gxp);
-
-/**
- * gxp_wakelock_acquire() - Increment the GXP wakelock counter
- * @gxp: The GXP device to increment the wakelock counter for
- *
- * If the wakelock counter transitions from 0 to 1, this will result in BLK_AUR
- * being powered on.
- *
- * Return:
- * * 0 - Success
- * * -EAGAIN - The system is suspending and BLK_AUR cannot be powered on
- * * Other - An attempt to power on BLK_AUR failed
- */
-int gxp_wakelock_acquire(struct gxp_dev *gxp);
-
-/**
- * gxp_wakelock_release() - Decrement the GXP wakelock counter
- * @gxp: The GXP device to decrement the wakelock counter for
- *
- * If the wakelock counter transitions from 1 to 0, this will result in BLK_AUR
- * being powered off. In the event BLK_AUR cannot be powered off, a message
- * will be logged, but the wakelock will still be released.
- */
-void gxp_wakelock_release(struct gxp_dev *gxp);
-
-/**
- * gxp_wakelock_suspend() - Check if the wakelock will allow a system suspend
- * @gxp: The GXP device to check the wakelock of
- *
- * Return:
- * * 0 - The wakelock has been suspended and is ready for system suspend
- * * -EAGAIN - The wakelock is held, and system suspend should be aborted
- */
-int gxp_wakelock_suspend(struct gxp_dev *gxp);
-
-/**
- * gxp_wakelock_resume() - Notify the wakelock that system suspend has exited
- * @gxp: The GXP device to notify the wakelock of
- *
- * Return:
- * * 0 - The wakelock is ready to be acquired again
- */
-int gxp_wakelock_resume(struct gxp_dev *gxp);
-
-#endif /* __GXP_WAKELOCK_H__ */
diff --git a/gxp.h b/gxp.h
index a7ce8ee..a754a85 100644
--- a/gxp.h
+++ b/gxp.h
@@ -2,8 +2,9 @@
/*
* GXP kernel-userspace interface definitions.
*
- * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2020-2022 Google LLC
*/
+
#ifndef __GXP_H__
#define __GXP_H__
@@ -11,281 +12,56 @@
#include <linux/types.h>
/* Interface Version */
-#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 3
-#define GXP_INTERFACE_VERSION_BUILD 0
+#define GXP_INTERFACE_VERSION_MAJOR 1
+#define GXP_INTERFACE_VERSION_MINOR 11
+#define GXP_INTERFACE_VERSION_BUILD 0
/*
- * mmap offsets for logging and tracing buffers
+ * Legacy mmap offsets for core logging and tracing buffers
* Requested size will be divided evenly among all cores. The whole buffer
* must be page-aligned, and the size of each core's buffer must be a multiple
* of PAGE_SIZE.
*/
-#define GXP_MMAP_LOG_BUFFER_OFFSET 0x10000
-#define GXP_MMAP_TRACE_BUFFER_OFFSET 0x20000
-
-#define GXP_IOCTL_BASE 0xEE
-
-#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
-struct gxp_interface_version_ioctl {
- /*
- * Driver major version number.
- * Increments whenever a non-backwards compatible change to the
- * interface defined in this file changes.
- */
- __u16 version_major;
- /*
- * Driver minor version number.
- * Increments whenever a backwards compatible change, such as the
- * addition of a new IOCTL, is made to the interface defined in this
- * file.
- */
- __u16 version_minor;
- /*
- * Driver build identifier.
- * NULL-terminated string of the git hash of the commit the driver was
- * built from. If the driver had uncommitted changes the string will
- * end with "-dirty".
- */
- char version_build[GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE];
-};
-
-/* Query the driver's interface version. */
-#define GXP_GET_INTERFACE_VERSION \
- _IOR(GXP_IOCTL_BASE, 26, struct gxp_interface_version_ioctl)
+#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET_LEGACY 0x10000
+#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET_LEGACY 0x20000
-struct gxp_specs_ioctl {
- /* Maximum number of cores that can be allocated to a virtual device */
- __u8 core_count;
- /* Deprecated fields that should be ignored */
- __u16 reserved_0;
- __u16 reserved_1;
- __u16 reserved_2;
- __u8 reserved_3;
- /*
- * Amount of "tightly-coupled memory" or TCM available to each core.
- * The value returned will be in kB, or 0 if the value was not
- * specified in the device-tree.
- */
- __u32 memory_per_core;
-};
+/* mmap offsets for MCU logging and tracing buffers */
+#define GXP_MMAP_MCU_LOG_BUFFER_OFFSET 0x30000
+#define GXP_MMAP_MCU_TRACE_BUFFER_OFFSET 0x40000
-/* Query system specs. */
-#define GXP_GET_SPECS \
- _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
+/* mmap offsets for core logging and tracing buffers */
+#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET 0x50000
+#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET 0x60000
-struct gxp_virtual_device_ioctl {
- /*
- * Input:
- * The number of cores requested for the virtual device.
- */
- __u8 core_count;
- /*
- * Input:
- * The number of threads requested per core.
- */
- __u16 threads_per_core;
- /*
- * Input:
- * The amount of memory requested per core, in kB.
- */
- __u32 memory_per_core;
- /*
- * Output:
- * The ID assigned to the virtual device and shared with its cores.
- */
- __u32 vdid;
-};
-
-/* Allocate virtual device. */
-#define GXP_ALLOCATE_VIRTUAL_DEVICE \
- _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
-
-/*
- * Components for which a client may hold a wakelock.
- * Acquired by passing these values as `components_to_wake` in
- * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
- * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
- *
- * Multiple wakelocks can be acquired or released at once by passing multiple
- * components, ORed together.
- */
-#define WAKELOCK_BLOCK (1 << 0)
-#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
-
-/*
- * DSP subsystem Power state values for use as `gxp_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- * Note: GXP_POWER_STATE_READY is a deprecated state. The way to achieve
- * original state is to request GXP_POWER_STATE_UUD with setting
- * GXP_POWER_LOW_FREQ_CLKMUX flag. Requesting GXP_POWER_STATE_READY is treated
- * as identical to GXP_POWER_STATE_UUD.
- */
-#define GXP_POWER_STATE_OFF 0
-#define GXP_POWER_STATE_UUD 1
-#define GXP_POWER_STATE_SUD 2
-#define GXP_POWER_STATE_UD 3
-#define GXP_POWER_STATE_NOM 4
-#define GXP_POWER_STATE_READY 5
-#define GXP_POWER_STATE_UUD_PLUS 6
-#define GXP_POWER_STATE_SUD_PLUS 7
-#define GXP_POWER_STATE_UD_PLUS 8
-#define GXP_NUM_POWER_STATES (GXP_POWER_STATE_UD_PLUS + 1)
-
-/*
- * Memory interface power state values for use as `memory_power_state` in
- * `struct gxp_acquire_wakelock_ioctl`.
- */
-#define MEMORY_POWER_STATE_UNDEFINED 0
-#define MEMORY_POWER_STATE_MIN 1
-#define MEMORY_POWER_STATE_VERY_LOW 2
-#define MEMORY_POWER_STATE_LOW 3
-#define MEMORY_POWER_STATE_HIGH 4
-#define MEMORY_POWER_STATE_VERY_HIGH 5
-#define MEMORY_POWER_STATE_MAX 6
-
-/*
- * GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
- * and `power_flags in `gxp_mailbox_command_ioctl`.
- *
- * Non-aggressor flag is deprecated. Setting this flag is a no-op since
- * non-aggressor support is defeatured.
- */
-#define GXP_POWER_NON_AGGRESSOR (1 << 0)
-/*
- * The client can request low frequency clkmux vote by this flag, which means
- * the kernel driver will switch the CLKMUX clocks to save more power.
- *
- * Note: The kernel driver keep separate track of low frequency clkmux votes
- * and normal votes, and the low frequency clkmux votes will have lower priority
- * than all normal votes.
- * For example, if the kerenl driver has two votes, one is GXP_POWER_STATE_UUD
- * without GXP_POWER_LOW_FREQ_CLKMUX, and the other one is GXP_POWER_STATE_NOM
- * with GXP_POWER_LOW_FREQ_CLKMUX. The voting result is GXP_POWER_STATE_UUD
- * without GXP_POWER_LOW_FREQ_CLKMUX.
- */
-#define GXP_POWER_LOW_FREQ_CLKMUX (1 << 1)
-
-struct gxp_acquire_wakelock_ioctl {
- /*
- * The components for which a wakelock will be acquired.
- * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
- * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
- * client must already have acquired a BLOCK wakelock or acquire both
- * in the same call.
- */
- __u32 components_to_wake;
- /*
- * Minimum power state to operate the entire DSP subsystem at until
- * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
- * from above. Note that the requested power state will not be cleared
- * if only the VIRTUAL_DEVICE wakelock is released.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
- * wakelock.
- */
- __u32 gxp_power_state;
- /*
- * Memory interface power state to request from the system so long as
- * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
- * from above. The requested memory power state will not be cleared if
- * only the VIRTUAL_DEVICE wakelock is released.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * How long to wait, in microseconds, before returning if insufficient
- * physical cores are available when attempting to acquire a
- * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
- * should not wait at all if cores are not available.
- */
- __u32 vd_timeout_us;
- /*
- * Flags indicating power attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [0:0] - Deprecated, do not use
- * [1:1] - LOW_FREQ_CLKMUX setting for power management
- * 0 = Don't switch CLKMUX clocks, default value
- * 1 = Switch CLKMUX clocks
- * [31:2] - RESERVED
- */
- __u32 flags;
-};
-
-/*
- * Acquire a wakelock and request minimum power states for the DSP subsystem
- * and the memory interface.
- *
- * Upon a successful return, the specified components will be powered on and if
- * they were not already running at the specified or higher power states,
- * requests will have been sent to transition both the DSP subsystem and
- * memory interface to the specified states.
- *
- * If the same client invokes this IOCTL for the same component more than once
- * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
- * second call will update requested power states, but have no other effects.
- * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
- *
- * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
- * insufficient physical cores available, the driver will wait up to
- * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
- * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
- * wakelocks were being requested, neither will have been acquired.
- */
-#define GXP_ACQUIRE_WAKE_LOCK \
- _IOW(GXP_IOCTL_BASE, 25, struct gxp_acquire_wakelock_ioctl)
-
-/*
- * Legacy "acquire wakelock" IOCTL that does not support power flags.
- * This IOCTL exists for backwards compatibility with older runtimes. All other
- * fields are the same as in `struct gxp_acquire_wakelock_ioctl`.
- */
-struct gxp_acquire_wakelock_compat_ioctl {
- __u32 components_to_wake;
- __u32 gxp_power_state;
- __u32 memory_power_state;
- __u32 vd_timeout_us;
-};
-
-#define GXP_ACQUIRE_WAKE_LOCK_COMPAT \
- _IOW(GXP_IOCTL_BASE, 18, struct gxp_acquire_wakelock_compat_ioctl)
-
-/*
- * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
- *
- * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
- * bitwise OR of both.
- *
- * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
- * removed from physical cores. At that point the cores may be reallocated to
- * another client or powered down.
- *
- * If no clients hold a BLOCK wakelock, the entire DSP subsytem may be powered
- * down. If a client attempts to release a BLOCK wakelock while still holding
- * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
- *
- * If a client attempts to release a wakelock it does not hold, this IOCTL will
- * return -ENODEV.
- */
-#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+#define GXP_IOCTL_BASE 0xEE
/* GXP map flag macros */
/* The mask for specifying DMA direction in GXP map flag */
-#define GXP_MAP_DIR_MASK 3
+#define GXP_MAP_DIR_MASK 3
/* The targeted DMA direction for the buffer */
-#define GXP_MAP_DMA_BIDIRECTIONAL 0
-#define GXP_MAP_DMA_TO_DEVICE 1
-#define GXP_MAP_DMA_FROM_DEVICE 2
+#define GXP_MAP_DMA_BIDIRECTIONAL 0
+#define GXP_MAP_DMA_TO_DEVICE 1
+#define GXP_MAP_DMA_FROM_DEVICE 2
+/* Create coherent mappings of the buffer. */
+#define GXP_MAP_COHERENT (1 << 2)
+
+/* To check whether the driver is working in MCU mode. */
+#define GXP_SPEC_FEATURE_MODE_MCU (1 << 0)
+
+/* To specify the secureness of the virtual device. */
+#define GXP_ALLOCATE_VD_SECURE BIT(0)
+
+/* Core telemetry buffer size is a multiple of 64 kB */
+#define GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE 0x10000u
+/* Magic code used to indicate the validity of telemetry buffer contents */
+#define GXP_TELEMETRY_BUFFER_VALID_MAGIC_CODE 0xC0DEC0DEu
+/* Magic code used to indicate the validity of secure telemetry buffer contents */
+#define GXP_TELEMETRY_SECURE_BUFFER_VALID_MAGIC_CODE 0xA0B0C0D0u
struct gxp_map_ioctl {
/*
+ * Deprecated. All virtual cores will be mapped.
+ *
* Bitfield indicating which virtual cores to map the buffer for.
* To map for virtual core X, set bit X in this field, i.e. `1 << X`.
*
@@ -293,8 +69,8 @@ struct gxp_map_ioctl {
* buffer for all cores it had been mapped for.
*/
__u16 virtual_core_list;
- __u64 host_address; /* virtual address in the process space */
- __u32 size; /* size of mapping in bytes */
+ __u64 host_address; /* virtual address in the process space */
+ __u32 size; /* size of mapping in bytes */
/*
* Flags indicating mapping attribute requests from the runtime.
* Set RESERVED bits to 0 to ensure backwards compatibility.
@@ -306,10 +82,15 @@ struct gxp_map_ioctl {
* 10 = DMA_FROM_DEVICE (device can write buffer)
* Note: DMA_DIRECTION is the direction in which data moves
* from the host's perspective.
- * [31:2] - RESERVED
+ * [2:2] - Coherent Mapping:
+ * 0 = Create non-coherent mappings of the buffer.
+ * 1 = Create coherent mappings of the buffer.
+ * Note: this attribute may be ignored on platforms where
+ * gxp is not I/O coherent.
+ * [31:3] - RESERVED
*/
__u32 flags;
- __u64 device_address; /* returned device address */
+ __u64 device_address; /* returned device address */
};
/*
@@ -317,8 +98,7 @@ struct gxp_map_ioctl {
*
* The client must have allocated a virtual device.
*/
-#define GXP_MAP_BUFFER \
- _IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
+#define GXP_MAP_BUFFER _IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
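/*
 * Illustrative sketch (not part of this patch): a userspace runtime filling
 * struct gxp_map_ioctl for GXP_MAP_BUFFER with a coherent, bidirectional
 * mapping. The already-open device fd is an assumption; the flag values come
 * from the definitions above.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int example_map_buffer(int gxp_fd, void *buf, uint32_t size,
			      uint64_t *dev_addr)
{
	struct gxp_map_ioctl req = {
		.host_address = (uintptr_t)buf,
		.size = size,
		.flags = GXP_MAP_DMA_BIDIRECTIONAL | GXP_MAP_COHERENT,
	};

	if (ioctl(gxp_fd, GXP_MAP_BUFFER, &req))
		return -1;

	*dev_addr = req.device_address;	/* address usable by the DSP cores */
	return 0;
}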
/*
* Un-map host buffer previously mapped by GXP_MAP_BUFFER.
@@ -329,12 +109,11 @@ struct gxp_map_ioctl {
*
* The client must have allocated a virtual device.
*/
-#define GXP_UNMAP_BUFFER \
- _IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
+#define GXP_UNMAP_BUFFER _IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
/* GXP sync flag macros */
-#define GXP_SYNC_FOR_DEVICE (0)
-#define GXP_SYNC_FOR_CPU (1)
+#define GXP_SYNC_FOR_DEVICE (0)
+#define GXP_SYNC_FOR_CPU (1)
struct gxp_sync_ioctl {
/*
@@ -371,162 +150,18 @@ struct gxp_sync_ioctl {
* EINVAL: If @size equals 0.
* EINVAL: If @offset plus @size exceeds the mapping size.
*/
-#define GXP_SYNC_BUFFER \
- _IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
-
-struct gxp_map_dmabuf_ioctl {
- /*
- * Bitfield indicating which virtual cores to map the dma-buf for.
- * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
- *
- * This field is not used by the unmap dma-buf IOCTL, which always
- * unmaps a dma-buf for all cores it had been mapped for.
- */
- __u16 virtual_core_list;
- __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
- /*
- * Flags indicating mapping attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [1:0] - DMA_DIRECTION:
- * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
- * 01 = DMA_TO_DEVICE (host can write buffer)
- * 10 = DMA_FROM_DEVICE (device can write buffer)
- * Note: DMA_DIRECTION is the direction in which data moves
- * from the host's perspective.
- * [31:2] - RESERVED
- */
- __u32 flags;
- /*
- * Device address the dmabuf is mapped to.
- * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
- * can be accessed from by the device.
- * - GXP_UNMAP_DMABUF expects this field to contain the value from the
- * mapping call, and uses it to determine which dma-buf to unmap.
- */
- __u64 device_address;
-};
-
-/*
- * Map host buffer via its dma-buf FD.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
-
-/*
- * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
- *
- * Only the @device_address field is used. Other fields are fetched from the
- * kernel's internal records. It is recommended to use the argument that was
- * passed in GXP_MAP_DMABUF to un-map the dma-buf.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
-
-struct gxp_mailbox_command_ioctl {
- /*
- * Input:
- * The virtual core to dispatch the command to.
- */
- __u16 virtual_core_id;
- /*
- * Output:
- * The sequence number assigned to this command. The caller can use
- * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
- * with this command.
- */
- __u64 sequence_number;
- /*
- * Input:
- * Device address to the buffer containing a GXP command. The user
- * should have obtained this address from the GXP_MAP_BUFFER ioctl.
- */
- __u64 device_address;
- /*
- * Input:
- * Size of the buffer at `device_address` in bytes.
- */
- __u32 size;
- /*
- * Input:
- * Minimum power state to operate the entire DSP subsystem at until
- * the mailbox command is finished(executed or timeout). One of the
- * GXP_POWER_STATE_* defines from below.
- *
- * `GXP_POWER_STATE_OFF` is not a valid value when executing a
- * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
- * command is expected to run at the power state the wakelock has
- * specified.
- */
- __u32 gxp_power_state;
- /*
- * Input:
- * Memory interface power state to request from the system so long as
- * the mailbox command is executing. One of the MEMORY_POWER_STATE*
- * defines from below.
- *
- * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
- * the memory interface power state will be made.
- */
- __u32 memory_power_state;
- /*
- * Input:
- * Flags describing the command, for use by the GXP device.
- */
- __u32 flags;
- /*
- * Input:
- * Flags indicating power attribute requests from the runtime.
- * Set RESERVED bits to 0 to ensure backwards compatibility.
- *
- * Bitfields:
- * [0:0] - Deprecated, do not use
- * [1:1] - LOW_FREQ_CLKMUX setting for power management
- * 0 = Don't switch CLKMUX clocks, default value
- * 1 = Switch CLKMUX clocks
- * [31:2] - RESERVED
- */
- __u32 power_flags;
-};
-
-/*
- * Push element to the mailbox commmand queue.
- *
- * The client must hold a VIRTUAL_DEVICE wakelock.
- */
-#define GXP_MAILBOX_COMMAND \
- _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
-
-/*
- * Legacy "mailbox command" IOCTL that does not support power requests.
- * This IOCTL exists for backwards compatibility with older runtimes. All
- * fields, other than the unsupported `gxp_power_state`, `memory_power_state`,
- * and `power_flags`, are the same as in `struct gxp_mailbox_command_ioctl`.
- */
-struct gxp_mailbox_command_compat_ioctl {
- __u16 virtual_core_id;
- __u64 sequence_number;
- __u64 device_address;
- __u32 size;
- __u32 flags;
-};
-
-/* The client must hold a VIRTUAL_DEVICE wakelock. */
-#define GXP_MAILBOX_COMMAND_COMPAT \
- _IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_compat_ioctl)
+#define GXP_SYNC_BUFFER _IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)
/* GXP mailbox response error code values */
-#define GXP_RESPONSE_ERROR_NONE (0)
-#define GXP_RESPONSE_ERROR_INTERNAL (1)
-#define GXP_RESPONSE_ERROR_TIMEOUT (2)
+#define GXP_RESPONSE_ERROR_NONE (0)
+#define GXP_RESPONSE_ERROR_INTERNAL (1)
+#define GXP_RESPONSE_ERROR_TIMEOUT (2)
struct gxp_mailbox_response_ioctl {
/*
* Input:
* The virtual core to fetch a response from.
+ * Only used in direct mode.
*/
__u16 virtual_core_id;
/*
@@ -551,56 +186,85 @@ struct gxp_mailbox_response_ioctl {
};
/*
- * Pop element from the mailbox response queue. Blocks until mailbox response
+ * Pop an element from the mailbox response queue. Blocks until mailbox response
* is available.
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_MAILBOX_RESPONSE \
+#define GXP_MAILBOX_RESPONSE \
_IOWR(GXP_IOCTL_BASE, 4, struct gxp_mailbox_response_ioctl)
-struct gxp_register_mailbox_eventfd_ioctl {
+struct gxp_specs_ioctl {
+ /* Maximum number of cores that can be allocated to a virtual device */
+ __u8 core_count;
/*
- * This eventfd will be signaled whenever a mailbox response arrives
- * for the core specified by `virtual_core_id`.
- *
- * When registering, if an eventfd has already been registered for the
- * specified core, the old eventfd will be unregistered and replaced.
- *
- * Not used during the unregister call, which clears any existing
- * eventfd.
+ * A field to indicate the features or modes the device supports.
+ * Bitfields:
+ * [0:0] - Mode:
+ * 0 = direct mode
+ * 1 = MCU mode
+ * [7:1] - RESERVED
*/
- __u32 eventfd;
+ __u8 features;
/*
- * Reserved.
- * Pass 0 for backwards compatibility.
+	 * Size of the telemetry buffer allocated per core, in units of
+	 * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
*/
- __u32 flags;
+ __u8 telemetry_buffer_size;
/*
- * The virtual core to register or unregister an eventfd from.
- * While an eventfd is registered, it will be signaled exactly once
- * any time a command to this virtual core receives a response or times
- * out.
+	 * Size of the secure telemetry buffer reserved per core, in units of
+	 * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE.
*/
- __u16 virtual_core_id;
+ __u8 secure_telemetry_buffer_size;
+ /* Deprecated fields that should be ignored */
+ __u8 reserved[8];
+ /*
+ * Amount of "tightly-coupled memory" or TCM available to each core.
+ * The value returned will be in kB, or 0 if the value was not
+ * specified in the device-tree.
+ */
+ __u32 memory_per_core;
};
-/*
- * Register an eventfd to be signaled whenever the specified virtual core
- * sends a mailbox response.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_REGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
+/* Query system specs. */
+#define GXP_GET_SPECS _IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)
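+
+/*
+ * Illustrative usage sketch (not part of this interface): querying the device
+ * specs and deriving the per-core telemetry buffer size. `gxp_fd` is a
+ * hypothetical open fd of the GXP device node.
+ *
+ *	struct gxp_specs_ioctl specs = {0};
+ *
+ *	if (ioctl(gxp_fd, GXP_GET_SPECS, &specs) == 0) {
+ *		int mcu_mode = !!(specs.features & GXP_SPEC_FEATURE_MODE_MCU);
+ *		size_t telem_bytes = (size_t)specs.telemetry_buffer_size *
+ *				     GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE;
+ *		// specs.core_count and specs.memory_per_core (kB) are also
+ *		// available here.
+ *	}
+ */
+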
-/*
- * Clear a previously registered mailbox response eventfd.
- *
- * The client must have allocated a virtual device.
- */
-#define GXP_UNREGISTER_MAILBOX_EVENTFD \
- _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+struct gxp_virtual_device_ioctl {
+ /*
+ * Input:
+ * The number of cores requested for the virtual device.
+ */
+ __u8 core_count;
+ /*
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - GXP_ALLOCATE_VD_SECURE setting for vd secureness
+ * 0 = Non-secure, default value
+ * 1 = Secure
+	 *       [7:1]   - RESERVED
+ */
+ __u8 flags;
+ /*
+ * Input:
+ * The number of threads requested per core.
+ */
+ __u16 threads_per_core;
+ /*
+ * Input:
+ * The amount of memory requested per core, in kB.
+ */
+ __u32 memory_per_core;
+ /*
+ * Output:
+ * The ID assigned to the virtual device and shared with its cores.
+ */
+ __u32 vdid;
+};
+
+/* Allocate virtual device. */
+#define GXP_ALLOCATE_VIRTUAL_DEVICE \
+ _IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
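+
+/*
+ * Illustrative usage sketch (not part of this interface): allocating a
+ * one-core, non-secure virtual device. The thread count and per-core memory
+ * are illustrative values only; `gxp_fd` is hypothetical.
+ *
+ *	struct gxp_virtual_device_ioctl vd = {
+ *		.core_count = 1,
+ *		.flags = 0,			// bit 0 clear: non-secure
+ *		.threads_per_core = 1,
+ *		.memory_per_core = 512,		// kB, illustrative
+ *	};
+ *
+ *	ioctl(gxp_fd, GXP_ALLOCATE_VIRTUAL_DEVICE, &vd);
+ *	// on success, vd.vdid holds the ID assigned to the virtual device
+ */
+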
#define ETM_TRACE_LSB_MASK 0x1
#define ETM_TRACE_SYNC_MSG_PERIOD_MIN 8
@@ -653,7 +317,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_START_COMMAND \
+#define GXP_ETM_TRACE_START_COMMAND \
_IOW(GXP_IOCTL_BASE, 7, struct gxp_etm_trace_start_ioctl)
/*
@@ -662,8 +326,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_SW_STOP_COMMAND \
- _IOW(GXP_IOCTL_BASE, 8, __u16)
+#define GXP_ETM_TRACE_SW_STOP_COMMAND _IOW(GXP_IOCTL_BASE, 8, __u16)
/*
* Users should call this IOCTL after tracing has been stopped for the last
@@ -674,8 +337,7 @@ struct gxp_etm_trace_start_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_TRACE_CLEANUP_COMMAND \
- _IOW(GXP_IOCTL_BASE, 9, __u16)
+#define GXP_ETM_TRACE_CLEANUP_COMMAND _IOW(GXP_IOCTL_BASE, 9, __u16)
#define GXP_TRACE_HEADER_SIZE 256
#define GXP_TRACE_RAM_SIZE 4096
@@ -711,11 +373,11 @@ struct gxp_etm_get_trace_info_ioctl {
*
* The client must hold a VIRTUAL_DEVICE wakelock.
*/
-#define GXP_ETM_GET_TRACE_INFO_COMMAND \
+#define GXP_ETM_GET_TRACE_INFO_COMMAND \
_IOWR(GXP_IOCTL_BASE, 10, struct gxp_etm_get_trace_info_ioctl)
-#define GXP_TELEMETRY_TYPE_LOGGING (0)
-#define GXP_TELEMETRY_TYPE_TRACING (1)
+#define GXP_TELEMETRY_TYPE_LOGGING (0)
+#define GXP_TELEMETRY_TYPE_TRACING (1)
/*
* Enable either logging or software tracing for all cores.
@@ -730,7 +392,7 @@ struct gxp_etm_get_trace_info_ioctl {
* logging/tracing to their buffers. Any cores booting after this call will
* begin logging/tracing as soon as their firmware is able to.
*/
-#define GXP_ENABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)
+#define GXP_ENABLE_CORE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)
/*
* Disable either logging or software tracing for all cores.
@@ -740,7 +402,53 @@ struct gxp_etm_get_trace_info_ioctl {
* This call will block until any running cores have been notified and ACKed
* that they have disabled the specified telemetry type.
*/
-#define GXP_DISABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
+#define GXP_DISABLE_CORE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
+
+/* For backward compatibility. */
+#define GXP_ENABLE_TELEMETRY GXP_ENABLE_CORE_TELEMETRY
+#define GXP_DISABLE_TELEMETRY GXP_DISABLE_CORE_TELEMETRY
+
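+/*
+ * Illustrative usage sketch (not part of this interface): enabling core
+ * logging and later disabling it. The ioctl argument is a pointer to the
+ * telemetry type; `gxp_fd` is hypothetical.
+ *
+ *	__u8 type = GXP_TELEMETRY_TYPE_LOGGING;
+ *
+ *	ioctl(gxp_fd, GXP_ENABLE_CORE_TELEMETRY, &type);
+ *	// ... running cores write logs to their telemetry buffers ...
+ *	ioctl(gxp_fd, GXP_DISABLE_CORE_TELEMETRY, &type);
+ */
+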
+struct gxp_tpu_mbx_queue_ioctl {
+ __u32 tpu_fd; /* TPU virtual device group fd */
+ /*
+ * Deprecated. All virtual cores will be mapped.
+ *
+ * Bitfield indicating which virtual cores to allocate and map the
+ * buffers for.
+ * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
+ *
+ * This field is not used by the unmap IOCTL, which always unmaps the
+ * buffers for all cores it had been mapped for.
+ */
+ __u32 virtual_core_list;
+ /*
+ * The user address of an edgetpu_mailbox_attr struct, containing
+ * cmd/rsp queue size, mailbox priority and other relevant info.
+ * This structure is defined in edgetpu.h in the TPU driver.
+ */
+ __u64 attr_ptr;
+};
+
+/*
+ * Map TPU-DSP mailbox cmd/rsp queue buffers.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_MAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
+
+/*
+ * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
+ * GXP_MAP_TPU_MBX_QUEUE.
+ *
+ * Only the @tpu_fd field will be used. Other fields will be fetched
+ * from the kernel's internal records. It is recommended to use the argument
+ * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_UNMAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
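+
+/*
+ * Illustrative usage sketch (not part of this interface): sharing TPU mailbox
+ * queues with the DSP. `tpu_group_fd` and `attr` are hypothetical; `attr`
+ * would be a struct edgetpu_mailbox_attr as defined in the TPU driver's
+ * edgetpu.h.
+ *
+ *	struct gxp_tpu_mbx_queue_ioctl tpu = {
+ *		.tpu_fd = tpu_group_fd,
+ *		.attr_ptr = (__u64)(uintptr_t)&attr,
+ *	};
+ *
+ *	ioctl(gxp_fd, GXP_MAP_TPU_MBX_QUEUE, &tpu);
+ *	// ...
+ *	ioctl(gxp_fd, GXP_UNMAP_TPU_MBX_QUEUE, &tpu);	// only tpu.tpu_fd is used
+ */
+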
struct gxp_register_telemetry_eventfd_ioctl {
/*
@@ -758,12 +466,16 @@ struct gxp_register_telemetry_eventfd_ioctl {
__u8 type;
};
-#define GXP_REGISTER_TELEMETRY_EVENTFD \
+#define GXP_REGISTER_CORE_TELEMETRY_EVENTFD \
_IOW(GXP_IOCTL_BASE, 15, struct gxp_register_telemetry_eventfd_ioctl)
-#define GXP_UNREGISTER_TELEMETRY_EVENTFD \
+#define GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD \
_IOW(GXP_IOCTL_BASE, 16, struct gxp_register_telemetry_eventfd_ioctl)
+/* For backward compatibility. */
+#define GXP_REGISTER_TELEMETRY_EVENTFD GXP_REGISTER_CORE_TELEMETRY_EVENTFD
+#define GXP_UNREGISTER_TELEMETRY_EVENTFD GXP_UNREGISTER_CORE_TELEMETRY_EVENTFD
+
/*
* Reads the 2 global counter registers in AURORA_TOP and combines them to
* return the full 64-bit value of the counter.
@@ -772,45 +484,372 @@ struct gxp_register_telemetry_eventfd_ioctl {
*/
#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)
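+
+/*
+ * Illustrative usage sketch (not part of this interface): reading the 64-bit
+ * global counter value. `gxp_fd` is hypothetical.
+ *
+ *	__u64 counter = 0;
+ *
+ *	ioctl(gxp_fd, GXP_READ_GLOBAL_COUNTER, &counter);
+ */
+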
-struct gxp_tpu_mbx_queue_ioctl {
- __u32 tpu_fd; /* TPU virtual device group fd */
+/*
+ * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
+ *
+ * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
+ * removed from physical cores. At that point the cores may be reallocated to
+ * another client or powered down.
+ *
+ * If no clients hold a BLOCK wakelock, the entire DSP subsystem may be powered
+ * down. If a client attempts to release a BLOCK wakelock while still holding
+ * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
+ *
+ * If a client attempts to release a wakelock it does not hold, this IOCTL will
+ * return -ENODEV.
+ */
+#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+
+struct gxp_map_dmabuf_ioctl {
/*
- * Bitfield indicating which virtual cores to allocate and map the
- * buffers for.
+ * Deprecated. All virtual cores will be mapped.
+ *
+ * Bitfield indicating which virtual cores to map the dma-buf for.
* To map for virtual core X, set bit X in this field, i.e. `1 << X`.
*
- * This field is not used by the unmap IOCTL, which always unmaps the
- * buffers for all cores it had been mapped for.
+ * This field is not used by the unmap dma-buf IOCTL, which always
+ * unmaps a dma-buf for all cores it had been mapped for.
*/
- __u32 virtual_core_list;
+ __u16 virtual_core_list;
+ __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
/*
- * The user address of an edgetpu_mailbox_attr struct, containing
- * cmd/rsp queue size, mailbox priority and other relevant info.
- * This structure is defined in edgetpu.h in the TPU driver.
+ * Flags indicating mapping attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [1:0] - DMA_DIRECTION:
+ * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
+ * 01 = DMA_TO_DEVICE (host can write buffer)
+ * 10 = DMA_FROM_DEVICE (device can write buffer)
+ * Note: DMA_DIRECTION is the direction in which data moves
+ * from the host's perspective.
+ * [31:2] - RESERVED
*/
- __u64 attr_ptr;
+ __u32 flags;
+ /*
+ * Device address the dmabuf is mapped to.
+ * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
+ * can be accessed from by the device.
+ * - GXP_UNMAP_DMABUF expects this field to contain the value from the
+ * mapping call, and uses it to determine which dma-buf to unmap.
+ */
+ __u64 device_address;
};
/*
- * Map TPU-DSP mailbox cmd/rsp queue buffers.
+ * Map host buffer via its dma-buf FD.
*
* The client must have allocated a virtual device.
*/
-#define GXP_MAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
+#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
/*
- * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
- * GXP_MAP_TPU_MBX_QUEUE.
+ * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
*
- * Only the @tpu_fd field will be used. Other fields will be fetched
- * from the kernel's internal records. It is recommended to use the argument
- * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
+ * Only the @device_address field is used. Other fields are fetched from the
+ * kernel's internal records. It is recommended to use the argument that was
+ * passed in GXP_MAP_DMABUF to un-map the dma-buf.
*
* The client must have allocated a virtual device.
*/
-#define GXP_UNMAP_TPU_MBX_QUEUE \
- _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
+#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
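+
+/*
+ * Illustrative usage sketch (not part of this interface): mapping a dma-buf
+ * for the device and later un-mapping it. `dmabuf_fd` is a hypothetical
+ * dma-buf file descriptor obtained from its exporter.
+ *
+ *	struct gxp_map_dmabuf_ioctl dmabuf_map = {
+ *		.dmabuf_fd = dmabuf_fd,
+ *		.flags = GXP_MAP_DMA_BIDIRECTIONAL,	// direction bits [1:0]
+ *	};
+ *
+ *	ioctl(gxp_fd, GXP_MAP_DMABUF, &dmabuf_map);	// fills device_address
+ *	// ...
+ *	ioctl(gxp_fd, GXP_UNMAP_DMABUF, &dmabuf_map);	// keyed by device_address
+ */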
+
+struct gxp_register_mailbox_eventfd_ioctl {
+ /*
+ * This eventfd will be signaled whenever a mailbox response arrives
+ * for the core specified by `virtual_core_id`.
+ *
+ * When registering, if an eventfd has already been registered for the
+ * specified core, the old eventfd will be unregistered and replaced.
+ *
+ * Not used during the unregister call, which clears any existing
+ * eventfd.
+ */
+ __u32 eventfd;
+ /*
+ * Reserved.
+ * Pass 0 for backwards compatibility.
+ */
+ __u32 flags;
+ /*
+ * The virtual core to register or unregister an eventfd from.
+ * While an eventfd is registered, it will be signaled exactly once
+ * any time a command to this virtual core receives a response or times
+ * out.
+ */
+ __u16 virtual_core_id;
+};
+
+/*
+ * Register an eventfd to be signaled whenever the specified virtual core
+ * sends a mailbox response.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_REGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 22, struct gxp_register_mailbox_eventfd_ioctl)
+
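+/*
+ * Illustrative usage sketch (not part of this interface): registering an
+ * eventfd that is signaled whenever virtual core 0 produces a mailbox
+ * response or a command to it times out. `efd` would come from eventfd(2);
+ * all names are hypothetical.
+ *
+ *	struct gxp_register_mailbox_eventfd_ioctl ev = {
+ *		.eventfd = efd,
+ *		.flags = 0,		// reserved, pass 0
+ *		.virtual_core_id = 0,
+ *	};
+ *
+ *	ioctl(gxp_fd, GXP_REGISTER_MAILBOX_EVENTFD, &ev);
+ */
+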
+struct gxp_mailbox_command_ioctl {
+ /*
+ * Input:
+ * The virtual core to dispatch the command to.
+ * Only used in direct mode.
+ */
+ __u16 virtual_core_id;
+ /*
+ * Input:
+ * The number of cores to dispatch the command to.
+ * Only used in non-direct mode.
+ */
+ __u16 num_cores;
+ /*
+ * Output:
+ * The sequence number assigned to this command. The caller can use
+ * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
+ * with this command.
+ */
+ __u64 sequence_number;
+ /*
+ * Input:
+ * Device address to the buffer containing a GXP command. The user
+ * should have obtained this address from the GXP_MAP_BUFFER ioctl.
+ */
+ __u64 device_address;
+ /*
+ * Input:
+ * Size of the buffer at `device_address` in bytes.
+ */
+ __u32 size;
+ /*
+ * Input:
+ * Minimum power state to operate the entire DSP subsystem at until
+	 * the mailbox command is finished (executed or timed out). One of the
+ * GXP_POWER_STATE_* defines from below.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when executing a
+ * mailbox command. The caller should pass GXP_POWER_STATE_UUD if the
+ * command is expected to run at the power state the wakelock has
+ * specified.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Input:
+ * Memory interface power state to request from the system so long as
+ * the mailbox command is executing. One of the MEMORY_POWER_STATE*
+ * defines from below.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * Input:
+ * Flags describing the command, for use by the GXP device.
+ */
+ __u32 flags;
+ /*
+ * Input:
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
+ */
+ __u32 power_flags;
+};
+
+/*
+ * Push an element to the mailbox command queue.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_MAILBOX_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 23, struct gxp_mailbox_command_ioctl)
+
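+/*
+ * Illustrative usage sketch (not part of this interface): dispatching a
+ * command buffer that was previously mapped with GXP_MAP_BUFFER.
+ * `cmd_device_addr` and `cmd_size` are hypothetical, and a VIRTUAL_DEVICE
+ * wakelock must already be held.
+ *
+ *	struct gxp_mailbox_command_ioctl cmd = {
+ *		.virtual_core_id = 0,		// direct mode
+ *		.device_address = cmd_device_addr,
+ *		.size = cmd_size,
+ *		.gxp_power_state = GXP_POWER_STATE_UUD,
+ *		.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
+ *	};
+ *
+ *	ioctl(gxp_fd, GXP_MAILBOX_COMMAND, &cmd);
+ *	// cmd.sequence_number pairs this command with its later
+ *	// GXP_MAILBOX_RESPONSE entry.
+ */
+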
+/*
+ * Clear a previously registered mailbox response eventfd.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_UNREGISTER_MAILBOX_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 24, struct gxp_register_mailbox_eventfd_ioctl)
+
+/*
+ * Components for which a client may hold a wakelock.
+ * Acquired by passing these values as `components_to_wake` in
+ * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
+ * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
+ *
+ * Multiple wakelocks can be acquired or released at once by passing multiple
+ * components, ORed together.
+ */
+#define WAKELOCK_BLOCK (1 << 0)
+#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
+
+/*
+ * DSP subsystem Power state values for use as `gxp_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ * Note: GXP_POWER_STATE_READY is a deprecated state. The way to achieve
+ * original state is to request GXP_POWER_STATE_UUD with setting
+ * GXP_POWER_LOW_FREQ_CLKMUX flag. Requesting GXP_POWER_STATE_READY is treated
+ * as identical to GXP_POWER_STATE_UUD.
+ */
+#define GXP_POWER_STATE_OFF 0
+#define GXP_POWER_STATE_UUD 1
+#define GXP_POWER_STATE_SUD 2
+#define GXP_POWER_STATE_UD 3
+#define GXP_POWER_STATE_NOM 4
+#define GXP_POWER_STATE_READY 5
+#define GXP_POWER_STATE_UUD_PLUS 6
+#define GXP_POWER_STATE_SUD_PLUS 7
+#define GXP_POWER_STATE_UD_PLUS 8
+#define GXP_NUM_POWER_STATES (GXP_POWER_STATE_UD_PLUS + 1)
+
+/*
+ * Memory interface power state values for use as `memory_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ */
+#define MEMORY_POWER_STATE_UNDEFINED 0
+#define MEMORY_POWER_STATE_MIN 1
+#define MEMORY_POWER_STATE_VERY_LOW 2
+#define MEMORY_POWER_STATE_LOW 3
+#define MEMORY_POWER_STATE_HIGH 4
+#define MEMORY_POWER_STATE_VERY_HIGH 5
+#define MEMORY_POWER_STATE_MAX 6
+
+/*
+ * GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
+ * and `power_flags` in `gxp_mailbox_command_ioctl`.
+ *
+ * The non-aggressor flag is deprecated. Setting it is a no-op, since
+ * non-aggressor support has been removed.
+ */
+#define GXP_POWER_NON_AGGRESSOR (1 << 0)
+/*
+ * The client can request a low-frequency clkmux vote with this flag, which
+ * makes the kernel driver switch the CLKMUX clocks to save more power.
+ *
+ * Note: The kernel driver keeps separate track of low-frequency clkmux votes
+ * and normal votes, and the low-frequency clkmux votes have lower priority
+ * than all normal votes.
+ * For example, if the kernel driver holds two votes, one for
+ * GXP_POWER_STATE_UUD without GXP_POWER_LOW_FREQ_CLKMUX and one for
+ * GXP_POWER_STATE_NOM with GXP_POWER_LOW_FREQ_CLKMUX, the voting result is
+ * GXP_POWER_STATE_UUD without GXP_POWER_LOW_FREQ_CLKMUX.
+ */
+#define GXP_POWER_LOW_FREQ_CLKMUX (1 << 1)
+
+struct gxp_acquire_wakelock_ioctl {
+ /*
+ * The components for which a wakelock will be acquired.
+ * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
+ * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
+ * client must already have acquired a BLOCK wakelock or acquire both
+ * in the same call.
+ */
+ __u32 components_to_wake;
+ /*
+ * Minimum power state to operate the entire DSP subsystem at until
+ * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
+ * from above. Note that the requested power state will not be cleared
+ * if only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
+ * wakelock.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Memory interface power state to request from the system so long as
+ * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
+ * from above. The requested memory power state will not be cleared if
+ * only the VIRTUAL_DEVICE wakelock is released.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * How long to wait, in microseconds, before returning if insufficient
+ * physical cores are available when attempting to acquire a
+ * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
+ * should not wait at all if cores are not available.
+ */
+ __u32 vd_timeout_us;
+ /*
+ * Flags indicating power attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
+ */
+ __u32 flags;
+};
+
+/*
+ * Acquire a wakelock and request minimum power states for the DSP subsystem
+ * and the memory interface.
+ *
+ * Upon a successful return, the specified components will be powered on.
+ * If the specified components contain VIRTUAL_DEVICE, and they were not
+ * already running at the specified or higher power states, requests will
+ * have been sent to transition both the DSP subsystem and memory interface
+ * to the specified states.
+ *
+ * If the same client invokes this IOCTL for the same component more than once
+ * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
+ * second call may update requested power states, but have no other effects.
+ * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
+ *
+ * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
+ * insufficient physical cores available, the driver will wait up to
+ * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
+ * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
+ * wakelocks were being requested, neither will have been acquired.
+ */
+#define GXP_ACQUIRE_WAKE_LOCK \
+ _IOW(GXP_IOCTL_BASE, 25, struct gxp_acquire_wakelock_ioctl)
+
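+/*
+ * Illustrative usage sketch (not part of this interface): acquiring BLOCK and
+ * VIRTUAL_DEVICE wakelocks together, then releasing both. The power-state
+ * choices are illustrative; `gxp_fd` is hypothetical.
+ *
+ *	struct gxp_acquire_wakelock_ioctl wake = {
+ *		.components_to_wake = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE,
+ *		.gxp_power_state = GXP_POWER_STATE_UUD,
+ *		.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
+ *		.vd_timeout_us = 0,	// do not wait for free cores
+ *	};
+ *	__u32 components = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE;
+ *
+ *	ioctl(gxp_fd, GXP_ACQUIRE_WAKE_LOCK, &wake);
+ *	// ... run mailbox commands ...
+ *	ioctl(gxp_fd, GXP_RELEASE_WAKE_LOCK, &components);
+ */
+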
+#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
+struct gxp_interface_version_ioctl {
+ /*
+ * Driver major version number.
+	 * Increments whenever a non-backwards compatible change is made to
+	 * the interface defined in this file.
+ */
+ __u16 version_major;
+ /*
+ * Driver minor version number.
+ * Increments whenever a backwards compatible change, such as the
+ * addition of a new IOCTL, is made to the interface defined in this
+ * file.
+ */
+ __u16 version_minor;
+ /*
+ * Driver build identifier.
+ * NULL-terminated string of the git hash of the commit the driver was
+ * built from. If the driver had uncommitted changes the string will
+ * end with "-dirty".
+ */
+ char version_build[GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE];
+};
+
+/* Query the driver's interface version. */
+#define GXP_GET_INTERFACE_VERSION \
+ _IOR(GXP_IOCTL_BASE, 26, struct gxp_interface_version_ioctl)
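+
+/*
+ * Illustrative usage sketch (not part of this interface): reading the
+ * driver's interface version. `gxp_fd` is hypothetical; printing assumes
+ * <stdio.h>.
+ *
+ *	struct gxp_interface_version_ioctl ver = {0};
+ *
+ *	if (ioctl(gxp_fd, GXP_GET_INTERFACE_VERSION, &ver) == 0)
+ *		printf("gxp interface %u.%u (%s)\n",
+ *		       (unsigned int)ver.version_major,
+ *		       (unsigned int)ver.version_minor, ver.version_build);
+ */
+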
/*
* Triggers a debug dump to be generated for cores.
@@ -829,4 +868,152 @@ struct gxp_tpu_mbx_queue_ioctl {
*/
#define GXP_TRIGGER_DEBUG_DUMP _IOW(GXP_IOCTL_BASE, 27, __u32)
+#define GXP_REGISTER_MCU_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 28, struct gxp_register_telemetry_eventfd_ioctl)
+
+#define GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 29, struct gxp_register_telemetry_eventfd_ioctl)
+
+struct gxp_mailbox_uci_command_ioctl {
+ /*
+ * Output:
+ * The sequence number assigned to this command. The caller can use
+ * this value to match responses fetched via `GXP_MAILBOX_UCI_RESPONSE`
+ * with this command.
+ */
+ __u64 sequence_number;
+ /* reserved fields */
+ __u8 reserved[8];
+ /*
+ * Input:
+ * Will be copied to the UCI command without modification.
+ */
+ __u8 opaque[48];
+};
+
+/*
+ * Push an element to the UCI command queue.
+ *
+ * The client must hold a BLOCK wakelock.
+ */
+#define GXP_MAILBOX_UCI_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 30, struct gxp_mailbox_uci_command_ioctl)
+
+struct gxp_mailbox_uci_response_ioctl {
+ /*
+ * Output:
+ * Sequence number indicating which command this response is for.
+ */
+ __u64 sequence_number;
+ /*
+ * Output:
+ * Driver error code.
+ * Indicates if the response was obtained successfully,
+ * `GXP_RESPONSE_ERROR_NONE`, or what error prevented the command
+ * from completing successfully.
+ */
+ __u16 error_code;
+ /* reserved fields */
+ __u8 reserved[6];
+ /*
+ * Output:
+	 * Copied from the UCI response without modification.
+	 * Only valid if `error_code` == GXP_RESPONSE_ERROR_NONE.
+ */
+ __u8 opaque[16];
+};
+
+/*
+ * Pop an element from the UCI response queue. Blocks until a mailbox
+ * response is available.
+ *
+ * The client must hold a BLOCK wakelock.
+ */
+#define GXP_MAILBOX_UCI_RESPONSE \
+ _IOR(GXP_IOCTL_BASE, 31, struct gxp_mailbox_uci_response_ioctl)
+
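+/*
+ * Illustrative usage sketch (not part of this interface): sending a UCI
+ * command and collecting its response. The opaque payload layout is
+ * firmware-defined and left zeroed here; a BLOCK wakelock must already be
+ * held.
+ *
+ *	struct gxp_mailbox_uci_command_ioctl ucmd = {0};
+ *	struct gxp_mailbox_uci_response_ioctl ursp = {0};
+ *
+ *	// fill ucmd.opaque[] with the firmware-defined command payload
+ *	ioctl(gxp_fd, GXP_MAILBOX_UCI_COMMAND, &ucmd);
+ *
+ *	ioctl(gxp_fd, GXP_MAILBOX_UCI_RESPONSE, &ursp);	// blocks
+ *	// ursp.opaque[] is valid only when ursp.error_code is
+ *	// GXP_RESPONSE_ERROR_NONE; match ursp.sequence_number against
+ *	// ucmd.sequence_number to pair the response with its command.
+ */
+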
+/*
+ * struct gxp_create_sync_fence_data
+ * @seqno: the seqno to initialize the fence with
+ * @timeline_name: the name of the timeline the fence belongs to
+ * @fence: returns the fd of the new sync_file with the new fence
+ *
+ * Timeline names can be up to 128 characters (including trailing NUL byte)
+ * for gxp debugfs and kernel debug logs. These names are truncated to 32
+ * characters in the data returned by the standard SYNC_IOC_FILE_INFO
+ * ioctl.
+ */
+#define GXP_SYNC_TIMELINE_NAME_LEN 128
+struct gxp_create_sync_fence_data {
+ __u32 seqno;
+ char timeline_name[GXP_SYNC_TIMELINE_NAME_LEN];
+ __s32 fence;
+};
+
+/*
+ * Create a DMA sync fence, return the sync_file fd for the new fence.
+ *
+ * The client must have allocated a virtual device.
+ */
+#define GXP_CREATE_SYNC_FENCE \
+ _IOWR(GXP_IOCTL_BASE, 32, struct gxp_create_sync_fence_data)
+
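+/*
+ * Illustrative usage sketch (not part of this interface): creating a sync
+ * fence on a named timeline. The timeline name and starting seqno are
+ * illustrative.
+ *
+ *	struct gxp_create_sync_fence_data fence_data = {
+ *		.seqno = 1,
+ *		.timeline_name = "gxp-example-timeline",
+ *	};
+ *
+ *	ioctl(gxp_fd, GXP_CREATE_SYNC_FENCE, &fence_data);
+ *	// fence_data.fence is the sync_file fd for the new fence
+ */
+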
+/*
+ * struct gxp_signal_sync_fence_data
+ * @fence: fd of the sync_file for the fence
+ * @error: error status errno value or zero for success
+ */
+struct gxp_signal_sync_fence_data {
+ __s32 fence;
+ __s32 error;
+};
+
+/*
+ * Signal a DMA sync fence with optional error status.
+ * Can pass a sync_file fd created by any driver.
+ * Signals the first DMA sync fence in the sync file.
+ */
+#define GXP_SIGNAL_SYNC_FENCE \
+ _IOW(GXP_IOCTL_BASE, 33, struct gxp_signal_sync_fence_data)
+
+/*
+ * struct gxp_sync_fence_status
+ * @fence: fd of the sync_file for the fence
+ * @status: returns:
+ * 0 if active
+ * 1 if signaled with no error
+ * negative errno value if signaled with error
+ */
+struct gxp_sync_fence_status {
+ __s32 fence;
+ __s32 status;
+};
+
+/*
+ * Retrieve DMA sync fence status.
+ * Can pass a sync_file fd created by any driver.
+ * Returns the status of the first DMA sync fence in the sync file.
+ */
+#define GXP_SYNC_FENCE_STATUS \
+ _IOWR(GXP_IOCTL_BASE, 34, struct gxp_sync_fence_status)
+
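+/*
+ * Illustrative usage sketch (not part of this interface): signaling the fence
+ * created in the sketch above and then querying its status through the same
+ * sync_file fd.
+ *
+ *	struct gxp_signal_sync_fence_data sig = {
+ *		.fence = fence_data.fence,
+ *		.error = 0,			// signal success
+ *	};
+ *	struct gxp_sync_fence_status st = { .fence = fence_data.fence };
+ *
+ *	ioctl(gxp_fd, GXP_SIGNAL_SYNC_FENCE, &sig);
+ *	ioctl(gxp_fd, GXP_SYNC_FENCE_STATUS, &st);
+ *	// st.status: 0 = active, 1 = signaled, negative errno value = error
+ */
+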
+/*
+ * struct gxp_register_invalidated_eventfd_ioctl
+ * @eventfd: File-descriptor obtained via eventfd().
+ * Not used during the unregister step.
+ */
+struct gxp_register_invalidated_eventfd_ioctl {
+ __u32 eventfd;
+};
+
+/*
+ * Registers an eventfd which will be triggered when the device crashes and
+ * the virtual device of the client is invalidated.
+ */
+#define GXP_REGISTER_INVALIDATED_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 35, struct gxp_register_invalidated_eventfd_ioctl)
+
+#define GXP_UNREGISTER_INVALIDATED_EVENTFD \
+ _IOW(GXP_IOCTL_BASE, 36, struct gxp_register_invalidated_eventfd_ioctl)
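+
+/*
+ * Illustrative usage sketch (not part of this interface): registering an
+ * eventfd to learn when the client's virtual device has been invalidated
+ * after a device crash. `efd` would come from eventfd(2).
+ *
+ *	struct gxp_register_invalidated_eventfd_ioctl inval = {
+ *		.eventfd = efd,
+ *	};
+ *
+ *	ioctl(gxp_fd, GXP_REGISTER_INVALIDATED_EVENTFD, &inval);
+ *	// ... later ...
+ *	ioctl(gxp_fd, GXP_UNREGISTER_INVALIDATED_EVENTFD, &inval);
+ */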
+
#endif /* __GXP_H__ */
diff --git a/mm-backport.h b/mm-backport.h
deleted file mode 100644
index c435281..0000000
--- a/mm-backport.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Backport mm APIs.
- *
- * Copyright (C) 2021 Google LLC
- */
-#ifndef __MM_BACKPORT_H__
-#define __MM_BACKPORT_H__
-
-#include <linux/mm.h>
-#include <linux/version.h>
-
-#if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
-
-/*
- * Define pin_user_pages* which are introduced in Linux 5.6.
- *
- * We simply define pin_user_pages* as get_user_pages* here so our driver can
- * prefer PIN over GET when possible.
- */
-#ifndef FOLL_PIN
-
-/* define as zero to prevent older get_user_pages* returning EINVAL */
-#define FOLL_LONGTERM 0
-
-#define pin_user_pages_fast get_user_pages_fast
-#define unpin_user_page put_page
-
-#endif /* FOLL_PIN */
-
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0) */
-
-#endif /* __MM_BACKPORT_H__ */