author     Robin Peng <robinpeng@google.com>  2022-12-08 05:38:50 +0000
committer  Robin Peng <robinpeng@google.com>  2022-12-08 05:38:50 +0000
commit     45c6a20ada39db4a105af9e3a8d6217d83096e44 (patch)
tree       96c5934f23fba6f0b7666c275d4fccf041297bed
parent     621453cfa38f8e3ac09572d3549dc0ba9e9949be (diff)
parent     52c07f728e8f5b7d5ddb4878faf60912a2aaf8f8 (diff)
download   zuma-45c6a20ada39db4a105af9e3a8d6217d83096e44.tar.gz

Merge android13-gs-pixel-5.15 into android14-gs-pixel-5.15

Bug: 260174400
Change-Id: I70052516d11e786eb1d57913f3a5779c145f4096
Signed-off-by: Robin Peng <robinpeng@google.com>
-rw-r--r--  Makefile                                              |   1
-rw-r--r--  callisto-platform.c                                   | 237
-rw-r--r--  callisto/csrs.h                                       |  51
-rw-r--r--  callisto/lpm.h                                        | 101
-rw-r--r--  gcip-kernel-driver/include/gcip/gcip-image-config.h  |   4
-rw-r--r--  gxp-client.c                                          |  19
-rw-r--r--  gxp-client.h                                          |  11
-rw-r--r--  gxp-common-platform.c                                 |  65
-rw-r--r--  gxp-config.h                                          |   5
-rw-r--r--  gxp-core-telemetry.c                                  | 118
-rw-r--r--  gxp-core-telemetry.h                                  |  32
-rw-r--r--  gxp-debugfs.c                                         |   8
-rw-r--r--  gxp-firmware.c                                        |  19
-rw-r--r--  gxp-firmware.h                                        |   2
-rw-r--r--  gxp-internal.h                                        |   1
-rw-r--r--  gxp-kci.c                                             |  74
-rw-r--r--  gxp-kci.h                                             |   1
-rw-r--r--  gxp-lpm.c                                             |  44
-rw-r--r--  gxp-lpm.h                                             |  56
-rw-r--r--  gxp-mcu-fs.c                                          | 250
-rw-r--r--  gxp-mcu-fs.h                                          |  36
-rw-r--r--  gxp-pm.c                                              |   5
-rw-r--r--  gxp-pm.h                                              |  18
-rw-r--r--  gxp-vd.c                                              |   8
-rw-r--r--  gxp.h                                                 |  12
25 files changed, 753 insertions, 425 deletions
diff --git a/Makefile b/Makefile
index acbf0ed..f69cdab 100644
--- a/Makefile
+++ b/Makefile
@@ -45,6 +45,7 @@ gxp-objs += \
gxp-dci.o \
gxp-kci.o \
gxp-mcu-firmware.o \
+ gxp-mcu-fs.o \
gxp-mcu-telemetry.o \
gxp-mcu.o \
gxp-uci.o \
diff --git a/callisto-platform.c b/callisto-platform.c
index 26ea3cc..bc274a3 100644
--- a/callisto-platform.c
+++ b/callisto-platform.c
@@ -10,13 +10,11 @@
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <gcip/gcip-telemetry.h>
-
#include "callisto-platform.h"
#include "gxp-common-platform.c"
#include "gxp-kci.h"
-#include "gxp-mcu-telemetry.h"
+#include "gxp-mcu-fs.h"
#include "gxp-uci.h"
#include "gxp-usage-stats.h"
@@ -80,235 +78,6 @@ static void callisto_platform_before_remove(struct gxp_dev *gxp)
gxp_usage_stats_exit(gxp);
}
-static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
- struct gxp_mailbox_command_ioctl *ibuf)
-{
- struct gxp_dev *gxp = client->gxp;
- struct callisto_dev *callisto = to_callisto_dev(gxp);
- struct gxp_uci_command cmd;
- int ret;
-
- if (ibuf->virtual_core_id >= GXP_NUM_CORES)
- return -EINVAL;
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAILBOX_COMMAND")) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Caller must hold BLOCK wakelock */
- if (!client->has_block_wakelock) {
- dev_err(gxp->dev,
- "GXP_MAILBOX_COMMAND requires the client hold a BLOCK wakelock\n");
- ret = -ENODEV;
- goto out;
- }
-
- /* Use at least one core for the command */
- if (ibuf->num_cores == 0)
- ibuf->num_cores = 1;
-
- /* Pack the command structure */
- cmd.core_command_params.address = ibuf->device_address;
- cmd.core_command_params.size = ibuf->size;
- cmd.core_command_params.num_cores = ibuf->num_cores;
- /* Plus 1 to align with power states in MCU firmware. */
- cmd.core_command_params.dsp_operating_point = ibuf->gxp_power_state + 1;
- cmd.core_command_params.memory_operating_point = ibuf->memory_power_state;
- /* cmd.seq is assigned by mailbox implementation */
- cmd.type = CORE_COMMAND;
-
- /* TODO(b/248179414): Remove core assignment when MCU fw re-enable sticky core scheduler. */
- {
- int core;
-
- down_read(&gxp->vd_semaphore);
- core = gxp_vd_virt_core_to_phys_core(client->vd,
- ibuf->virtual_core_id);
- up_read(&gxp->vd_semaphore);
- if (core < 0) {
- dev_err(gxp->dev,
- "Mailbox command failed: Invalid virtual core id (%u)\n",
- ibuf->virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
- cmd.priority = core;
- }
-
- cmd.client_id = client->vd->client_id;
-
- /*
- * TODO(b/248196344): Use the only one permitted eventfd for the virtual device
- * when MCU fw re-enable sticky core scheduler.
- */
- ret = gxp_uci_send_command(
- &callisto->mcu.uci, client->vd, &cmd,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
- client->mb_eventfds[ibuf->virtual_core_id]);
- if (ret) {
- dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
- ret);
- goto out;
- }
- ibuf->sequence_number = cmd.seq;
-
-out:
- up_read(&client->semaphore);
- return ret;
-}
-
-static int gxp_ioctl_uci_command(struct gxp_client *client,
- struct gxp_mailbox_command_ioctl __user *argp)
-{
- struct gxp_mailbox_command_ioctl ibuf;
- int ret;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- ret = gxp_ioctl_uci_command_helper(client, &ibuf);
- if (ret)
- return ret;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-gxp_ioctl_uci_response(struct gxp_client *client,
- struct gxp_mailbox_response_ioctl __user *argp)
-{
- struct gxp_mailbox_response_ioctl ibuf;
- int ret = 0;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- down_read(&client->semaphore);
-
- if (!check_client_has_available_vd(client, "GXP_MAILBOX_RESPONSE")) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Caller must hold BLOCK wakelock */
- if (!client->has_block_wakelock) {
- dev_err(client->gxp->dev,
- "GXP_MAILBOX_RESPONSE requires the client hold a BLOCK wakelock\n");
- ret = -ENODEV;
- goto out;
- }
-
- ret = gxp_uci_wait_async_response(
- &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID],
- &ibuf.sequence_number, &ibuf.cmd_retval, &ibuf.error_code);
- if (ret)
- goto out;
-
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- ret = -EFAULT;
-
-out:
- up_read(&client->semaphore);
-
- return ret;
-}
-
-static inline enum gcip_telemetry_type to_gcip_telemetry_type(u8 type)
-{
- if (type == GXP_TELEMETRY_TYPE_LOGGING)
- return GCIP_TELEMETRY_LOG;
- else
- return GCIP_TELEMETRY_TRACE;
-}
-
-static int gxp_register_mcu_telemetry_eventfd(
- struct gxp_client *client,
- struct gxp_register_telemetry_eventfd_ioctl __user *argp)
-{
- struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
- struct gxp_register_telemetry_eventfd_ioctl ibuf;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- return gxp_mcu_telemetry_register_eventfd(
- mcu, to_gcip_telemetry_type(ibuf.type), ibuf.eventfd);
-}
-
-static int gxp_unregister_mcu_telemetry_eventfd(
- struct gxp_client *client,
- struct gxp_register_telemetry_eventfd_ioctl __user *argp)
-{
- struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
- struct gxp_register_telemetry_eventfd_ioctl ibuf;
-
- if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
- return -EFAULT;
-
- return gxp_mcu_telemetry_unregister_eventfd(
- mcu, to_gcip_telemetry_type(ibuf.type));
-}
-
-static long callisto_platform_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct gxp_client *client = file->private_data;
- void __user *argp = (void __user *)arg;
- long ret;
-
- if (gxp_is_direct_mode(client->gxp))
- return -ENOTTY;
- switch (cmd) {
- case GXP_MAILBOX_COMMAND:
- ret = gxp_ioctl_uci_command(client, argp);
- break;
- case GXP_MAILBOX_RESPONSE:
- ret = gxp_ioctl_uci_response(client, argp);
- break;
- case GXP_REGISTER_MCU_TELEMETRY_EVENTFD:
- ret = gxp_register_mcu_telemetry_eventfd(client, argp);
- break;
- case GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD:
- ret = gxp_unregister_mcu_telemetry_eventfd(client, argp);
- break;
- default:
- ret = -ENOTTY; /* unknown command */
- }
-
- return ret;
-}
-
-static int callisto_platform_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct gxp_client *client = file->private_data;
- struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
- int ret;
-
- if (gxp_is_direct_mode(client->gxp))
- return -EOPNOTSUPP;
-
- switch (vma->vm_pgoff << PAGE_SHIFT) {
- case GXP_MMAP_MCU_LOG_BUFFER_OFFSET:
- ret = gxp_mcu_telemetry_mmap_buffer(mcu, GCIP_TELEMETRY_LOG,
- vma);
- break;
- case GXP_MMAP_MCU_TRACE_BUFFER_OFFSET:
- ret = gxp_mcu_telemetry_mmap_buffer(mcu, GCIP_TELEMETRY_TRACE,
- vma);
- break;
- default:
- ret = -EOPNOTSUPP; /* unknown offset */
- }
-
- return ret;
-}
-
static int callisto_request_power_states(struct gxp_client *client,
struct gxp_power_states power_states)
{
@@ -495,8 +264,8 @@ static int gxp_platform_probe(struct platform_device *pdev)
callisto->gxp.parse_dt = callisto_platform_parse_dt;
callisto->gxp.after_probe = callisto_platform_after_probe;
callisto->gxp.before_remove = callisto_platform_before_remove;
- callisto->gxp.handle_ioctl = callisto_platform_ioctl;
- callisto->gxp.handle_mmap = callisto_platform_mmap;
+ callisto->gxp.handle_ioctl = gxp_mcu_ioctl;
+ callisto->gxp.handle_mmap = gxp_mcu_mmap;
callisto->gxp.after_vd_block_ready =
callisto_platform_after_vd_block_ready;
callisto->gxp.before_vd_block_unready =
diff --git a/callisto/csrs.h b/callisto/csrs.h
index 7b1cf1a..7a4c9dd 100644
--- a/callisto/csrs.h
+++ b/callisto/csrs.h
@@ -15,11 +15,6 @@
enum gxp_csrs {
GXP_REG_LPM_VERSION = 0x40000,
- GXP_REG_LPM_PSM_0 = 0x41000,
- GXP_REG_LPM_PSM_1 = 0x42000,
- GXP_REG_LPM_PSM_2 = 0x43000,
- GXP_REG_LPM_PSM_3 = 0x44000,
- GXP_REG_LPM_PSM_4 = 0x45000,
GXP_REG_AURORA_REVISION = 0x80000,
GXP_REG_COMMON_INT_POL_0 = 0x81000,
GXP_REG_COMMON_INT_POL_1 = 0x81004,
@@ -97,50 +92,4 @@ enum gxp_csrs {
#define PLL_CON0_PLL_AUR 0x100
#define PLL_CON0_NOC_USER 0x610
-/* LPM Registers */
-#define LPM_VERSION_OFFSET 0x0
-#define TRIGGER_CSR_START_OFFSET 0x4
-#define IMEM_START_OFFSET 0x8
-#define LPM_CONFIG_OFFSET 0xC
-#define PSM_DESCRIPTOR_OFFSET 0x10
-#define EVENTS_EN_OFFSET 0x100
-#define EVENTS_INV_OFFSET 0x140
-#define FUNCTION_SELECT_OFFSET 0x180
-#define TRIGGER_STATUS_OFFSET 0x184
-#define EVENT_STATUS_OFFSET 0x188
-#define OPS_OFFSET 0x800
-#define PSM_DESCRIPTOR_BASE(_x_) ((_x_) << 2)
-#define PSM_DESCRIPTOR_COUNT 5
-#define EVENTS_EN_BASE(_x_) ((_x_) << 2)
-#define EVENTS_EN_COUNT 16
-#define EVENTS_INV_BASE(_x_) ((_x_) << 2)
-#define EVENTS_INV_COUNT 16
-#define OPS_BASE(_x_) ((_x_) << 2)
-#define OPS_COUNT 128
-#define PSM_COUNT 5
-#define PSM_STATE_TABLE_BASE(_x_) ((_x_) << 8)
-#define PSM_STATE_TABLE_COUNT 6
-#define PSM_TRANS_BASE(_x_) ((_x_) << 5)
-#define PSM_TRANS_COUNT 4
-#define PSM_DMEM_BASE(_x_) ((_x_) << 2)
-#define PSM_DATA_COUNT 32
-#define PSM_NEXT_STATE_OFFSET 0x0
-#define PSM_SEQ_ADDR_OFFSET 0x4
-#define PSM_TIMER_VAL_OFFSET 0x8
-#define PSM_TIMER_EN_OFFSET 0xC
-#define PSM_TRIGGER_NUM_OFFSET 0x10
-#define PSM_TRIGGER_EN_OFFSET 0x14
-#define PSM_ENABLE_STATE_OFFSET 0x80
-#define PSM_DATA_OFFSET 0x600
-#define PSM_CFG_OFFSET 0x680
-#define PSM_START_OFFSET 0x684
-#define PSM_STATUS_OFFSET 0x688
-#define PSM_DEBUG_CFG_OFFSET 0x68C
-#define PSM_BREAK_ADDR_OFFSET 0x694
-#define PSM_GPIN_LO_RD_OFFSET 0x6A0
-#define PSM_GPIN_HI_RD_OFFSET 0x6A4
-#define PSM_GPOUT_LO_RD_OFFSET 0x6B0
-#define PSM_GPOUT_HI_RD_OFFSET 0x6B4
-#define PSM_DEBUG_STATUS_OFFSET 0x6B8
-
#endif /* __CALLISTO_CSRS_H__ */
diff --git a/callisto/lpm.h b/callisto/lpm.h
index eb9e98d..6c1bde9 100644
--- a/callisto/lpm.h
+++ b/callisto/lpm.h
@@ -8,6 +8,8 @@
#ifndef __CALLISTO_LPM_H__
#define __CALLISTO_LPM_H__
+#include <linux/types.h>
+
enum gxp_lpm_psm {
LPM_PSM_CORE0,
LPM_PSM_CORE1,
@@ -19,4 +21,103 @@ enum gxp_lpm_psm {
#define CORE_TO_PSM(core) (LPM_PSM_CORE0 + (core))
+enum lpm_psm_csrs {
+ LPM_REG_ENABLE_STATE_0 = 0x080,
+ LPM_REG_ENABLE_STATE_1 = 0x180,
+ LPM_REG_ENABLE_STATE_2 = 0x280,
+ LPM_REG_ENABLE_STATE_3 = 0x380,
+};
+
+enum lpm_psm_base {
+ GXP_REG_LPM_PSM_0 = 0x41000,
+ GXP_REG_LPM_PSM_1 = 0x42000,
+ GXP_REG_LPM_PSM_2 = 0x43000,
+ GXP_REG_LPM_PSM_3 = 0x44000,
+ GXP_REG_LPM_PSM_4 = 0x45000,
+};
+
+#define PSM_STATE_TABLE_SZ (LPM_REG_ENABLE_STATE_1 - LPM_REG_ENABLE_STATE_0)
+
+/* LPM address space starts at lpm_version register */
+#define GXP_LPM_BASE GXP_REG_LPM_VERSION
+#define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0
+#define GXP_LPM_PSM_SIZE (GXP_REG_LPM_PSM_1 - GXP_REG_LPM_PSM_0)
+
+/* LPM Registers */
+#define LPM_VERSION_OFFSET 0x0
+#define TRIGGER_CSR_START_OFFSET 0x4
+#define IMEM_START_OFFSET 0x8
+#define LPM_CONFIG_OFFSET 0xC
+#define PSM_DESCRIPTOR_OFFSET 0x10
+#define EVENTS_EN_OFFSET 0x100
+#define EVENTS_INV_OFFSET 0x140
+#define FUNCTION_SELECT_OFFSET 0x180
+#define TRIGGER_STATUS_OFFSET 0x184
+#define EVENT_STATUS_OFFSET 0x188
+#define OPS_OFFSET 0x800
+#define PSM_DESCRIPTOR_BASE(_x_) ((_x_) << 2)
+#define PSM_DESCRIPTOR_COUNT 5
+#define EVENTS_EN_BASE(_x_) ((_x_) << 2)
+#define EVENTS_EN_COUNT 16
+#define EVENTS_INV_BASE(_x_) ((_x_) << 2)
+#define EVENTS_INV_COUNT 16
+#define OPS_BASE(_x_) ((_x_) << 2)
+#define OPS_COUNT 128
+#define PSM_COUNT 5
+#define PSM_STATE_TABLE_BASE(_x_) ((_x_) << 8)
+#define PSM_STATE_TABLE_COUNT 6
+#define PSM_TRANS_BASE(_x_) ((_x_) << 5)
+#define PSM_TRANS_COUNT 4
+#define PSM_DMEM_BASE(_x_) ((_x_) << 2)
+#define PSM_DATA_COUNT 32
+#define PSM_NEXT_STATE_OFFSET 0x0
+#define PSM_SEQ_ADDR_OFFSET 0x4
+#define PSM_TIMER_VAL_OFFSET 0x8
+#define PSM_TIMER_EN_OFFSET 0xC
+#define PSM_TRIGGER_NUM_OFFSET 0x10
+#define PSM_TRIGGER_EN_OFFSET 0x14
+#define PSM_ENABLE_STATE_OFFSET 0x80
+#define PSM_DATA_OFFSET 0x600
+#define PSM_CFG_OFFSET 0x680
+#define PSM_START_OFFSET 0x684
+#define PSM_STATUS_OFFSET 0x688
+#define PSM_DEBUG_CFG_OFFSET 0x68C
+#define PSM_BREAK_ADDR_OFFSET 0x694
+#define PSM_GPIN_LO_RD_OFFSET 0x6A0
+#define PSM_GPIN_HI_RD_OFFSET 0x6A4
+#define PSM_GPOUT_LO_RD_OFFSET 0x6B0
+#define PSM_GPOUT_HI_RD_OFFSET 0x6B4
+#define PSM_DEBUG_STATUS_OFFSET 0x6B8
+
+static inline u32 gxp_lpm_psm_get_status_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) +
+ PSM_STATUS_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_start_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + PSM_START_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_cfg_offset(enum gxp_lpm_psm psm)
+{
+ if (psm >= LPM_NUM_PSMS)
+ return 0;
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + PSM_CFG_OFFSET;
+}
+
+static inline u32 gxp_lpm_psm_get_state_offset(enum gxp_lpm_psm psm, uint state)
+{
+ if (psm >= LPM_NUM_PSMS || state > 3)
+ return 0;
+
+ return GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) +
+ LPM_REG_ENABLE_STATE_0 + (PSM_STATE_TABLE_SZ * state);
+}
+
#endif /* __CALLISTO_LPM_H__ */
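A quick sanity check of the new accessors (editorial illustration, not part of the commit; it assumes LPM_PSM_CORE2 is the enum value 2, which is only partially visible in the hunk above): the helper arithmetic reproduces the same absolute offsets the removed GXP_REG_LPM_PSM_* defines encoded directly.

	/* GXP_LPM_PSM_SIZE = 0x42000 - 0x41000 = 0x1000 */
	u32 status_off = gxp_lpm_psm_get_status_offset(LPM_PSM_CORE2);
	/*
	 * 0x41000 + (0x1000 * 2) + 0x688 = 0x43688, i.e. the old
	 * GXP_REG_LPM_PSM_2 base (0x43000) plus PSM_STATUS_OFFSET (0x688),
	 * now expressed relative to the LPM register block.
	 */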
diff --git a/gcip-kernel-driver/include/gcip/gcip-image-config.h b/gcip-kernel-driver/include/gcip/gcip-image-config.h
index 6c3333e..a3539a0 100644
--- a/gcip-kernel-driver/include/gcip/gcip-image-config.h
+++ b/gcip-kernel-driver/include/gcip/gcip-image-config.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
#define GCIP_FW_NUM_VERSIONS 4
-#define GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS 23
+#define GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS 22
#define GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS 5
#define GCIP_FW_PRIV_LEVEL_GSA 0
@@ -40,6 +40,8 @@ struct gcip_image_config {
*/
__u32 image_config_value;
} iommu_mappings[GCIP_IMG_CFG_MAX_IOMMU_MAPPINGS];
+ __u32 remapped_data_start;
+ __u32 remapped_data_size;
__u32 num_ns_iommu_mappings;
__u32 ns_iommu_mappings[GCIP_IMG_CFG_MAX_NS_IOMMU_MAPPINGS];
} __packed;
diff --git a/gxp-client.c b/gxp-client.c
index 3c6fb80..a08547d 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -270,3 +270,22 @@ void gxp_client_release_vd_wakelock(struct gxp_client *client)
gxp_client_request_power_states(client, off_states);
client->has_vd_wakelock = false;
}
+
+bool gxp_client_has_available_vd(struct gxp_client *client, const char *name)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ lockdep_assert_held(&client->semaphore);
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "%s requires the client allocate a VIRTUAL_DEVICE\n",
+ name);
+ return false;
+ }
+ if (client->vd->state == GXP_VD_UNAVAILABLE) {
+ dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
+ name);
+ return false;
+ }
+ return true;
+}
diff --git a/gxp-client.h b/gxp-client.h
index 735d10a..56b50e8 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -125,4 +125,15 @@ int gxp_client_acquire_vd_wakelock(struct gxp_client *client,
*/
void gxp_client_release_vd_wakelock(struct gxp_client *client);
+/**
+ * gxp_client_has_available_vd() - Returns whether @client has an available
+ * virtual device.
+ *
+ * @client: The client to check.
+ * @name: The string used for logging when the client has an invalid VD.
+ *
+ * The caller must have locked client->semaphore.
+ */
+bool gxp_client_has_available_vd(struct gxp_client *client, const char *name);
+
#endif /* __GXP_CLIENT_H__ */
diff --git a/gxp-common-platform.c b/gxp-common-platform.c
index 2c32d3f..5128b20 100644
--- a/gxp-common-platform.c
+++ b/gxp-common-platform.c
@@ -33,6 +33,7 @@
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-internal.h"
+#include "gxp-lpm.h"
#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
#include "gxp-mapping.h"
@@ -56,27 +57,6 @@
static struct gxp_dev *gxp_debug_pointer;
-/* Caller needs to hold client->semaphore */
-static bool check_client_has_available_vd(struct gxp_client *client,
- char *ioctl_name)
-{
- struct gxp_dev *gxp = client->gxp;
-
- lockdep_assert_held(&client->semaphore);
- if (!client->vd) {
- dev_err(gxp->dev,
- "%s requires the client allocate a VIRTUAL_DEVICE\n",
- ioctl_name);
- return false;
- }
- if (client->vd->state == GXP_VD_UNAVAILABLE) {
- dev_err(gxp->dev, "Cannot do %s on a broken virtual device\n",
- ioctl_name);
- return false;
- }
- return true;
-}
-
/* Caller needs to hold client->semaphore for reading */
static bool check_client_has_available_vd_wakelock(struct gxp_client *client,
char *ioctl_name)
@@ -248,7 +228,7 @@ static int gxp_map_buffer(struct gxp_client *client,
down_read(&client->semaphore);
- if (!check_client_has_available_vd(client, "GXP_MAP_BUFFER")) {
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_BUFFER")) {
ret = -ENODEV;
goto out;
}
@@ -978,7 +958,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
down_write(&client->semaphore);
- if (!check_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_TPU_MBX_QUEUE")) {
ret = -ENODEV;
goto out_unlock_client_semaphore;
}
@@ -1274,7 +1254,7 @@ static int gxp_map_dmabuf(struct gxp_client *client,
down_read(&client->semaphore);
- if (!check_client_has_available_vd(client, "GXP_MAP_DMABUF")) {
+ if (!gxp_client_has_available_vd(client, "GXP_MAP_DMABUF")) {
ret = -ENODEV;
goto out_unlock;
}
@@ -1380,7 +1360,7 @@ static int gxp_register_mailbox_eventfd(
down_write(&client->semaphore);
- if (!check_client_has_available_vd(client, "GXP_REGISTER_MAILBOX_EVENTFD")) {
+ if (!gxp_client_has_available_vd(client, "GXP_REGISTER_MAILBOX_EVENTFD")) {
ret = -ENODEV;
goto out;
}
@@ -1639,11 +1619,11 @@ static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
}
switch (vma->vm_pgoff << PAGE_SHIFT) {
- case GXP_MMAP_CORE_LOG_BUFFER_OFFSET:
- return gxp_core_telemetry_mmap_buffers(
+ case GXP_MMAP_CORE_LOG_BUFFER_OFFSET_LEGACY:
+ return gxp_core_telemetry_mmap_buffers_legacy(
client->gxp, GXP_TELEMETRY_TYPE_LOGGING, vma);
- case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET:
- return gxp_core_telemetry_mmap_buffers(
+ case GXP_MMAP_CORE_TRACE_BUFFER_OFFSET_LEGACY:
+ return gxp_core_telemetry_mmap_buffers_legacy(
client->gxp, GXP_TELEMETRY_TYPE_TRACING, vma);
default:
return -EINVAL;
@@ -1718,6 +1698,24 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
}
#endif
+#ifdef GXP_SEPARATE_LPM_OFFSET
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpm");
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "Failed to get LPM resource\n");
+ return -ENODEV;
+ }
+ gxp->lpm_regs.paddr = r->start;
+ gxp->lpm_regs.size = resource_size(r);
+ gxp->lpm_regs.vaddr = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(gxp->lpm_regs.vaddr)) {
+ dev_err(dev, "Failed to map LPM registers\n");
+ return -ENODEV;
+ }
+#else
+ gxp->lpm_regs.vaddr = gxp->regs.vaddr;
+ gxp->lpm_regs.size = gxp->regs.size;
+ gxp->lpm_regs.paddr = gxp->regs.paddr;
+#endif
ret = gxp_pm_init(gxp);
if (ret) {
dev_err(dev, "Failed to init power management (ret=%d)\n", ret);
@@ -1864,7 +1862,11 @@ static int gxp_common_platform_probe(struct platform_device *pdev, struct gxp_de
}
gxp_fw_data_init(gxp);
- gxp_core_telemetry_init(gxp);
+ ret = gxp_core_telemetry_init(gxp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize core telemetry (ret=%d)", ret);
+ goto err_fw_data_destroy;
+ }
gxp_create_debugfs(gxp);
gxp->thermal_mgr = gxp_thermal_init(gxp);
if (!gxp->thermal_mgr)
@@ -1897,6 +1899,8 @@ err_before_remove:
gxp->before_remove(gxp);
err_vd_destroy:
gxp_remove_debugfs(gxp);
+ gxp_core_telemetry_exit(gxp);
+err_fw_data_destroy:
gxp_fw_data_destroy(gxp);
put_device(gxp->gsa_dev);
gxp_vd_destroy(gxp);
@@ -1924,6 +1928,7 @@ static int gxp_common_platform_remove(struct platform_device *pdev)
if (gxp->before_remove)
gxp->before_remove(gxp);
gxp_remove_debugfs(gxp);
+ gxp_core_telemetry_exit(gxp);
gxp_fw_data_destroy(gxp);
if (gxp->gsa_dev)
put_device(gxp->gsa_dev);
diff --git a/gxp-config.h b/gxp-config.h
index 7567e29..7f3856e 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -42,9 +42,4 @@
#define GXP_HAS_MCU 1
#endif
-/* LPM address space starts at lpm_version register */
-#define GXP_LPM_BASE GXP_REG_LPM_VERSION
-#define GXP_LPM_PSM_0_BASE GXP_REG_LPM_PSM_0
-#define GXP_LPM_PSM_SIZE (GXP_REG_LPM_PSM_1 - GXP_REG_LPM_PSM_0)
-
#endif /* __GXP_CONFIG_H__ */
diff --git a/gxp-core-telemetry.c b/gxp-core-telemetry.c
index 48e333a..9b8cf85 100644
--- a/gxp-core-telemetry.c
+++ b/gxp-core-telemetry.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021-2022 Google LLC
*/
+#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/wait.h>
@@ -17,6 +18,9 @@
#include "gxp-notification.h"
#include "gxp-vd.h"
+static uint gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
+module_param_named(core_telemetry_buffer_size, gxp_core_telemetry_buffer_size, uint, 0660);
+
static inline bool is_telemetry_enabled(struct gxp_dev *gxp, uint core, u8 type)
{
u32 device_status =
@@ -56,9 +60,14 @@ static void telemetry_status_notification_work(struct work_struct *work)
gxp_core_telemetry_status_notify(gxp, core);
}
+static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
+ size_t size);
+static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
+
int gxp_core_telemetry_init(struct gxp_dev *gxp)
{
struct gxp_core_telemetry_manager *mgr;
+ struct buffer_data *log_buff_data, *trace_buff_data;
uint i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
@@ -76,8 +85,45 @@ int gxp_core_telemetry_init(struct gxp_dev *gxp)
init_waitqueue_head(&mgr->waitq);
gxp->core_telemetry_mgr = mgr;
+ gxp_core_telemetry_buffer_size = ALIGN(gxp_core_telemetry_buffer_size,
+ CORE_TELEMETRY_BUFFER_UNIT_SIZE);
+ if ((gxp_core_telemetry_buffer_size < CORE_TELEMETRY_DEFAULT_BUFFER_SIZE) ||
+ (gxp_core_telemetry_buffer_size > CORE_TELEMETRY_MAX_BUFFER_SIZE)) {
+ dev_warn(gxp->dev,
+ "Invalid core telemetry buffer size, enforcing to default %u bytes\n",
+ CORE_TELEMETRY_DEFAULT_BUFFER_SIZE);
+ gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
+ }
+
+ /* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
+ mutex_lock(&mgr->lock);
+ log_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
+ if (IS_ERR_OR_NULL(log_buff_data)) {
+ dev_warn(gxp->dev,
+ "Failed to allocate per core log buffer of %u bytes\n",
+ gxp_core_telemetry_buffer_size);
+ goto err_free_buffers;
+ }
+ trace_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
+ if (IS_ERR_OR_NULL(trace_buff_data)) {
+ dev_warn(gxp->dev,
+ "Failed to allocate per core trace buffer of %u bytes\n",
+ gxp_core_telemetry_buffer_size);
+ free_telemetry_buffers(gxp, log_buff_data);
+ goto err_free_buffers;
+ }
+ gxp->core_telemetry_mgr->logging_buff_data = log_buff_data;
+ gxp->core_telemetry_mgr->tracing_buff_data = trace_buff_data;
+ mutex_unlock(&mgr->lock);
return 0;
+
+err_free_buffers:
+ mutex_unlock(&mgr->lock);
+ mutex_destroy(&mgr->lock);
+ devm_kfree(gxp->dev, mgr);
+ gxp->core_telemetry_mgr = NULL;
+ return -ENOMEM;
}
/* Wrapper struct to be used by the core telemetry vma_ops. */
@@ -101,8 +147,6 @@ static void telemetry_vma_open(struct vm_area_struct *vma)
mutex_unlock(&gxp->core_telemetry_mgr->lock);
}
-static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);
-
static void telemetry_vma_close(struct vm_area_struct *vma)
{
struct telemetry_vma_data *vma_data =
@@ -130,10 +174,10 @@ static void telemetry_vma_close(struct vm_area_struct *vma)
if (refcount_dec_and_test(&buff_data->ref_count)) {
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->core_telemetry_mgr->logging_buff_data = NULL;
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- gxp->core_telemetry_mgr->tracing_buff_data = NULL;
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
break;
default:
dev_warn(gxp->dev, "%s called with invalid type %u\n",
@@ -172,11 +216,11 @@ static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- if (gxp->core_telemetry_mgr->logging_buff_data)
+ if (gxp->core_telemetry_mgr->logging_buff_data_legacy)
return -EBUSY;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- if (gxp->core_telemetry_mgr->tracing_buff_data)
+ if (gxp->core_telemetry_mgr->tracing_buff_data_legacy)
return -EBUSY;
break;
default:
@@ -206,6 +250,7 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
size = size < PAGE_SIZE ? PAGE_SIZE : size;
+ /* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -248,6 +293,7 @@ static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data
{
int i;
+ /* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
for (i = 0; i < GXP_NUM_CORES; i++)
@@ -320,8 +366,8 @@ out:
return ret;
}
-int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma)
+int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
{
int ret = 0;
struct telemetry_vma_data *vma_data;
@@ -367,9 +413,9 @@ int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
/* Save book-keeping on the buffers in the core telemetry manager */
if (type == GXP_TELEMETRY_TYPE_LOGGING)
- gxp->core_telemetry_mgr->logging_buff_data = buff_data;
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = buff_data;
else /* type == GXP_TELEMETRY_TYPE_TRACING */
- gxp->core_telemetry_mgr->tracing_buff_data = buff_data;
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = buff_data;
mutex_unlock(&gxp->core_telemetry_mgr->lock);
@@ -404,10 +450,10 @@ int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type)
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->core_telemetry_mgr->logging_buff_data;
+ data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->core_telemetry_mgr->tracing_buff_data;
+ data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
break;
default:
ret = -EINVAL;
@@ -560,10 +606,10 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
/* Cleanup core telemetry manager's book-keeping */
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- data = gxp->core_telemetry_mgr->logging_buff_data;
+ data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- data = gxp->core_telemetry_mgr->tracing_buff_data;
+ data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
break;
default:
return -EINVAL;
@@ -600,10 +646,10 @@ static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
if (refcount_dec_and_test(&data->ref_count)) {
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
- gxp->core_telemetry_mgr->logging_buff_data = NULL;
+ gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
break;
case GXP_TELEMETRY_TYPE_TRACING:
- gxp->core_telemetry_mgr->tracing_buff_data = NULL;
+ gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
break;
default:
/* NO-OP, we returned above if `type` was invalid */
@@ -709,3 +755,43 @@ gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core)
return &mgr->notification_works[core].work;
}
+
+void gxp_core_telemetry_exit(struct gxp_dev *gxp)
+{
+ struct buffer_data *log_buff_data, *trace_buff_data;
+ struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;
+
+ if (!mgr) {
+ dev_warn(gxp->dev, "Core telemetry manager was not allocated\n");
+ return;
+ }
+
+ /* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
+ mutex_lock(&gxp->core_telemetry_mgr->lock);
+ log_buff_data = mgr->logging_buff_data;
+ trace_buff_data = mgr->tracing_buff_data;
+
+ if (!IS_ERR_OR_NULL(log_buff_data))
+ free_telemetry_buffers(gxp, log_buff_data);
+
+ if (!IS_ERR_OR_NULL(trace_buff_data))
+ free_telemetry_buffers(gxp, trace_buff_data);
+
+ mutex_unlock(&gxp->core_telemetry_mgr->lock);
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->logging_efd)) {
+ dev_warn(gxp->dev, "logging_efd was not released\n");
+ eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
+ gxp->core_telemetry_mgr->logging_efd = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->tracing_efd)) {
+ dev_warn(gxp->dev, "tracing_efd was not released\n");
+ eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
+ gxp->core_telemetry_mgr->tracing_efd = NULL;
+ }
+
+ mutex_destroy(&mgr->lock);
+ devm_kfree(gxp->dev, mgr);
+ gxp->core_telemetry_mgr = NULL;
+}
diff --git a/gxp-core-telemetry.h b/gxp-core-telemetry.h
index 432be71..4568d5c 100644
--- a/gxp-core-telemetry.h
+++ b/gxp-core-telemetry.h
@@ -16,6 +16,16 @@
#include "gxp-internal.h"
#include "gxp.h"
+/* Core telemetry buffer size is a multiple of 64 kB */
+#define CORE_TELEMETRY_BUFFER_UNIT_SIZE SZ_64K
+#define CORE_TELEMETRY_DEFAULT_BUFFER_SIZE CORE_TELEMETRY_BUFFER_UNIT_SIZE
+/**
+ * Maximum core telemetry buffer size that can be represented by GXP_GET_SPECS
+ * ioctl. 8 bits are reserved to represent telemetry buffer size in GXP_GET_SPECS
+ * ioctl and the size is represented in unit of CORE_TELEMETRY_BUFFER_UNIT_SIZE.
+ */
+#define CORE_TELEMETRY_MAX_BUFFER_SIZE (U8_MAX * CORE_TELEMETRY_BUFFER_UNIT_SIZE)
+
struct gxp_core_telemetry_work {
struct work_struct work;
struct gxp_dev *gxp;
@@ -29,7 +39,8 @@ struct gxp_core_telemetry_manager {
u32 size;
refcount_t ref_count;
bool is_enabled;
- } *logging_buff_data, *tracing_buff_data;
+ } *logging_buff_data_legacy, *tracing_buff_data_legacy,
+ *logging_buff_data, *tracing_buff_data;
/* Protects logging_buff_data and tracing_buff_data */
struct mutex lock;
struct gxp_core_telemetry_work notification_works[GXP_NUM_CORES];
@@ -49,9 +60,9 @@ struct gxp_core_telemetry_manager {
int gxp_core_telemetry_init(struct gxp_dev *gxp);
/**
- * gxp_core_telemetry_mmap_buffers() - Allocate a telemetry buffer for each core
- * and map them to their core and the
- * user-space vma
+ * gxp_core_telemetry_mmap_buffers_legacy() - Allocate a telemetry buffer for
+ * each core and map them to their
+ * core and the user-space vma
* @gxp: The GXP device to create the buffers for
* @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
* @vma: The vma from user-space which all cores' buffers will be mapped into
@@ -64,8 +75,8 @@ int gxp_core_telemetry_init(struct gxp_dev *gxp);
* * -EINVAL - Either the vma size is not aligned or @type is not valid
* * -ENOMEM - Insufficient memory is available to allocate and map the buffers
*/
-int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
- struct vm_area_struct *vma);
+int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
/**
* gxp_core_telemetry_enable() - Enable logging or tracing for all DSP cores
@@ -144,4 +155,13 @@ gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core);
*/
void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core);
+/**
+ * gxp_core_telemetry_exit() - Reverts gxp_core_telemetry_init() to release the
+ * resources acquired by core telemetry manager.
+ * @gxp: The GXP device to obtain the handler for
+ *
+ */
+void gxp_core_telemetry_exit(struct gxp_dev *gxp);
+
+
#endif /* __GXP_CORE_TELEMETRY_H__ */
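To make the sizing rules above concrete, here is a minimal sketch (editorial illustration, not part of the commit) of what the new core_telemetry_buffer_size module parameter goes through in gxp_core_telemetry_init(), following the ALIGN/clamp logic added in gxp-core-telemetry.c:

	/* e.g. loading the module with core_telemetry_buffer_size=200000 */
	size = ALIGN(200000, CORE_TELEMETRY_BUFFER_UNIT_SIZE);  /* -> 262144 (256 KiB) */
	/*
	 * 256 KiB lies within [CORE_TELEMETRY_DEFAULT_BUFFER_SIZE,
	 * CORE_TELEMETRY_MAX_BUFFER_SIZE] = [64 KiB, 255 * 64 KiB], so it is
	 * used as-is; values outside that range fall back to the 64 KiB
	 * default. In GXP_GET_SPECS the size would presumably be reported as
	 * 262144 / CORE_TELEMETRY_BUFFER_UNIT_SIZE = 4 units.
	 */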
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index 09d8d36..8f118d4 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -309,13 +309,13 @@ static int gxp_log_buff_set(void *data, u64 val)
mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->core_telemetry_mgr->logging_buff_data) {
+ if (!gxp->core_telemetry_mgr->logging_buff_data_legacy) {
dev_err(gxp->dev, "Logging buffer has not been created");
mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = gxp->core_telemetry_mgr->logging_buff_data->buffers;
+ buffers = gxp->core_telemetry_mgr->logging_buff_data_legacy->buffers;
for (i = 0; i < GXP_NUM_CORES; i++) {
ptr = buffers[i].vaddr;
*ptr = val;
@@ -333,13 +333,13 @@ static int gxp_log_buff_get(void *data, u64 *val)
mutex_lock(&gxp->core_telemetry_mgr->lock);
- if (!gxp->core_telemetry_mgr->logging_buff_data) {
+ if (!gxp->core_telemetry_mgr->logging_buff_data_legacy) {
dev_err(gxp->dev, "Logging buffer has not been created");
mutex_unlock(&gxp->core_telemetry_mgr->lock);
return -ENODEV;
}
- buffers = gxp->core_telemetry_mgr->logging_buff_data->buffers;
+ buffers = gxp->core_telemetry_mgr->logging_buff_data_legacy->buffers;
*val = *(u64 *)(buffers[0].vaddr);
diff --git a/gxp-firmware.c b/gxp-firmware.c
index 57b3583..d755f84 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -131,6 +131,15 @@ static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
return ret;
}
+static void elf_fetch_entry_point(struct gxp_dev *gxp, const u8 *elf_data,
+ uint core)
+{
+ struct elf32_hdr *ehdr;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ gxp->firmware_mgr->entry_points[core] = ehdr->e_entry;
+}
+
static int
gxp_firmware_authenticate(struct gxp_dev *gxp,
const struct firmware *firmwares[GXP_NUM_CORES])
@@ -339,11 +348,11 @@ static void gxp_program_reset_vector(struct gxp_dev *gxp, uint core, bool verbos
"Current Aurora reset vector for core %u: 0x%x\n",
core, reset_vec);
gxp_write_32(gxp, GXP_CORE_REG_ALT_RESET_VECTOR(core),
- gxp->fwbufs[core].daddr);
+ gxp->firmware_mgr->entry_points[core]);
if (verbose)
dev_notice(gxp->dev,
- "New Aurora reset vector for core %u: 0x%llx\n",
- core, gxp->fwbufs[core].daddr);
+ "New Aurora reset vector for core %u: 0x%x\n",
+ core, gxp->firmware_mgr->entry_points[core]);
}
static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
@@ -366,6 +375,10 @@ static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
goto out_firmware_unload;
}
+ elf_fetch_entry_point(gxp,
+ mgr->firmwares[core]->data + FW_HEADER_SIZE,
+ core);
+
memset(gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF, 0,
AURORA_SCRATCHPAD_LEN);
diff --git a/gxp-firmware.h b/gxp-firmware.h
index 1985717..73a21ba 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -55,6 +55,8 @@ struct gxp_firmware_manager {
* header after @firmwares have been fetched.
*/
dma_addr_t rw_boundaries[GXP_NUM_CORES];
+ /* Store the entry point of the DSP core firmware. */
+ u32 entry_points[GXP_NUM_CORES];
};
enum aurora_msg {
diff --git a/gxp-internal.h b/gxp-internal.h
index 5dd6e53..91c78a0 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -74,6 +74,7 @@ struct gxp_dev {
struct miscdevice misc_dev; /* misc device structure */
struct dentry *d_entry; /* debugfs dir for this device */
struct gxp_mapped_resource regs; /* ioremapped CSRs */
+ struct gxp_mapped_resource lpm_regs; /* ioremapped LPM CSRs, may be equal to @regs */
struct gxp_mapped_resource mbx[GXP_NUM_MAILBOXES]; /* mailbox CSRs */
struct gxp_mapped_resource fwbufs[GXP_NUM_CORES]; /* FW carveout */
struct gxp_mapped_resource fwdatabuf; /* Shared FW data carveout */
diff --git a/gxp-kci.c b/gxp-kci.c
index 8191887..0f93c33 100644
--- a/gxp-kci.c
+++ b/gxp-kci.c
@@ -30,6 +30,14 @@
#define MBOX_CMD_QUEUE_NUM_ENTRIES 1024
#define MBOX_RESP_QUEUE_NUM_ENTRIES 1024
+/*
+ * Encode INT/MIF values as a 16 bit pair in the 32-bit return value
+ * (in units of MHz, to provide enough range)
+ */
+#define PM_QOS_INT_SHIFT (16)
+#define PM_QOS_MIF_MASK (0xFFFF)
+#define PM_QOS_FACTOR (1000)
+
/* Callback functions for struct gcip_kci. */
static u32 gxp_kci_get_cmd_queue_head(struct gcip_kci *kci)
@@ -83,6 +91,49 @@ static void gxp_kci_inc_resp_queue_head(struct gcip_kci *kci, u32 inc)
CIRCULAR_QUEUE_WRAP_BIT);
}
+static void gxp_kci_set_pm_qos(struct gxp_dev *gxp, u32 pm_qos_val)
+{
+ s32 int_val = (pm_qos_val >> PM_QOS_INT_SHIFT) * PM_QOS_FACTOR;
+ s32 mif_val = (pm_qos_val & PM_QOS_MIF_MASK) * PM_QOS_FACTOR;
+
+ dev_dbg(gxp->dev, "%s: pm_qos request - int = %d mif = %d\n", __func__,
+ int_val, mif_val);
+
+ gxp_pm_update_pm_qos(gxp, int_val, mif_val);
+}
+
+static void gxp_kci_handle_rkci(struct gxp_kci *gkci,
+ struct gcip_kci_response_element *resp)
+{
+ struct gxp_dev *gxp = gkci->gxp;
+
+ switch (resp->code) {
+ case GXP_RKCI_CODE_PM_QOS_BTS:
+ /* FW indicates to ignore the request by setting them to undefined values. */
+ if (resp->retval != (typeof(resp->retval))~0ull)
+ gxp_kci_set_pm_qos(gxp, resp->retval);
+ if (resp->status != (typeof(resp->status))~0ull)
+ dev_warn_once(gxp->dev, "BTS is not supported");
+ gxp_kci_resp_rkci_ack(gkci, resp);
+ break;
+ case GXP_RKCI_CODE_CORE_TELEMETRY_READ: {
+ uint core;
+ uint core_list = (uint)(resp->status);
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (BIT(core) & core_list) {
+ gxp_core_telemetry_status_notify(gxp, core);
+ }
+ }
+ gxp_kci_resp_rkci_ack(gkci, resp);
+ break;
+ }
+ default:
+ dev_warn(gxp->dev, "Unrecognized reverse KCI request: %#x",
+ resp->code);
+ }
+}
+
/* Handle one incoming request from firmware. */
static void
gxp_reverse_kci_handle_response(struct gcip_kci *kci,
@@ -93,25 +144,7 @@ gxp_reverse_kci_handle_response(struct gcip_kci *kci,
struct gxp_kci *gxp_kci = mbx->data;
if (resp->code <= GCIP_RKCI_CHIP_CODE_LAST) {
- /* TODO(b/239638427): Handle reverse kci */
- switch (resp->code) {
- case GXP_RKCI_CODE_CORE_TELEMETRY_READ: {
- uint core;
- uint core_list = (uint)(resp->status);
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (BIT(core) & core_list) {
- gxp_core_telemetry_status_notify(gxp,
- core);
- }
- }
- gxp_kci_resp_rkci_ack(gxp_kci, resp);
- break;
- }
- default:
- dev_dbg(gxp->dev, "Reverse KCI received: %#x",
- resp->code);
- }
+ gxp_kci_handle_rkci(gxp_kci, resp);
return;
}
@@ -200,7 +233,8 @@ static int gxp_kci_allocate_resources(struct gxp_mailbox *mailbox,
mailbox->descriptor_buf.vaddr = gkci->descriptor_mem.vaddr;
mailbox->descriptor_buf.dsp_addr = gkci->descriptor_mem.daddr;
- mailbox->descriptor = (struct gxp_mailbox_descriptor *)mailbox->descriptor_buf.vaddr;
+ mailbox->descriptor =
+ (struct gxp_mailbox_descriptor *)mailbox->descriptor_buf.vaddr;
mailbox->descriptor->cmd_queue_device_addr =
mailbox->cmd_queue_buf.dsp_addr;
mailbox->descriptor->resp_queue_device_addr =
diff --git a/gxp-kci.h b/gxp-kci.h
index 0589f0c..85669e2 100644
--- a/gxp-kci.h
+++ b/gxp-kci.h
@@ -50,6 +50,7 @@
* Chip specific reverse KCI request codes.
*/
enum gxp_reverse_rkci_code {
+ GXP_RKCI_CODE_PM_QOS_BTS = GCIP_RKCI_CHIP_CODE_FIRST + 3,
GXP_RKCI_CODE_CORE_TELEMETRY_READ = GCIP_RKCI_CHIP_CODE_FIRST + 4,
};
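For the GXP_RKCI_CODE_PM_QOS_BTS request added above: firmware packs the INT and MIF targets into the 32-bit retval as two 16-bit fields in MHz, and gxp_kci_set_pm_qos() scales them by PM_QOS_FACTOR before calling gxp_pm_update_pm_qos(). A worked decode with a hypothetical value (editorial illustration, not part of the commit):

	u32 pm_qos_val = (400 << PM_QOS_INT_SHIFT) | 800;  /* INT 400 MHz, MIF 800 MHz */
	s32 int_val = (pm_qos_val >> PM_QOS_INT_SHIFT) * PM_QOS_FACTOR;  /* 400000 */
	s32 mif_val = (pm_qos_val & PM_QOS_MIF_MASK) * PM_QOS_FACTOR;    /* 800000 */
	/*
	 * gxp_pm_update_pm_qos(gxp, int_val, mif_val) then applies these
	 * frequencies directly, without touching the vote-based
	 * curr_memory_state bookkeeping (see the gxp-pm.h comment below).
	 */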
diff --git a/gxp-lpm.c b/gxp-lpm.c
index 9a2d77f..1e51b40 100644
--- a/gxp-lpm.c
+++ b/gxp-lpm.c
@@ -22,7 +22,7 @@
int i = 100000; \
while (i) { \
lpm_state = \
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET) & \
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET) & \
PSM_CURR_STATE_MASK; \
if (condition) \
break; \
@@ -34,24 +34,22 @@
void gxp_lpm_enable_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
{
- uint offset = LPM_REG_ENABLE_STATE_0 + (LPM_STATE_TABLE_SIZE * state);
-
/* PS0 should always be enabled */
- if (state == 0)
+ if (state == LPM_ACTIVE_STATE || state > LPM_PG_STATE)
return;
/* Disable all low power states */
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_2, 0x0);
- lpm_write_32_psm(gxp, psm, LPM_REG_ENABLE_STATE_3, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE1_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE2_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, psm, PSM_REG_ENABLE_STATE3_OFFSET, 0x0);
/* Enable the requested low power state */
- lpm_write_32_psm(gxp, psm, offset, 0x1);
+ lpm_write_32_psm(gxp, psm, state, 0x1);
}
bool gxp_lpm_is_initialized(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
/*
* state_valid bit goes active and stays high forever the first time you
@@ -65,7 +63,7 @@ bool gxp_lpm_is_initialized(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
bool gxp_lpm_is_powered(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
u32 state;
if (!(status & PSM_STATE_VALID_MASK))
@@ -76,7 +74,7 @@ bool gxp_lpm_is_powered(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
uint gxp_lpm_get_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
{
- u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET);
return status & PSM_CURR_STATE_MASK;
}
@@ -89,13 +87,13 @@ static int set_state_internal(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint ta
/* Set SW sequencing mode and PS target */
val = LPM_SW_PSM_MODE;
val |= target_state << LPM_CFG_SW_PS_TARGET_OFFSET;
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, val);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, val);
/* Start the SW sequence */
- lpm_write_32_psm(gxp, psm, PSM_START_OFFSET, 0x1);
+ lpm_write_32_psm(gxp, psm, PSM_REG_START_OFFSET, 0x1);
/* Wait for LPM init done (0x60041688) */
- while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
+ while (i && !(lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
@@ -121,7 +119,7 @@ int gxp_lpm_set_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_sta
dev_warn(gxp->dev,
"Forcing a transition to PS%u on core%u, status: %x\n",
target_state, psm,
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET));
gxp_lpm_enable_state(gxp, psm, target_state);
@@ -138,10 +136,10 @@ int gxp_lpm_set_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint target_sta
gxp->dev,
"Finished forced transition on core %u. target: PS%u, actual: PS%u, status: %x\n",
psm, target_state, gxp_lpm_get_state(gxp, psm),
- lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET));
+ lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET));
/* Set HW sequencing mode */
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, LPM_HW_MODE);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, LPM_HW_MODE);
return 0;
}
@@ -162,10 +160,10 @@ static int psm_enable(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
}
/* Write PSM start bit */
- lpm_write_32_psm(gxp, psm, PSM_START_OFFSET, PSM_START);
+ lpm_write_32_psm(gxp, psm, PSM_REG_START_OFFSET, PSM_START);
/* Wait for LPM init done (0x60041688) */
- while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
+ while (i && !(lpm_read_32_psm(gxp, psm, PSM_REG_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
@@ -175,7 +173,7 @@ static int psm_enable(struct gxp_dev *gxp, enum gxp_lpm_psm psm)
return 1;
/* Set PSM to HW mode (0x60041680) */
- lpm_write_32_psm(gxp, psm, PSM_CFG_OFFSET, PSM_HW_MODE);
+ lpm_write_32_psm(gxp, psm, PSM_REG_CFG_OFFSET, PSM_HW_MODE);
return 0;
}
@@ -193,8 +191,8 @@ void gxp_lpm_destroy(struct gxp_dev *gxp)
dev_dbg(gxp->dev, "Kicking Top PSM out of ACG\n");
/* Disable all low-power states for TOP */
- lpm_write_32_psm(gxp, LPM_PSM_TOP, LPM_REG_ENABLE_STATE_1, 0x0);
- lpm_write_32_psm(gxp, LPM_PSM_TOP, LPM_REG_ENABLE_STATE_2, 0x0);
+ lpm_write_32_psm(gxp, LPM_PSM_TOP, PSM_REG_ENABLE_STATE1_OFFSET, 0x0);
+ lpm_write_32_psm(gxp, LPM_PSM_TOP, PSM_REG_ENABLE_STATE2_OFFSET, 0x0);
}
int gxp_lpm_up(struct gxp_dev *gxp, uint core)
@@ -220,7 +218,7 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
void gxp_lpm_down(struct gxp_dev *gxp, uint core)
{
- if (gxp_lpm_get_state(gxp, core) == LPM_PG_STATE)
+ if (gxp_lpm_get_state(gxp, CORE_TO_PSM(core)) == LPM_PG_STATE)
return;
/* Enable PS3 (Pwr Gated) */
gxp_lpm_enable_state(gxp, CORE_TO_PSM(core), LPM_PG_STATE);
diff --git a/gxp-lpm.h b/gxp-lpm.h
index 74fdebc..22ae00f 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -13,13 +13,6 @@
#include "gxp-config.h"
#include "gxp.h"
-enum lpm_psm_csrs {
- LPM_REG_ENABLE_STATE_0 = 0x080,
- LPM_REG_ENABLE_STATE_1 = 0x180,
- LPM_REG_ENABLE_STATE_2 = 0x280,
- LPM_REG_ENABLE_STATE_3 = 0x380,
-};
-
enum lpm_state {
LPM_ACTIVE_STATE = 0,
LPM_CG_STATE = 1,
@@ -27,7 +20,15 @@ enum lpm_state {
LPM_PG_STATE = 3,
};
-#define LPM_STATE_TABLE_SIZE (LPM_REG_ENABLE_STATE_1 - LPM_REG_ENABLE_STATE_0)
+enum psm_reg_offset {
+ PSM_REG_ENABLE_STATE0_OFFSET,
+ PSM_REG_ENABLE_STATE1_OFFSET,
+ PSM_REG_ENABLE_STATE2_OFFSET,
+ PSM_REG_ENABLE_STATE3_OFFSET,
+ PSM_REG_START_OFFSET,
+ PSM_REG_STATUS_OFFSET,
+ PSM_REG_CFG_OFFSET,
+};
#define LPM_INSTRUCTION_OFFSET 0x00000944
#define LPM_INSTRUCTION_MASK 0x03000000
@@ -105,34 +106,47 @@ void gxp_lpm_enable_state(struct gxp_dev *gxp, enum gxp_lpm_psm psm, uint state)
static inline u32 lpm_read_32(struct gxp_dev *gxp, uint reg_offset)
{
- uint offset = GXP_LPM_BASE + reg_offset;
-
- return gxp_read_32(gxp, offset);
+ return readl(gxp->lpm_regs.vaddr + reg_offset);
}
static inline void lpm_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
{
- uint offset = GXP_LPM_BASE + reg_offset;
+ writel(value, gxp->lpm_regs.vaddr + reg_offset);
+}
- gxp_write_32(gxp, offset, value);
+static u32 get_reg_offset(struct gxp_dev *gxp, enum psm_reg_offset reg_offset, enum gxp_lpm_psm psm)
+{
+ switch (reg_offset) {
+ case PSM_REG_ENABLE_STATE0_OFFSET:
+ case PSM_REG_ENABLE_STATE1_OFFSET:
+ case PSM_REG_ENABLE_STATE2_OFFSET:
+ case PSM_REG_ENABLE_STATE3_OFFSET:
+ return gxp_lpm_psm_get_state_offset(psm, (uint)reg_offset);
+ case PSM_REG_START_OFFSET:
+ return gxp_lpm_psm_get_start_offset(psm);
+ case PSM_REG_STATUS_OFFSET:
+ return gxp_lpm_psm_get_status_offset(psm);
+ case PSM_REG_CFG_OFFSET:
+ return gxp_lpm_psm_get_cfg_offset(psm);
+ }
+
+ return 0;
}
static inline u32 lpm_read_32_psm(struct gxp_dev *gxp, enum gxp_lpm_psm psm,
- uint reg_offset)
+ enum psm_reg_offset reg_offset)
{
- uint offset =
- GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+ uint offset = get_reg_offset(gxp, reg_offset, psm);
- return gxp_read_32(gxp, offset);
+ return lpm_read_32(gxp, offset);
}
static inline void lpm_write_32_psm(struct gxp_dev *gxp, enum gxp_lpm_psm psm,
- uint reg_offset, u32 value)
+ enum psm_reg_offset reg_offset, u32 value)
{
- uint offset =
- GXP_LPM_PSM_0_BASE + (GXP_LPM_PSM_SIZE * psm) + reg_offset;
+ u32 offset = get_reg_offset(gxp, reg_offset, psm);
- gxp_write_32(gxp, offset, value);
+ lpm_write_32(gxp, offset, value);
}
#endif /* __GXP_LPM_H__ */
diff --git a/gxp-mcu-fs.c b/gxp-mcu-fs.c
new file mode 100644
index 0000000..8805cc6
--- /dev/null
+++ b/gxp-mcu-fs.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common file system operations for devices with MCU support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/fs.h>
+#include <linux/mm_types.h>
+#include <linux/rwsem.h>
+
+#include <gcip/gcip-telemetry.h>
+
+#include "gxp-client.h"
+#include "gxp-internal.h"
+#include "gxp-mcu-fs.h"
+#include "gxp-mcu-telemetry.h"
+#include "gxp-mcu.h"
+#include "gxp-uci.h"
+#include "gxp.h"
+
+static int gxp_ioctl_uci_command_helper(struct gxp_client *client,
+ struct gxp_mailbox_command_ioctl *ibuf)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_mcu *mcu = gxp_mcu_of(gxp);
+ struct gxp_uci_command cmd;
+ int ret;
+
+ if (ibuf->virtual_core_id >= GXP_NUM_CORES)
+ return -EINVAL;
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAILBOX_COMMAND")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Caller must hold BLOCK wakelock */
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAILBOX_COMMAND requires the client hold a BLOCK wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Use at least one core for the command */
+ if (ibuf->num_cores == 0)
+ ibuf->num_cores = 1;
+
+ /* Pack the command structure */
+ cmd.core_command_params.address = ibuf->device_address;
+ cmd.core_command_params.size = ibuf->size;
+ cmd.core_command_params.num_cores = ibuf->num_cores;
+ /* Plus 1 to align with power states in MCU firmware. */
+ cmd.core_command_params.dsp_operating_point = ibuf->gxp_power_state + 1;
+ cmd.core_command_params.memory_operating_point =
+ ibuf->memory_power_state;
+ /* cmd.seq is assigned by mailbox implementation */
+ cmd.type = CORE_COMMAND;
+
+ /* TODO(b/248179414): Remove core assignment when MCU fw re-enable sticky core scheduler. */
+ {
+ int core;
+
+ down_read(&gxp->vd_semaphore);
+ core = gxp_vd_virt_core_to_phys_core(client->vd,
+ ibuf->virtual_core_id);
+ up_read(&gxp->vd_semaphore);
+ if (core < 0) {
+ dev_err(gxp->dev,
+ "Mailbox command failed: Invalid virtual core id (%u)\n",
+ ibuf->virtual_core_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd.priority = core;
+ }
+
+ cmd.client_id = client->vd->client_id;
+
+ /*
+ * TODO(b/248196344): Use the only one permitted eventfd for the virtual device
+ * when MCU fw re-enable sticky core scheduler.
+ */
+ ret = gxp_uci_send_command(
+ &mcu->uci, client->vd, &cmd,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].queue,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].lock,
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID].waitq,
+ client->mb_eventfds[ibuf->virtual_core_id]);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to enqueue mailbox command (ret=%d)\n", ret);
+ goto out;
+ }
+ ibuf->sequence_number = cmd.seq;
+
+out:
+ up_read(&client->semaphore);
+ return ret;
+}
+
+static int gxp_ioctl_uci_command(struct gxp_client *client,
+ struct gxp_mailbox_command_ioctl __user *argp)
+{
+ struct gxp_mailbox_command_ioctl ibuf;
+ int ret;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ ret = gxp_ioctl_uci_command_helper(client, &ibuf);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+gxp_ioctl_uci_response(struct gxp_client *client,
+ struct gxp_mailbox_response_ioctl __user *argp)
+{
+ struct gxp_mailbox_response_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_read(&client->semaphore);
+
+ if (!gxp_client_has_available_vd(client, "GXP_MAILBOX_RESPONSE")) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Caller must hold BLOCK wakelock */
+ if (!client->has_block_wakelock) {
+ dev_err(client->gxp->dev,
+ "GXP_MAILBOX_RESPONSE requires the client hold a BLOCK wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = gxp_uci_wait_async_response(
+ &client->vd->mailbox_resp_queues[UCI_RESOURCE_ID],
+ &ibuf.sequence_number, &ibuf.cmd_retval, &ibuf.error_code);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ ret = -EFAULT;
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static inline enum gcip_telemetry_type to_gcip_telemetry_type(u8 type)
+{
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ return GCIP_TELEMETRY_LOG;
+ else
+ return GCIP_TELEMETRY_TRACE;
+}
+
+static int gxp_register_mcu_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_mcu_telemetry_register_eventfd(
+ mcu, to_gcip_telemetry_type(ibuf.type), ibuf.eventfd);
+}
+
+static int gxp_unregister_mcu_telemetry_eventfd(
+ struct gxp_client *client,
+ struct gxp_register_telemetry_eventfd_ioctl __user *argp)
+{
+ struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
+ struct gxp_register_telemetry_eventfd_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ return gxp_mcu_telemetry_unregister_eventfd(
+ mcu, to_gcip_telemetry_type(ibuf.type));
+}
+
+long gxp_mcu_ioctl(struct file *file, uint cmd, ulong arg)
+{
+ struct gxp_client *client = file->private_data;
+ void __user *argp = (void __user *)arg;
+ long ret;
+
+ if (gxp_is_direct_mode(client->gxp))
+ return -ENOTTY;
+ switch (cmd) {
+ case GXP_MAILBOX_COMMAND:
+ ret = gxp_ioctl_uci_command(client, argp);
+ break;
+ case GXP_MAILBOX_RESPONSE:
+ ret = gxp_ioctl_uci_response(client, argp);
+ break;
+ case GXP_REGISTER_MCU_TELEMETRY_EVENTFD:
+ ret = gxp_register_mcu_telemetry_eventfd(client, argp);
+ break;
+ case GXP_UNREGISTER_MCU_TELEMETRY_EVENTFD:
+ ret = gxp_unregister_mcu_telemetry_eventfd(client, argp);
+ break;
+ default:
+ ret = -ENOTTY; /* unknown command */
+ }
+
+ return ret;
+}
+
+int gxp_mcu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct gxp_client *client = file->private_data;
+ struct gxp_mcu *mcu = gxp_mcu_of(client->gxp);
+ int ret;
+
+ if (gxp_is_direct_mode(client->gxp))
+ return -EOPNOTSUPP;
+
+ switch (vma->vm_pgoff << PAGE_SHIFT) {
+ case GXP_MMAP_MCU_LOG_BUFFER_OFFSET:
+ ret = gxp_mcu_telemetry_mmap_buffer(mcu, GCIP_TELEMETRY_LOG,
+ vma);
+ break;
+ case GXP_MMAP_MCU_TRACE_BUFFER_OFFSET:
+ ret = gxp_mcu_telemetry_mmap_buffer(mcu, GCIP_TELEMETRY_TRACE,
+ vma);
+ break;
+ default:
+ ret = -EOPNOTSUPP; /* unknown offset */
+ }
+
+ return ret;
+}
diff --git a/gxp-mcu-fs.h b/gxp-mcu-fs.h
new file mode 100644
index 0000000..75b55c7
--- /dev/null
+++ b/gxp-mcu-fs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common file system operations for devices with MCU support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#ifndef __GXP_MCU_FS_H__
+#define __GXP_MCU_FS_H__
+
+#include <linux/fs.h>
+#include <linux/mm_types.h>
+
+/**
+ * gxp_mcu_ioctl() - Handles ioctl calls that are meaningful for devices with
+ * MCU support.
+ *
+ * Return:
+ * * -ENOTTY - The call is not handled - either the command is unrecognized
+ * or the driver is running in direct mode.
+ * * Otherwise - Returned by individual command handlers.
+ */
+long gxp_mcu_ioctl(struct file *file, uint cmd, ulong arg);
+
+/**
+ * gxp_mcu_mmap() - Handles mmap calls that are meaningful for devices with
+ * MCU support.
+ *
+ * Return:
+ * * -EOPNOTSUPP - The call is not handled - either the offset is unrecognized
+ * or the driver is running in direct mode.
+ * * Otherwise - Returned by individual command handlers.
+ */
+int gxp_mcu_mmap(struct file *file, struct vm_area_struct *vma);
+
+#endif /* __GXP_MCU_FS_H__ */
diff --git a/gxp-pm.c b/gxp-pm.c
index bd59bb9..33b2834 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -669,6 +669,11 @@ out:
return ret;
}
+int gxp_pm_update_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val)
+{
+ return gxp_pm_req_pm_qos(gxp, int_val, mif_val);
+}
+
int gxp_pm_init(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
diff --git a/gxp-pm.h b/gxp-pm.h
index b1425f5..acf9205 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -119,7 +119,7 @@ struct gxp_power_manager {
/* Last requested clock mux state */
bool last_scheduled_low_clkmux;
int curr_state;
- int curr_memory_state;
+ int curr_memory_state; /* Note: this state will not be maintained in the MCU mode. */
struct gxp_pm_device_ops *ops;
struct gxp_set_acpm_state_work
set_acpm_state_work[AUR_NUM_POWER_STATE_WORKER];
@@ -264,6 +264,22 @@ int gxp_pm_update_requested_power_states(struct gxp_dev *gxp,
struct gxp_power_states origin_states,
struct gxp_power_states requested_states);
+/**
+ * gxp_pm_update_pm_qos() - API for updating the memory power state but passing the values of
+ * INT and MIF frequencies directly. This function will ignore the vote ratings and update the
+ * frequencies right away.
+ * @gxp: The GXP device to operate.
+ * @int_val: The value of INT frequency.
+ * @mif_val: The value of MIF frequency.
+ *
+ * Note: This function will not update the @curr_memory_state of gxp_power_manager.
+ *
+ * Return:
+ * * 0 - The memory power state has been changed
+ * * -EINVAL - Invalid requested state
+ */
+int gxp_pm_update_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val);
+
/*
* gxp_pm_force_clkmux_normal() - Force PLL_CON0_NOC_USER and PLL_CON0_PLL_AUR MUX
* switch to the normal state. This is required to guarantee LPM works when the core
diff --git a/gxp-vd.c b/gxp-vd.c
index 1996425..0456d0a 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -59,8 +59,8 @@ static int map_core_telemetry_buffers(struct gxp_dev *gxp,
return 0;
mutex_lock(&gxp->core_telemetry_mgr->lock);
- data[0] = gxp->core_telemetry_mgr->logging_buff_data;
- data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data_legacy;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
for (i = 0; i < ARRAY_SIZE(data); i++) {
if (!data[i] || !data[i]->is_enabled)
@@ -111,8 +111,8 @@ static void unmap_core_telemetry_buffers(struct gxp_dev *gxp,
if (!gxp->core_telemetry_mgr)
return;
mutex_lock(&gxp->core_telemetry_mgr->lock);
- data[0] = gxp->core_telemetry_mgr->logging_buff_data;
- data[1] = gxp->core_telemetry_mgr->tracing_buff_data;
+ data[0] = gxp->core_telemetry_mgr->logging_buff_data_legacy;
+ data[1] = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
for (i = 0; i < ARRAY_SIZE(data); i++) {
if (!data[i] || !data[i]->is_enabled)
diff --git a/gxp.h b/gxp.h
index 35894c7..d4fb160 100644
--- a/gxp.h
+++ b/gxp.h
@@ -17,21 +17,21 @@
#define GXP_INTERFACE_VERSION_BUILD 0
/*
- * mmap offsets for core logging and tracing buffers
+ * Legacy mmap offsets for core logging and tracing buffers
* Requested size will be divided evenly among all cores. The whole buffer
* must be page-aligned, and the size of each core's buffer must be a multiple
* of PAGE_SIZE.
*/
-#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET 0x10000
-#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET 0x20000
+#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET_LEGACY 0x10000
+#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET_LEGACY 0x20000
/* mmap offsets for MCU logging and tracing buffers */
#define GXP_MMAP_MCU_LOG_BUFFER_OFFSET 0x30000
#define GXP_MMAP_MCU_TRACE_BUFFER_OFFSET 0x40000
-/* For backward compatibility. */
-#define GXP_MMAP_LOG_BUFFER_OFFSET GXP_MMAP_CORE_LOG_BUFFER_OFFSET
-#define GXP_MMAP_TRACE_BUFFER_OFFSET GXP_MMAP_CORE_TRACE_BUFFER_OFFSET
+/* mmap offsets for core logging and tracing buffers */
+#define GXP_MMAP_CORE_LOG_BUFFER_OFFSET 0x50000
+#define GXP_MMAP_CORE_TRACE_BUFFER_OFFSET 0x60000
#define GXP_IOCTL_BASE 0xEE