depends on ARM || ARM64
select RESET_CONTROLLER
-config QCOM_SCM_32
- def_bool y
- depends on QCOM_SCM && ARM
-
-config QCOM_SCM_64
- def_bool y
- depends on QCOM_SCM && ARM64
-
config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
bool "Qualcomm download mode enabled by default"
depends on QCOM_SCM
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
-obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
-obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
-obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
+obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
- * Copyright (C) 2015 Linaro Ltd.
- */
-
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/qcom_scm.h>
-#include <linux/arm-smccc.h>
-#include <linux/dma-mapping.h>
-
-#include "qcom_scm.h"
-
-static DEFINE_MUTEX(qcom_scm_lock);
-
-
-/**
- * struct arm_smccc_args
- * @args: The array of values used in registers in smc instruction
- */
-struct arm_smccc_args {
- unsigned long args[8];
-};
-
-#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
-
-/**
- * struct scm_legacy_command - one SCM command buffer
- * @len: total available memory for command and response
- * @buf_offset: start of command buffer
- * @resp_hdr_offset: start of response buffer
- * @id: command to be executed
- * @buf: buffer returned from scm_legacy_get_command_buffer()
- *
- * An SCM command is laid out in memory as follows:
- *
- * ------------------- <--- struct scm_legacy_command
- * | command header |
- * ------------------- <--- scm_legacy_get_command_buffer()
- * | command buffer |
- * ------------------- <--- struct scm_legacy_response and
- * | response header | scm_legacy_command_to_response()
- * ------------------- <--- scm_legacy_get_response_buffer()
- * | response buffer |
- * -------------------
- *
- * There can be arbitrary padding between the headers and buffers so
- * you should always use the appropriate scm_legacy_get_*_buffer() routines
- * to access the buffers in a safe manner.
- */
-struct scm_legacy_command {
- __le32 len;
- __le32 buf_offset;
- __le32 resp_hdr_offset;
- __le32 id;
- __le32 buf[0];
-};
-
-/**
- * struct scm_legacy_response - one SCM response buffer
- * @len: total available memory for response
- * @buf_offset: start of response data relative to start of scm_legacy_response
- * @is_complete: indicates if the command has finished processing
- */
-struct scm_legacy_response {
- __le32 len;
- __le32 buf_offset;
- __le32 is_complete;
-};
-
-/**
- * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
- * @cmd: command
- *
- * Returns a pointer to a response for a command.
- */
-static inline struct scm_legacy_response *scm_legacy_command_to_response(
- const struct scm_legacy_command *cmd)
-{
- return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
-}
-
-/**
- * scm_legacy_get_command_buffer() - Get a pointer to a command buffer
- * @cmd: command
- *
- * Returns a pointer to the command buffer of a command.
- */
-static inline void *scm_legacy_get_command_buffer(
- const struct scm_legacy_command *cmd)
-{
- return (void *)cmd->buf;
-}
-
-/**
- * scm_legacy_get_response_buffer() - Get a pointer to a response buffer
- * @rsp: response
- *
- * Returns a pointer to a response buffer of a response.
- */
-static inline void *scm_legacy_get_response_buffer(
- const struct scm_legacy_response *rsp)
-{
- return (void *)rsp + le32_to_cpu(rsp->buf_offset);
-}
-
-static void __scm_legacy_do(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res)
-{
- do {
- arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
- smc->args[3], smc->args[4], smc->args[5],
- smc->args[6], smc->args[7], res);
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-}
-
-/**
- * qcom_scm_call() - Sends a command to the SCM and waits for the command to
- * finish processing.
- *
- * A note on cache maintenance:
- * Note that any buffers that are expected to be accessed by the secure world
- * must be flushed before invoking qcom_scm_call and invalidated in the cache
- * immediately after qcom_scm_call returns. Cache maintenance on the command
- * and response buffers is taken care of by qcom_scm_call; however, callers are
- * responsible for any other cached buffers passed over to the secure world.
- */
-int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res)
-{
- u8 arglen = desc->arginfo & 0xf;
- int ret = 0, context_id;
- unsigned int i;
- struct scm_legacy_command *cmd;
- struct scm_legacy_response *rsp;
- struct arm_smccc_args smc = {0};
- struct arm_smccc_res smc_res;
- const size_t cmd_len = arglen * sizeof(__le32);
- const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
- size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
- dma_addr_t cmd_phys;
- __le32 *arg_buf;
- const __le32 *res_buf;
-
- cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- cmd->len = cpu_to_le32(alloc_len);
- cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
- cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
- cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
-
- arg_buf = scm_legacy_get_command_buffer(cmd);
- for (i = 0; i < arglen; i++)
- arg_buf[i] = cpu_to_le32(desc->args[i]);
-
- rsp = scm_legacy_command_to_response(cmd);
-
- cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, cmd_phys)) {
- kfree(cmd);
- return -ENOMEM;
- }
-
- smc.args[0] = 1;
- smc.args[1] = (unsigned long)&context_id;
- smc.args[2] = cmd_phys;
-
- mutex_lock(&qcom_scm_lock);
- __scm_legacy_do(&smc, &smc_res);
- if (smc_res.a0)
- ret = qcom_scm_remap_error(smc_res.a0);
- mutex_unlock(&qcom_scm_lock);
- if (ret)
- goto out;
-
- do {
- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
- sizeof(*rsp), DMA_FROM_DEVICE);
- } while (!rsp->is_complete);
-
- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
- le32_to_cpu(rsp->buf_offset),
- resp_len, DMA_FROM_DEVICE);
-
- if (res) {
- res_buf = scm_legacy_get_response_buffer(rsp);
- for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
- res->result[i] = le32_to_cpu(res_buf[i]);
- }
-out:
- dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
- kfree(cmd);
- return ret;
-}
-
-#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5
-#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2
-#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8)
-#define SCM_LEGACY_MASK_IRQS BIT(5)
-#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
- ((SCM_LEGACY_FNID(svc, cmd) << 12) | \
- SCM_LEGACY_CLASS_REGISTER | \
- SCM_LEGACY_MASK_IRQS | \
- (n & 0xf))
-
-/**
- * qcom_scm_call_atomic() - Send an atomic SCM command with up to 5 arguments
- * and 3 return values
- * @desc: SCM call descriptor containing arguments
- * @res: SCM call return values
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptable, atomic and SMP safe.
- */
-int qcom_scm_call_atomic(struct device *unused,
- const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res)
-{
- int context_id;
- struct arm_smccc_res smc_res;
- size_t arglen = desc->arginfo & 0xf;
-
- BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
-
- arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
- (unsigned long)&context_id,
- desc->args[0], desc->args[1], desc->args[2],
- desc->args[3], desc->args[4], 0, &smc_res);
-
- if (res) {
- res->result[0] = smc_res.a1;
- res->result[1] = smc_res.a2;
- res->result[2] = smc_res.a3;
- }
-
- return smc_res.a0;
-}
-
-int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
-{
- int ret;
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_INFO,
- .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
- .args[0] = SCM_LEGACY_FNID(svc_id, cmd_id),
- .arginfo = QCOM_SCM_ARGS(1),
- };
- struct qcom_scm_res res;
-
- ret = qcom_scm_call(dev, &desc, &res);
-
- return ret ? : res.result[0];
-}
-
-void __qcom_scm_init(void)
-{
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/qcom_scm.h>
-#include <linux/arm-smccc.h>
-#include <linux/dma-mapping.h>
-
-#include "qcom_scm.h"
-
-#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
-
-/**
- * struct arm_smccc_args
- * @args: The array of values used in registers in smc instruction
- */
-struct arm_smccc_args {
- unsigned long args[8];
-};
-
-static u64 qcom_smccc_convention = -1;
-static DEFINE_MUTEX(qcom_scm_lock);
-
-#define QCOM_SCM_EBUSY_WAIT_MS 30
-#define QCOM_SCM_EBUSY_MAX_RETRY 20
-
-#define SCM_SMC_N_REG_ARGS 4
-#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
-#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
-#define SCM_SMC_FIRST_REG_IDX 2
-#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
-
-static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res)
-{
- unsigned long a0 = smc->args[0];
- struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
- smc->args[3], smc->args[4], smc->args[5],
- quirk.state.a6, smc->args[7], res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- a0 = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-}
-
-static void __scm_smc_do(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res, bool atomic)
-{
- int retry_count = 0;
-
- if (atomic) {
- __scm_smc_do_quirk(smc, res);
- return;
- }
-
- do {
- mutex_lock(&qcom_scm_lock);
-
- __scm_smc_do_quirk(smc, res);
-
- mutex_unlock(&qcom_scm_lock);
-
- if (res->a0 == QCOM_SCM_V2_EBUSY) {
- if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
- break;
- msleep(QCOM_SCM_EBUSY_WAIT_MS);
- }
- } while (res->a0 == QCOM_SCM_V2_EBUSY);
-}
-
-static int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res, bool atomic)
-{
- int arglen = desc->arginfo & 0xf;
- int i;
- dma_addr_t args_phys = 0;
- void *args_virt = NULL;
- size_t alloc_len;
- gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
- u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
- struct arm_smccc_res smc_res;
- struct arm_smccc_args smc = {0};
-
- smc.args[0] = ARM_SMCCC_CALL_VAL(
- smccc_call_type,
- qcom_smccc_convention,
- desc->owner,
- SCM_SMC_FNID(desc->svc, desc->cmd));
- smc.args[1] = desc->arginfo;
- for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
- smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
-
- if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
- alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
-
- if (!args_virt)
- return -ENOMEM;
-
- if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
- __le32 *args = args_virt;
-
- for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
- args[i] = cpu_to_le32(desc->args[i +
- SCM_SMC_FIRST_EXT_IDX]);
- } else {
- __le64 *args = args_virt;
-
- for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
- args[i] = cpu_to_le64(desc->args[i +
- SCM_SMC_FIRST_EXT_IDX]);
- }
-
- args_phys = dma_map_single(dev, args_virt, alloc_len,
- DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, args_phys)) {
- kfree(args_virt);
- return -ENOMEM;
- }
-
- smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
- }
-
- __scm_smc_do(&smc, &smc_res, atomic);
-
- if (args_virt) {
- dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
- kfree(args_virt);
- }
-
- if (res) {
- res->result[0] = smc_res.a1;
- res->result[1] = smc_res.a2;
- res->result[2] = smc_res.a3;
- }
-
- return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
-}
-
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
- */
-int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res)
-{
- might_sleep();
- return __scm_smc_call(dev, desc, res, false);
-}
-
-/**
- * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- * @res: Structure containing results from SMC/HVC call
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This can be called in atomic context.
- */
-int qcom_scm_call_atomic(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res)
-{
- return __scm_smc_call(dev, desc, res, true);
-}
-
-int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
-{
- int ret;
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_INFO,
- .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
-
- desc.arginfo = QCOM_SCM_ARGS(1);
- desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
- (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
-
- ret = qcom_scm_call(dev, &desc, &res);
-
- return ret ? : res.result[0];
-}
-
-void __qcom_scm_init(void)
-{
- struct qcom_scm_desc desc = {
- .svc = QCOM_SCM_SVC_INFO,
- .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
- .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
- QCOM_SCM_INFO_IS_CALL_AVAIL) |
- (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
- .arginfo = QCOM_SCM_ARGS(1),
- .owner = ARM_SMCCC_OWNER_SIP,
- };
- struct qcom_scm_res res;
- int ret;
-
- qcom_smccc_convention = ARM_SMCCC_SMC_64;
- // Device isn't required as there is only one argument - no device
- // needed to dma_map_single to secure world
- ret = qcom_scm_call_atomic(NULL, &desc, &res);
- if (!ret && res.result[0] == 1)
- goto out;
-
- qcom_smccc_convention = ARM_SMCCC_SMC_32;
- ret = qcom_scm_call_atomic(NULL, &desc, &res);
- if (!ret && res.result[0] == 1)
- goto out;
-
- qcom_smccc_convention = -1;
- BUG();
-out:
- pr_info("QCOM SCM SMC Convention: %lld\n", qcom_smccc_convention);
-}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values passed in registers to the smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+/**
+ * struct scm_legacy_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_legacy_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ * ------------------- <--- struct scm_legacy_command
+ * | command header |
+ * ------------------- <--- scm_legacy_get_command_buffer()
+ * | command buffer |
+ * ------------------- <--- struct scm_legacy_response and
+ * | response header | scm_legacy_command_to_response()
+ * ------------------- <--- scm_legacy_get_response_buffer()
+ * | response buffer |
+ * -------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_legacy_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_legacy_command {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 resp_hdr_offset;
+ __le32 id;
+ __le32 buf[0];
+};
+
+/**
+ * struct scm_legacy_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_legacy_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_legacy_response {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 is_complete;
+};
+
+/**
+ * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_legacy_response *scm_legacy_command_to_response(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
+}
+
+/**
+ * scm_legacy_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_legacy_get_command_buffer(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd->buf;
+}
+
+/**
+ * scm_legacy_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_legacy_get_response_buffer(
+ const struct scm_legacy_response *rsp)
+{
+ return (void *)rsp + le32_to_cpu(rsp->buf_offset);
+}
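As an aside on the layout described above, the offset arithmetic can be checked in isolation. The following standalone sketch (plain userspace C, not part of the patch) reproduces the values scm_legacy_call() records for a hypothetical call with two 32-bit arguments: a 16-byte command header, an 8-byte command buffer, a 12-byte response header and a 12-byte response buffer (the actual kernel allocation is additionally PAGE_ALIGN()ed).

#include <stdio.h>
#include <stdint.h>

#define MAX_QCOM_SCM_RETS 3

int main(void)
{
	const size_t hdr_len  = 4 * sizeof(uint32_t); /* len, buf_offset, resp_hdr_offset, id */
	const size_t cmd_len  = 2 * sizeof(uint32_t); /* two 32-bit arguments */
	const size_t rsp_hdr  = 3 * sizeof(uint32_t); /* len, buf_offset, is_complete */
	const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(uint32_t);

	printf("buf_offset      = %zu\n", hdr_len);           /* 16 */
	printf("resp_hdr_offset = %zu\n", hdr_len + cmd_len); /* 24 */
	printf("alloc_len       = %zu\n",
	       hdr_len + cmd_len + rsp_hdr + resp_len);       /* 48 */
	return 0;
}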
+
+static void __scm_legacy_do(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ do {
+ arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ smc->args[6], smc->args[7], res);
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+/**
+ * scm_legacy_call() - Sends a command to the SCM and waits for the command to
+ * finish processing.
+ * @dev: device
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from the SMC call
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. Cache maintenance on the command
+ * and response buffers is taken care of by qcom_scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ u8 arglen = desc->arginfo & 0xf;
+ int ret = 0, context_id;
+ unsigned int i;
+ struct scm_legacy_command *cmd;
+ struct scm_legacy_response *rsp;
+ struct arm_smccc_args smc = {0};
+ struct arm_smccc_res smc_res;
+ const size_t cmd_len = arglen * sizeof(__le32);
+ const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
+ size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+ dma_addr_t cmd_phys;
+ __le32 *arg_buf;
+ const __le32 *res_buf;
+
+ cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->len = cpu_to_le32(alloc_len);
+ cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+ cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+ cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
+
+ arg_buf = scm_legacy_get_command_buffer(cmd);
+ for (i = 0; i < arglen; i++)
+ arg_buf[i] = cpu_to_le32(desc->args[i]);
+
+ rsp = scm_legacy_command_to_response(cmd);
+
+ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cmd_phys)) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ smc.args[0] = 1;
+ smc.args[1] = (unsigned long)&context_id;
+ smc.args[2] = cmd_phys;
+
+ mutex_lock(&qcom_scm_lock);
+ __scm_legacy_do(&smc, &smc_res);
+ if (smc_res.a0)
+ ret = qcom_scm_remap_error(smc_res.a0);
+ mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ goto out;
+
+ do {
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+ sizeof(*rsp), DMA_FROM_DEVICE);
+ } while (!rsp->is_complete);
+
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+ le32_to_cpu(rsp->buf_offset),
+ resp_len, DMA_FROM_DEVICE);
+
+ if (res) {
+ res_buf = scm_legacy_get_response_buffer(rsp);
+ for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
+ res->result[i] = le32_to_cpu(res_buf[i]);
+ }
+out:
+ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(cmd);
+ return ret;
+}
+
+#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5
+#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2
+#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8)
+#define SCM_LEGACY_MASK_IRQS BIT(5)
+#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
+ ((SCM_LEGACY_FNID(svc, cmd) << 12) | \
+ SCM_LEGACY_CLASS_REGISTER | \
+ SCM_LEGACY_MASK_IRQS | \
+ (n & 0xf))
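For illustration, the bit packing done by SCM_LEGACY_ATOMIC_ID() can be worked through with hypothetical numbers. The standalone sketch below (userspace C, not part of the patch; svc = 1, cmd = 1 and two register arguments are made-up values) prints 0x401222: the legacy function ID in bits 12 and up, the register class in bits 8-9, the IRQ mask bit at bit 5 and the argument count in the low nibble.

#include <stdio.h>

#define BIT(n)				(1UL << (n))
#define SCM_LEGACY_FNID(s, c)		(((s) << 10) | ((c) & 0x3ff))
#define SCM_LEGACY_CLASS_REGISTER	(0x2 << 8)
#define SCM_LEGACY_MASK_IRQS		BIT(5)
#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
	((SCM_LEGACY_FNID(svc, cmd) << 12) | \
	 SCM_LEGACY_CLASS_REGISTER | \
	 SCM_LEGACY_MASK_IRQS | \
	 (n & 0xf))

int main(void)
{
	/* Hypothetical svc/cmd values, two register arguments. */
	printf("0x%lx\n", SCM_LEGACY_ATOMIC_ID(1, 1, 2)); /* prints 0x401222 */
	return 0;
}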
+
+/**
+ * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
+ * and 3 return values
+ * @desc: SCM call descriptor containing arguments
+ * @res: SCM call return values
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+int scm_legacy_call_atomic(struct device *unused,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ int context_id;
+ struct arm_smccc_res smc_res;
+ size_t arglen = desc->arginfo & 0xf;
+
+ BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
+
+ arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
+ (unsigned long)&context_id,
+ desc->args[0], desc->args[1], desc->args[2],
+ desc->args[3], desc->args[4], 0, &smc_res);
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return smc_res.a0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values passed in registers to the smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define SCM_SMC_N_REG_ARGS 4
+#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
+#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
+#define SCM_SMC_FIRST_REG_IDX 2
+#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
+
+static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ unsigned long a0 = smc->args[0];
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ quirk.state.a6, smc->args[7], res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ a0 = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void __scm_smc_do(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int retry_count = 0;
+
+ if (atomic) {
+ __scm_smc_do_quirk(smc, res);
+ return;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __scm_smc_do_quirk(smc, res);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res, bool atomic)
+{
+ int arglen = desc->arginfo & 0xf;
+ int i;
+ dma_addr_t args_phys = 0;
+ void *args_virt = NULL;
+ size_t alloc_len;
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+ u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
+ u32 qcom_smccc_convention =
+ (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ struct arm_smccc_res smc_res;
+ struct arm_smccc_args smc = {0};
+
+ smc.args[0] = ARM_SMCCC_CALL_VAL(
+ smccc_call_type,
+ qcom_smccc_convention,
+ desc->owner,
+ SCM_SMC_FNID(desc->svc, desc->cmd));
+ smc.args[1] = desc->arginfo;
+ for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
+ smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
+
+ if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
+
+ if (!args_virt)
+ return -ENOMEM;
+
+ if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+ __le32 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le32(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ } else {
+ __le64 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le64(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ }
+
+ args_phys = dma_map_single(dev, args_virt, alloc_len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, args_phys)) {
+ kfree(args_virt);
+ return -ENOMEM;
+ }
+
+ smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
+ }
+
+ __scm_smc_do(&smc, &smc_res, atomic);
+
+ if (args_virt) {
+ dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(args_virt);
+ }
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
+}
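To make the register/buffer split in scm_smc_call() concrete: when a descriptor carries more than SCM_SMC_N_REG_ARGS arguments, the first ones stay in SMC argument registers, but the last argument register is replaced by the physical address of a DMA-mapped buffer holding the remainder, starting at SCM_SMC_FIRST_EXT_IDX. The standalone sketch below (userspace C, not part of the patch; the six-argument count is just an example) prints that mapping.

#include <stdio.h>

#define SCM_SMC_N_REG_ARGS	4
#define SCM_SMC_FIRST_EXT_IDX	(SCM_SMC_N_REG_ARGS - 1)	/* 3 */
#define SCM_SMC_FIRST_REG_IDX	2
#define SCM_SMC_LAST_REG_IDX	(SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1) /* 5 */

int main(void)
{
	int arglen = 6, i;	/* hypothetical descriptor with six arguments */

	for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
		printf("desc->args[%d] -> smc.args[%d]\n",
		       i, i + SCM_SMC_FIRST_REG_IDX);

	if (arglen > SCM_SMC_N_REG_ARGS) {
		printf("smc.args[%d] is overwritten with the extra-args buffer address\n",
		       SCM_SMC_LAST_REG_IDX);
		for (i = SCM_SMC_FIRST_EXT_IDX; i < arglen; i++)
			printf("desc->args[%d] -> extra buffer slot %d\n",
			       i, i - SCM_SMC_FIRST_EXT_IDX);
	}
	return 0;
}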
{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};
+static const char *qcom_scm_convention_names[] = {
+ [SMC_CONVENTION_UNKNOWN] = "unknown",
+ [SMC_CONVENTION_ARM_32] = "smc arm 32",
+ [SMC_CONVENTION_ARM_64] = "smc arm 64",
+ [SMC_CONVENTION_LEGACY] = "smc legacy",
+};
+
static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
clk_disable_unprepare(__scm->bus_clk);
}
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id);
+
+enum qcom_scm_convention qcom_scm_convention;
+static bool has_queried __read_mostly;
+static DEFINE_SPINLOCK(query_lock);
+
+static void __query_convention(void)
+{
+ unsigned long flags;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+ QCOM_SCM_INFO_IS_CALL_AVAIL) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+ .arginfo = QCOM_SCM_ARGS(1),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ spin_lock_irqsave(&query_lock, flags);
+ if (has_queried)
+ goto out;
+
+ qcom_scm_convention = SMC_CONVENTION_ARM_64;
+	// No device is required here: with only one argument there is nothing
+	// to dma_map_single() to the secure world.
+ ret = scm_smc_call(NULL, &desc, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto out;
+
+ qcom_scm_convention = SMC_CONVENTION_ARM_32;
+ ret = scm_smc_call(NULL, &desc, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto out;
+
+ qcom_scm_convention = SMC_CONVENTION_LEGACY;
+out:
+ has_queried = true;
+ spin_unlock_irqrestore(&query_lock, flags);
+ pr_info("qcom_scm: convention: %s\n",
+ qcom_scm_convention_names[qcom_scm_convention]);
+}
+
+static inline enum qcom_scm_convention __get_convention(void)
+{
+ if (unlikely(!has_queried))
+ __query_convention();
+ return qcom_scm_convention;
+}
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ might_sleep();
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, false);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, true);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call_atomic(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
+}
+
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+ break;
+ case SMC_CONVENTION_LEGACY:
+ desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+ break;
+ default:
+ pr_err("Unknown SMC convention being used\n");
+ return -EINVAL;
+ }
+
+ ret = qcom_scm_call(dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+
/**
* qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
* @entry: Entry point function for the cpus
__scm = scm;
__scm->dev = &pdev->dev;
- __qcom_scm_init();
+ __query_convention();
/*
* If requested enable "download mode", from this point on warmboot
*/
#ifndef __QCOM_SCM_INT_H
#define __QCOM_SCM_INT_H
+
+enum qcom_scm_convention {
+ SMC_CONVENTION_UNKNOWN,
+ SMC_CONVENTION_LEGACY,
+ SMC_CONVENTION_ARM_32,
+ SMC_CONVENTION_ARM_64,
+};
+
+extern enum qcom_scm_convention qcom_scm_convention;
+
#define MAX_QCOM_SCM_ARGS 10
#define MAX_QCOM_SCM_RETS 3
u64 result[MAX_QCOM_SCM_RETS];
};
-extern int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res);
-extern int qcom_scm_call_atomic(struct device *dev,
- const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res);
+#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res, bool atomic);
+
+#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
+extern int scm_legacy_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
+extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
#define QCOM_SCM_SVC_BOOT 0x01
#define QCOM_SCM_BOOT_SET_ADDR 0x01
#define QCOM_SCM_SVC_INFO 0x06
#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01
-extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
- u32 cmd_id);
#define QCOM_SCM_SVC_MP 0x0c
#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02