[PATCH v3 5/6] remoteproc: mediatek: Add VCP ipi communication sync mechanism
Xiangzhi Tang
xiangzhi.tang at mediatek.com
Thu Mar 19 20:18:07 PDT 2026
1. Add VCP ready IPI register driver.
2. Add VCP ready notify work mechanism.
3. Add VCP feature register mechanism.
Signed-off-by: Xiangzhi Tang <xiangzhi.tang at mediatek.com>
---
drivers/remoteproc/mtk_vcp_common.c | 279 ++++++++++++++++++++++
drivers/remoteproc/mtk_vcp_common.h | 56 +++++
drivers/remoteproc/mtk_vcp_rproc.c | 62 ++++-
drivers/remoteproc/mtk_vcp_rproc.h | 18 ++
include/linux/remoteproc/mtk_vcp_public.h | 12 +
5 files changed, 425 insertions(+), 2 deletions(-)
diff --git a/drivers/remoteproc/mtk_vcp_common.c b/drivers/remoteproc/mtk_vcp_common.c
index 97ea8099912d..f3b506034e95 100644
--- a/drivers/remoteproc/mtk_vcp_common.c
+++ b/drivers/remoteproc/mtk_vcp_common.c
@@ -24,6 +24,9 @@
#include "mtk_vcp_common.h"
#include "mtk_vcp_rproc.h"
+static BLOCKING_NOTIFIER_HEAD(mmup_notifier_list);
+static BLOCKING_NOTIFIER_HEAD(vcp_notifier_list);
+
phys_addr_t vcp_get_reserve_mem_phys(struct mtk_vcp_device *vcp,
enum vcp_reserve_mem_id id)
{
@@ -153,6 +156,40 @@ int vcp_reserve_memory_init(struct mtk_vcp_device *vcp)
return 0;
}
+static bool vcp_is_core_ready(struct mtk_vcp_device *vcp,
+ enum vcp_core_id core_id)
+{
+ switch (core_id) {
+ case VCP_ID:
+ return vcp->vcp_cluster->vcp_ready[VCP_ID];
+ case MMUP_ID:
+ return vcp->vcp_cluster->vcp_ready[MMUP_ID];
+ case VCP_CORE_TOTAL:
+ default:
+ return vcp->vcp_cluster->vcp_ready[VCP_ID] &
+ vcp->vcp_cluster->vcp_ready[MMUP_ID];
+ }
+}
+
+static enum vcp_core_id get_core_by_feature(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id)
+{
+ for (u32 i = 0; i < NUM_FEATURE_ID; i++) {
+ if (vcp->platdata->feature_tb[i].feature_id == id)
+ return vcp->platdata->feature_tb[i].core_id;
+ }
+
+ return 0;
+}
+
+bool is_vcp_ready(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id)
+{
+ enum vcp_core_id core_id = get_core_by_feature(vcp, id);
+
+ return vcp_is_core_ready(vcp, core_id);
+}
+
int wait_core_hart_shutdown(struct mtk_vcp_device *vcp,
enum vcp_core_id core_id)
{
@@ -215,9 +252,120 @@ int wait_core_hart_shutdown(struct mtk_vcp_device *vcp,
return ret;
}
+void vcp_A_register_notify(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id,
+ struct notifier_block *nb)
+{
+ enum vcp_core_id core_id = get_core_by_feature(vcp, id);
+
+ switch (core_id) {
+ case VCP_ID:
+ blocking_notifier_chain_register(&vcp_notifier_list, nb);
+ if (vcp_is_core_ready(vcp, VCP_ID))
+ nb->notifier_call(nb, VCP_EVENT_READY, NULL);
+ break;
+ case MMUP_ID:
+ blocking_notifier_chain_register(&mmup_notifier_list, nb);
+ if (vcp_is_core_ready(vcp, MMUP_ID))
+ nb->notifier_call(nb, VCP_EVENT_READY, NULL);
+ break;
+ default:
+ dev_err(vcp->dev, "%s(), No Support core id\n", __func__);
+ break;
+ }
+}
+
+void vcp_A_unregister_notify(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id,
+ struct notifier_block *nb)
+{
+ enum vcp_core_id core_id = get_core_by_feature(vcp, id);
+
+ switch (core_id) {
+ case VCP_ID:
+ blocking_notifier_chain_unregister(&vcp_notifier_list, nb);
+ break;
+ case MMUP_ID:
+ blocking_notifier_chain_unregister(&mmup_notifier_list, nb);
+ break;
+ default:
+ dev_err(vcp->dev, "%s(), No Support core id\n", __func__);
+ break;
+ }
+}
+
+void vcp_extern_notify(enum vcp_core_id core_id,
+ enum vcp_notify_event notify_status)
+{
+ switch (core_id) {
+ case VCP_ID:
+ blocking_notifier_call_chain(&vcp_notifier_list, notify_status, NULL);
+ break;
+ case MMUP_ID:
+ blocking_notifier_call_chain(&mmup_notifier_list, notify_status, NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static void vcp_A_notify_ws(struct work_struct *ws)
+{
+ struct vcp_work_struct *sws =
+ container_of(ws, struct vcp_work_struct, work);
+ struct mtk_vcp_device *vcp = platform_get_drvdata(to_platform_device(sws->dev));
+ enum vcp_core_id core_id = sws->flags;
+
+ if (core_id < VCP_CORE_TOTAL) {
+ mutex_lock(&vcp->vcp_cluster->vcp_ready_mutex);
+ vcp->vcp_cluster->vcp_ready[core_id] = true;
+ mutex_unlock(&vcp->vcp_cluster->vcp_ready_mutex);
+
+ vcp_extern_notify(core_id, VCP_EVENT_READY);
+
+ /* Clear reset status and release the wake lock. */
+ dev_info(sws->dev, "%s core id %u ready\n", __func__, core_id);
+ } else {
+ dev_err(sws->dev, "%s wrong core id %u\n", __func__, core_id);
+ }
+}
+
+static void vcp_A_set_ready(struct mtk_vcp_device *vcp,
+ enum vcp_core_id core_id)
+{
+ if (core_id < VCP_CORE_TOTAL) {
+ vcp->vcp_cluster->vcp_ready_notify_wk[core_id].flags = core_id;
+ queue_work(vcp->vcp_cluster->vcp_workqueue,
+ &vcp->vcp_cluster->vcp_ready_notify_wk[core_id].work);
+ }
+}
+
+int vcp_A_ready_ipi_handler(u32 id, void *prdata, void *data, u32 len)
+{
+ struct mtk_vcp_device *vcp = (struct mtk_vcp_device *)prdata;
+
+ switch (id) {
+ case IPI_IN_VCP_READY_0:
+ if (!vcp_is_core_ready(vcp, VCP_ID))
+ vcp_A_set_ready(vcp, VCP_ID);
+ break;
+ case IPI_IN_VCP_READY_1:
+ if (!vcp_is_core_ready(vcp, MMUP_ID))
+ vcp_A_set_ready(vcp, MMUP_ID);
+ break;
+ default:
+ dev_err(vcp->dev, "%s(), No Support ipi id\n", __func__);
+ break;
+ }
+
+ return 0;
+}
+
int reset_vcp(struct mtk_vcp_device *vcp)
{
struct arm_smccc_res res;
+ bool mmup_status, vcp_status;
+ int ret;
if (vcp->vcp_cluster->core_nums > MMUP_ID) {
writel((u32)VCP_PACK_IOVA(vcp->vcp_cluster->share_mem_iova),
@@ -228,6 +376,16 @@ int reset_vcp(struct mtk_vcp_device *vcp)
arm_smccc_smc(MTK_SIP_TINYSYS_VCP_CONTROL,
MTK_TINYSYS_MMUP_KERNEL_OP_RESET_RELEASE,
1, 0, 0, 0, 0, 0, &res);
+
+ ret = read_poll_timeout(vcp_is_core_ready,
+ mmup_status, mmup_status,
+ USEC_PER_MSEC,
+ VCP_READY_TIMEOUT_MS * USEC_PER_MSEC,
+ false, vcp, MMUP_ID);
+ if (ret) {
+ dev_err(vcp->dev, "MMUP_ID bootup timeout. Stop vcp booting\n");
+ return ret;
+ }
}
writel((u32)VCP_PACK_IOVA(vcp->vcp_cluster->share_mem_iova),
@@ -239,6 +397,127 @@ int reset_vcp(struct mtk_vcp_device *vcp)
MTK_TINYSYS_VCP_KERNEL_OP_RESET_RELEASE,
1, 0, 0, 0, 0, 0, &res);
+ ret = read_poll_timeout(vcp_is_core_ready,
+ vcp_status, vcp_status,
+ USEC_PER_MSEC,
+ VCP_READY_TIMEOUT_MS * USEC_PER_MSEC,
+ false, vcp, VCP_ID);
+ if (ret) {
+ dev_err(vcp->dev, "VCP_ID bootup timeout. Stop vcp booting\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vcp_enable_pm_clk(struct mtk_vcp_device *vcp, enum vcp_feature_id id)
+{
+ struct vcp_slp_ctrl slp_data;
+ bool suspend_status;
+ int ret;
+
+ if (vcp->vcp_cluster->feature_enable[id]) {
+ dev_err(vcp->dev, "%s feature(id=%d) already enabled\n",
+ __func__, id);
+ return -EINVAL;
+ }
+
+ if (id != RTOS_FEATURE_ID) {
+ slp_data.cmd = SLP_WAKE_LOCK;
+ slp_data.feature = id;
+ ret = vcp->ipi_ops->ipi_send_compl(vcp->ipi_dev, IPI_OUT_C_SLEEP_0,
+ &slp_data, PIN_OUT_C_SIZE_SLEEP_0, 500);
+ if (ret < 0) {
+ dev_err(vcp->dev, "%s ipc_send_compl failed. ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int vcp_disable_pm_clk(struct mtk_vcp_device *vcp, enum vcp_feature_id id)
+{
+ struct vcp_slp_ctrl slp_data;
+ bool suspend_status;
+ int ret;
+
+ if (!vcp->vcp_cluster->feature_enable[id]) {
+ dev_err(vcp->dev, "%s feature(id=%d) already disabled\n",
+ __func__, id);
+ return -EINVAL;
+ }
+
+ if (id != RTOS_FEATURE_ID) {
+ slp_data.cmd = SLP_WAKE_UNLOCK;
+ slp_data.feature = id;
+ ret = vcp->ipi_ops->ipi_send_compl(vcp->ipi_dev, IPI_OUT_C_SLEEP_0,
+ &slp_data, PIN_OUT_C_SIZE_SLEEP_0, 500);
+ if (ret < 0) {
+ dev_err(vcp->dev, "%s ipc_send_compl failed. ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int vcp_A_register_feature(struct mtk_vcp_device *vcp, enum vcp_feature_id id)
+{
+ int ret;
+
+ if (id >= NUM_FEATURE_ID) {
+ dev_err(vcp->dev, "%s unsupported feature id %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vcp->vcp_cluster->vcp_feature_mutex);
+ ret = vcp_enable_pm_clk(vcp, id);
+ if (ret)
+ dev_err(vcp->dev, "%s feature(id=%d) register failed\n",
+ __func__, id);
+ else
+ vcp->vcp_cluster->feature_enable[id] = true;
+ mutex_unlock(&vcp->vcp_cluster->vcp_feature_mutex);
+
+ return ret;
+}
+
+int vcp_A_deregister_feature(struct mtk_vcp_device *vcp, enum vcp_feature_id id)
+{
+ int ret;
+
+ if (id >= NUM_FEATURE_ID) {
+ dev_err(vcp->dev, "%s unsupported feature id %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vcp->vcp_cluster->vcp_feature_mutex);
+ ret = vcp_disable_pm_clk(vcp, id);
+ if (ret)
+ dev_err(vcp->dev, "%s feature(id=%d) deregister failed\n",
+ __func__, id);
+ else
+ vcp->vcp_cluster->feature_enable[id] = false;
+ mutex_unlock(&vcp->vcp_cluster->vcp_feature_mutex);
+
+ return ret;
+}
+
+int vcp_notify_work_init(struct mtk_vcp_device *vcp)
+{
+ vcp->vcp_cluster->vcp_workqueue = create_singlethread_workqueue("VCP_WQ");
+ if (!vcp->vcp_cluster->vcp_workqueue)
+ return dev_err_probe(vcp->dev, -EINVAL, "vcp_workqueue create fail\n");
+
+ for (u32 core_id = 0; core_id < VCP_CORE_TOTAL; core_id++) {
+ vcp->vcp_cluster->vcp_ready_notify_wk[core_id].dev = vcp->dev;
+ INIT_WORK(&vcp->vcp_cluster->vcp_ready_notify_wk[core_id].work, vcp_A_notify_ws);
+ }
+
return 0;
}
diff --git a/drivers/remoteproc/mtk_vcp_common.h b/drivers/remoteproc/mtk_vcp_common.h
index d048757c955a..8b19fcb78a79 100644
--- a/drivers/remoteproc/mtk_vcp_common.h
+++ b/drivers/remoteproc/mtk_vcp_common.h
@@ -13,10 +13,13 @@
#include <linux/remoteproc/mtk_vcp_public.h>
/* VCP timeout definition */
+#define VCP_READY_TIMEOUT_MS 3000
+#define VCP_IPI_DEV_READY_TIMEOUT 1000
#define CORE_HART_SHUTDOWN_TIMEOUT_MS 10
/* VCP platform definition */
#define DMA_MAX_MASK_BIT 33
+#define PIN_OUT_C_SIZE_SLEEP_0 2
/* VCP load image definition */
#define VCM_IMAGE_MAGIC (0x58881688)
@@ -90,6 +93,14 @@ enum vcp_core_id {
VCP_CORE_TOTAL,
};
+enum vcp_slp_cmd {
+ SLP_WAKE_LOCK = 0,
+ SLP_WAKE_UNLOCK,
+ SLP_STATUS_DBG,
+ SLP_SUSPEND,
+ SLP_RESUME,
+};
+
enum mtk_tinysys_vcp_kernel_op {
MTK_TINYSYS_VCP_KERNEL_OP_RESET_SET = 0,
MTK_TINYSYS_VCP_KERNEL_OP_RESET_RELEASE,
@@ -154,6 +165,32 @@ struct vcp_reserve_mblock {
size_t size;
};
+/**
+ * struct vcp_slp_ctrl - sleep ctrl data sync with AP and VCP
+ *
+ * @feature: Feature id
+ * @cmd: sleep cmd flag.
+ */
+struct vcp_slp_ctrl {
+ u32 feature;
+ u32 cmd;
+};
+
+/**
+ * struct vcp_work_struct - vcp notify work structure.
+ *
+ * @work: struct work_struct member
+ * @dev: struct device member
+ * @flags: vcp notify work flag
+ * @id: vcp core id
+ */
+struct vcp_work_struct {
+ struct work_struct work;
+ struct device *dev;
+ u32 flags;
+ u32 id;
+};
+
/**
* struct vcp_region_info_st - config vcp image info sync to vcp bootloader.
*
@@ -201,6 +238,20 @@ struct vcp_region_info_st {
u32 coredump_dram_offset;
};
+int vcp_A_ready_ipi_handler(u32 id, void *prdata,
+ void *data, u32 len);
+bool is_vcp_ready(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id);
+int vcp_notify_work_init(struct mtk_vcp_device *vcp);
+void vcp_extern_notify(enum vcp_core_id core_id,
+ enum vcp_notify_event notify_status);
+void vcp_A_register_notify(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id,
+ struct notifier_block *nb);
+void vcp_A_unregister_notify(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id,
+ struct notifier_block *nb);
+
int vcp_reserve_memory_init(struct mtk_vcp_device *vcp);
phys_addr_t vcp_get_reserve_mem_phys(struct mtk_vcp_device *vcp, enum vcp_reserve_mem_id id);
dma_addr_t vcp_get_reserve_mem_iova(struct mtk_vcp_device *vcp, enum vcp_reserve_mem_id id);
@@ -213,5 +264,10 @@ int mtk_vcp_load(struct rproc *rproc, const struct firmware *fw);
int vcp_wdt_irq_init(struct mtk_vcp_device *vcp);
+int vcp_A_register_feature(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id);
+int vcp_A_deregister_feature(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id);
+
int wait_core_hart_shutdown(struct mtk_vcp_device *vcp, enum vcp_core_id core_id);
#endif
diff --git a/drivers/remoteproc/mtk_vcp_rproc.c b/drivers/remoteproc/mtk_vcp_rproc.c
index 6e0fecef72ce..833a0dc69d9c 100644
--- a/drivers/remoteproc/mtk_vcp_rproc.c
+++ b/drivers/remoteproc/mtk_vcp_rproc.c
@@ -71,6 +71,30 @@ static int mtk_vcp_start(struct rproc *rproc)
{
struct mtk_vcp_device *vcp = (struct mtk_vcp_device *)rproc->priv;
struct arm_smccc_res res;
+ int ret;
+
+ ret = vcp->ipi_ops->ipi_register(vcp->ipi_dev, IPI_OUT_C_SLEEP_0,
+ NULL, NULL, &vcp->vcp_cluster->slp_ipi_ack_data);
+ if (ret) {
+ dev_err(vcp->dev, "Failed to register IPI_OUT_C_SLEEP_0\n");
+ goto slp_ipi_unregister;
+ }
+
+ ret = vcp->ipi_ops->ipi_register(vcp->ipi_dev, IPI_IN_VCP_READY_0,
+ (void *)vcp_A_ready_ipi_handler,
+ vcp, &vcp->vcp_cluster->msg_vcp_ready0);
+ if (ret) {
+ dev_err(vcp->dev, "Failed to register IPI_IN_VCP_READY_0\n");
+ goto vcp0_ready_ipi_unregister;
+ }
+
+ ret = vcp->ipi_ops->ipi_register(vcp->ipi_dev, IPI_IN_VCP_READY_1,
+ (void *)vcp_A_ready_ipi_handler,
+ vcp, &vcp->vcp_cluster->msg_vcp_ready1);
+ if (ret) {
+ dev_err(vcp->dev, "Failed to register IPI_IN_VCP_READY_1\n");
+ goto vcp1_ready_ipi_unregister;
+ }
/* core 0 */
arm_smccc_smc(MTK_SIP_TINYSYS_VCP_CONTROL,
@@ -83,10 +107,22 @@ static int mtk_vcp_start(struct rproc *rproc)
1, 0, 0, 0, 0, 0, &res);
ret = reset_vcp(vcp);
- if (ret)
+ if (ret) {
dev_err(vcp->dev, "bootup fail\n");
- else
+ } else {
dev_info(vcp->dev, "bootup successfully\n");
+ if (vcp_A_register_feature(vcp, RTOS_FEATURE_ID) < 0)
+ vcp_A_deregister_feature(vcp, RTOS_FEATURE_ID);
+ }
+
+ return ret;
+
+vcp1_ready_ipi_unregister:
+ vcp->ipi_ops->ipi_unregister(vcp->ipi_dev, IPI_IN_VCP_READY_1);
+vcp0_ready_ipi_unregister:
+ vcp->ipi_ops->ipi_unregister(vcp->ipi_dev, IPI_IN_VCP_READY_0);
+slp_ipi_unregister:
+ vcp->ipi_ops->ipi_unregister(vcp->ipi_dev, IPI_OUT_C_SLEEP_0);
return ret;
}
@@ -97,6 +133,9 @@ static int mtk_vcp_stop(struct rproc *rproc)
vcp_A_deregister_feature(vcp, RTOS_FEATURE_ID);
+ vcp_extern_notify(VCP_ID, VCP_EVENT_STOP);
+ vcp_extern_notify(MMUP_ID, VCP_EVENT_STOP);
+
return 0;
}
@@ -185,6 +224,8 @@ static struct mtk_vcp_device *vcp_rproc_init(struct platform_device *pdev,
rproc->auto_boot = vcp_of_data->platdata.auto_boot;
rproc->sysfs_read_only = vcp_of_data->platdata.sysfs_read_only;
+ mutex_init(&vcp->vcp_cluster->vcp_feature_mutex);
+ mutex_init(&vcp->vcp_cluster->vcp_ready_mutex);
platform_set_drvdata(pdev, vcp);
ret = vcp_reserve_memory_init(vcp);
@@ -213,6 +254,10 @@ static struct mtk_vcp_device *vcp_rproc_init(struct platform_device *pdev,
if (ret)
return ERR_PTR(dev_err_probe(dev, ret, "vcp_ipi_mbox_init failed\n"));
+ ret = vcp_notify_work_init(vcp);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "vcp_notify_work_init failed\n"));
+
pm_runtime_get_sync(dev);
return vcp;
@@ -287,6 +332,8 @@ static void vcp_device_remove(struct platform_device *pdev)
{
struct mtk_vcp_device *vcp = platform_get_drvdata(pdev);
+ flush_workqueue(vcp->vcp_cluster->vcp_workqueue);
+ destroy_workqueue(vcp->vcp_cluster->vcp_workqueue);
pm_runtime_disable(&pdev->dev);
rproc_del(vcp->rproc);
@@ -297,6 +344,12 @@ static void vcp_device_shutdown(struct platform_device *pdev)
struct mtk_vcp_device *vcp = platform_get_drvdata(pdev);
int ret;
+ vcp->vcp_cluster->vcp_ready[VCP_ID] = false;
+ vcp->vcp_cluster->vcp_ready[MMUP_ID] = false;
+
+ vcp_extern_notify(VCP_ID, VCP_EVENT_STOP);
+ vcp_extern_notify(MMUP_ID, VCP_EVENT_STOP);
+
writel(GIPC_VCP_HART0_SHUT, vcp->vcp_cluster->cfg_core + R_GIPC_IN_SET);
ret = wait_core_hart_shutdown(vcp, VCP_ID);
if (ret)
@@ -382,6 +435,11 @@ static struct mtk_vcp_ipi_ops mt8196_vcp_ipi_ops = {
static const struct mtk_vcp_of_data mt8196_of_data = {
.ops = {
+ .vcp_is_ready = is_vcp_ready,
+ .vcp_register_feature = vcp_A_register_feature,
+ .vcp_deregister_feature = vcp_A_deregister_feature,
+ .vcp_register_notify = vcp_A_register_notify,
+ .vcp_unregister_notify = vcp_A_unregister_notify,
.vcp_get_mem_phys = vcp_get_reserve_mem_phys,
.vcp_get_mem_iova = vcp_get_reserve_mem_iova,
.vcp_get_mem_virt = vcp_get_reserve_mem_virt,
diff --git a/drivers/remoteproc/mtk_vcp_rproc.h b/drivers/remoteproc/mtk_vcp_rproc.h
index ff3e67fc2611..600715b77124 100644
--- a/drivers/remoteproc/mtk_vcp_rproc.h
+++ b/drivers/remoteproc/mtk_vcp_rproc.h
@@ -19,10 +19,19 @@
* @core_nums: total core numbers get from dtb
* @twohart: core two hart support flag
* @sram_offset: core sram memory layout
+ * @msg_vcp_ready0: core0 ready ipi msg data
+ * @msg_vcp_ready1: core1 ready ipi msg data
+ * @slp_ipi_ack_data: sleep ipi msg data
+ * @feature_enable: feature status count data
+ * @vcp_ready: vcp core status flag
* @share_mem_iova: shared memory iova base
* @share_mem_size: shared memory size
+ * @vcp_feature_mutex: vcp feature register mutex structure
+ * @vcp_ready_mutex: vcp core ready mutex structure
* @vcp_ipidev: struct mtk_ipi_device
+ * @vcp_workqueue: ready workqueue_struct
* @vcp_memory_tb: vcp memory allocated table
+ * @vcp_ready_notify_wk: vcp_work_struct structure
*/
struct mtk_vcp_of_cluster {
void __iomem *sram_base;
@@ -33,10 +42,19 @@ struct mtk_vcp_of_cluster {
u32 core_nums;
u32 twohart[VCP_CORE_TOTAL];
u32 sram_offset[VCP_CORE_TOTAL];
+ u32 msg_vcp_ready0;
+ u32 msg_vcp_ready1;
+ u32 slp_ipi_ack_data;
+ bool feature_enable[NUM_FEATURE_ID];
+ bool vcp_ready[VCP_CORE_TOTAL];
dma_addr_t share_mem_iova;
size_t share_mem_size;
+ struct mutex vcp_feature_mutex;
+ struct mutex vcp_ready_mutex;
struct mtk_ipi_device vcp_ipidev;
+ struct workqueue_struct *vcp_workqueue;
struct vcp_reserve_mblock vcp_memory_tb[NUMS_MEM_ID];
+ struct vcp_work_struct vcp_ready_notify_wk[VCP_CORE_TOTAL];
};
/**
diff --git a/include/linux/remoteproc/mtk_vcp_public.h b/include/linux/remoteproc/mtk_vcp_public.h
index 58ba4b8bb023..b9e1d86685fd 100644
--- a/include/linux/remoteproc/mtk_vcp_public.h
+++ b/include/linux/remoteproc/mtk_vcp_public.h
@@ -100,6 +100,18 @@ struct mtk_vcp_ipi_ops {
};
struct mtk_vcp_ops {
+ bool (*vcp_is_ready)(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id);
+ int (*vcp_register_feature)(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id);
+ int (*vcp_deregister_feature)(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id);
+ void (*vcp_register_notify)(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id,
+ struct notifier_block *nb);
+ void (*vcp_unregister_notify)(struct mtk_vcp_device *vcp,
+ enum vcp_feature_id id,
+ struct notifier_block *nb);
phys_addr_t (*vcp_get_mem_phys)(struct mtk_vcp_device *vcp,
enum vcp_reserve_mem_id id);
dma_addr_t (*vcp_get_mem_iova)(struct mtk_vcp_device *vcp,
--
2.46.0
More information about the Linux-mediatek
mailing list