From 0cd2e00912c715601850ae9bef6fc735199b41c4 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 24 Dec 2025 14:42:56 +0100
Subject: [PATCH 1/4] dp: application: log when starting the thread

Add a single info-level log entry when the userspace DP thread starts.

Signed-off-by: Guennadi Liakhovetski
---
 src/schedule/zephyr_dp_schedule_application.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index 4614da3c1fd7..a7c3458034ad 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -283,6 +283,7 @@ void dp_thread_fn(void *p1, void *p2, void *p3)
 
 	/* The IPC thread is waiting for the thread to be started, it can proceed now. */
 	k_sem_give(&dp_sync[task->core]);
+	comp_info(pmod->dev, "userspace thread started");
 
 	do {
 		/*

From e787a274b441bccad181eee068b813fcc22de25a Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 24 Dec 2025 14:45:32 +0100
Subject: [PATCH 2/4] dp: application: remove unneeded operations copy

The module operations structure is located in the module's memory, so it
is already accessible to the thread; there is no need to copy it.

Signed-off-by: Guennadi Liakhovetski
---
 src/schedule/zephyr_dp_schedule_application.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index a7c3458034ad..718b05281b59 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -416,12 +416,11 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 
 		struct task task;
 		struct task_dp_pdata pdata;
 		struct comp_driver drv;
-		struct module_interface ops;
 	} *task_memory;
 	int ret;
 
-	/* must be called on the same core the task will be binded to */
+	/* must be called on the same core the task will be bound to */
 	assert(cpu_get_id() == core);
 
 	/*
@@ -441,8 +440,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 	memset(task_memory, 0, sizeof(*task_memory));
 
 	task_memory->drv = *mod->dev->drv;
-	task_memory->ops = *mod->dev->drv->adapter_ops; // FIXME: is this needed?
-	task_memory->drv.adapter_ops = &task_memory->ops;
 	mod->dev->drv = &task_memory->drv;
 
 	/* allocate stack - must be aligned and cached so a separate alloc */

From 65a928bdf22870165aeda4b593ca26bda4eaf6ee Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 24 Dec 2025 15:18:47 +0100
Subject: [PATCH 3/4] dp: application: embed IPC flattening buffer in module data

On the one hand, IPCs are serialized, so a single IPC buffer shared by
all DP threads would be enough; but it would have to be a full page to
be added to every DP thread's memory domain. On the other hand, we can
allocate such an IPC flattening buffer for each DP thread; then it
doesn't need to be mapped separately and doesn't need its own memory
partition in the thread's memory domain. A page is 4 KiB while the
buffer is probably less than 100 bytes, so as long as we have fewer than
about 40 DP threads we are better off using per-thread buffers, and we
are unlikely to ever get that many.

Signed-off-by: Guennadi Liakhovetski
---
 src/schedule/zephyr_dp_schedule.h             |  3 +-
 src/schedule/zephyr_dp_schedule_application.c | 31 ++++++-------------
 2 files changed, 12 insertions(+), 22 deletions(-)

diff --git a/src/schedule/zephyr_dp_schedule.h b/src/schedule/zephyr_dp_schedule.h
index 9a5d30077e15..cc2093e5bfdb 100644
--- a/src/schedule/zephyr_dp_schedule.h
+++ b/src/schedule/zephyr_dp_schedule.h
@@ -25,11 +25,11 @@ struct scheduler_dp_data {
 
 enum sof_dp_part_type {
 	SOF_DP_PART_HEAP,
-	SOF_DP_PART_IPC,
	SOF_DP_PART_CFG,
 	SOF_DP_PART_TYPE_COUNT,
 };
 
+struct ipc4_flat;
 struct task_dp_pdata {
 	k_tid_t thread_id;		/* zephyr thread ID */
 	struct k_thread *thread;	/* pointer to the kernels' thread object */
@@ -44,6 +44,7 @@ struct task_dp_pdata {
 #else
 	struct k_sem *sem;		/* pointer to semaphore for task scheduling */
 	struct k_sem sem_struct;	/* semaphore for task scheduling for kernel threads */
+	struct ipc4_flat *flat;
 	unsigned char pend_ipc;
 	unsigned char pend_proc;
 	struct k_mem_partition mpart[SOF_DP_PART_TYPE_COUNT];
diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index 718b05281b59..3349ab128b29 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -36,9 +36,6 @@ static struct k_mem_domain dp_mdom[CONFIG_CORE_COUNT];
 #define DP_SYNC_INIT_LIST LISTIFY(CONFIG_CORE_COUNT, DP_SYNC_INIT, (,))
 static STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_sync, CONFIG_CORE_COUNT) = { DP_SYNC_INIT_LIST };
 
-/* TODO: make this a shared kernel->module buffer for IPC parameters */
-static uint8_t ipc_buf[4096] __aligned(4096);
-
 struct ipc4_flat {
 	unsigned int cmd;
 	int ret;
@@ -52,7 +49,7 @@ struct ipc4_flat {
 			enum ipc4_pipeline_state state;
 			int n_sources;
 			int n_sinks;
-			void *source_sink[];
+			void *source_sink[2 * CONFIG_MODULE_MAX_CONNECTIONS];
 		} pipeline_state;
 	};
 };
@@ -79,15 +76,14 @@ static int ipc_thread_flatten(unsigned int cmd, const union scheduler_dp_thread_
 	case COMP_TRIGGER_STOP:
 		break;
 	case COMP_TRIGGER_PREPARE:
-		if (sizeof(flat->cmd) + sizeof(flat->ret) + sizeof(flat->pipeline_state) +
-		    sizeof(void *) * (param->pipeline_state.n_sources +
-				      param->pipeline_state.n_sinks) >
-		    sizeof(ipc_buf))
+		if (param->pipeline_state.n_sources > CONFIG_MODULE_MAX_CONNECTIONS ||
+		    param->pipeline_state.n_sinks > CONFIG_MODULE_MAX_CONNECTIONS)
 			return -ENOMEM;
 
 		flat->pipeline_state.state = param->pipeline_state.state;
 		flat->pipeline_state.n_sources = param->pipeline_state.n_sources;
 		flat->pipeline_state.n_sinks = param->pipeline_state.n_sinks;
+		/* Up to 2 * CONFIG_MODULE_MAX_CONNECTIONS */
 		memcpy(flat->pipeline_state.source_sink, param->pipeline_state.sources,
 		       flat->pipeline_state.n_sources *
 		       sizeof(flat->pipeline_state.source_sink[0]));
@@ -178,12 +174,10 @@ int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd,
 
 	unsigned int lock_key = scheduler_dp_lock(pmod->dev->task->core);
 
-	struct ipc4_flat *flat = (struct ipc4_flat *)ipc_buf;
-
 	/* IPCs are serialised */
-	flat->ret = -ENOSYS;
+	pdata->flat->ret = -ENOSYS;
 
-	ret = ipc_thread_flatten(cmd, param, flat);
+	ret = ipc_thread_flatten(cmd, param, pdata->flat);
 	if (!ret) {
 		pdata->pend_ipc++;
 		k_sem_give(pdata->sem);
@@ -197,7 +191,7 @@ int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd,
 		if (ret < 0)
 			tr_err(&dp_tr, "Failed waiting for DP thread: %d", ret);
 		else
-			ret = flat->ret;
+			ret = pdata->flat->ret;
 	}
 	return ret;
 }
@@ -316,7 +310,7 @@ void dp_thread_fn(void *p1, void *p2, void *p3)
 
 		if (pend_ipc) {
 			/* handle IPC */
 			tr_dbg(&dp_tr, "got IPC wake up for %p state %d", pmod, task->state);
-			ipc_thread_unflatten_run(pmod, (struct ipc4_flat *)ipc_buf);
+			ipc_thread_unflatten_run(pmod, task_pdata->flat);
 			k_sem_give(&dp_sync[task->core]);
 		}
@@ -401,7 +395,6 @@ void scheduler_dp_domain_free(struct processing_module *pmod)
 	struct task_dp_pdata *pdata = pmod->dev->task->priv_data;
 
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
-	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_IPC);
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
 #endif
 }
@@ -416,6 +409,7 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 
 		struct task task;
 		struct task_dp_pdata pdata;
 		struct comp_driver drv;
+		struct ipc4_flat flat;
 	} *task_memory;
 	int ret;
@@ -465,6 +459,7 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 	/* It will be overwritten for K_USER threads to dynamic ones. */
 	pdata->sem = &pdata->sem_struct;
 	pdata->thread = &pdata->thread_struct;
+	pdata->flat = &task_memory->flat;
 
 #ifdef CONFIG_USERSPACE
 	if (options & K_USER) {
@@ -527,12 +522,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 			.size = size,
 			.attr = K_MEM_PARTITION_P_RW_U_RW,
 		};
-		/* IPC flattening buffer partition */
-		pdata->mpart[SOF_DP_PART_IPC] = (struct k_mem_partition){
-			.start = (uintptr_t)&ipc_buf,
-			.size = sizeof(ipc_buf),
-			.attr = K_MEM_PARTITION_P_RW_U_RW,
-		};
 		/* Host mailbox partition for additional IPC parameters: read-only */
 		pdata->mpart[SOF_DP_PART_CFG] = (struct k_mem_partition){
 			.start = (uintptr_t)MAILBOX_HOSTBOX_BASE,

From f471a53625aea203d0bdf0010bc3d17b3f7f6a11 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 24 Dec 2025 15:37:29 +0100
Subject: [PATCH 4/4] dp: application: remove left-over code

The ppart partition pointer array is no longer used; remove it.

Signed-off-by: Guennadi Liakhovetski
---
 src/schedule/zephyr_dp_schedule_application.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index 3349ab128b29..65f4c1ee9de1 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -510,10 +510,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 		unsigned int pidx;
 		size_t size;
 		uintptr_t start;
-		struct k_mem_partition *ppart[SOF_DP_PART_TYPE_COUNT];
-
-		for (pidx = 0; pidx < ARRAY_SIZE(ppart); pidx++)
-			ppart[pidx] = pdata->mpart + pidx;
 
 		/* Module heap partition */
 		mod_heap_info(mod, &size, &start);
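
Editor's note, not part of the series: the sizing argument in patch 3/4 is
easy to check. Below is a minimal, self-contained C sketch that mocks up
struct ipc4_flat as declared in the patch; the value of
CONFIG_MODULE_MAX_CONNECTIONS (8 here) and the 4 KiB page size are
assumptions, since the real values come from Kconfig and the platform.

/*
 * Standalone sketch (not part of the series) verifying the size math
 * from patch 3/4. Build with any C11 compiler, e.g. gcc -std=c11.
 */
#include <stdio.h>

#define CONFIG_MODULE_MAX_CONNECTIONS 8	/* assumed; Kconfig-defined in SOF */
#define PAGE_SIZE 4096			/* assumed 4 KiB page, per the commit message */

enum ipc4_pipeline_state { IPC4_PIPELINE_STATE_RUNNING };	/* stand-in */

/* Mirrors the layout declared in patch 3/4 */
struct ipc4_flat {
	unsigned int cmd;
	int ret;
	union {
		struct {
			enum ipc4_pipeline_state state;
			int n_sources;
			int n_sinks;
			void *source_sink[2 * CONFIG_MODULE_MAX_CONNECTIONS];
		} pipeline_state;
	};
};

int main(void)
{
	/* 84 bytes with 32-bit pointers (the Xtensa DSP case), within the
	 * "less than 100 bytes" estimate from the commit message */
	printf("sizeof(struct ipc4_flat) = %zu\n", sizeof(struct ipc4_flat));
	/* Per-thread copies beat one dedicated page up to this many threads */
	printf("break-even thread count = %zu\n",
	       (size_t)PAGE_SIZE / sizeof(struct ipc4_flat));
	return 0;
}

With 32-bit pointers this prints 84 bytes and a break-even of 48 threads,
consistent with the commit message's "fewer than about 40" once some slack
is allowed for.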
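
A second hedged sketch, of why the old shared ipc_buf cost a full page: a
Zephyr k_mem_partition must be aligned to the MPU/MMU granularity, so a
standalone buffer exposed to user threads pays at least page-size overhead
and occupies one partition slot in every DP thread's memory domain. The
k_mem_domain_*() calls below are the real Zephyr APIs the series relies on;
the names dp_buf, dp_part, dp_domain and dp_domain_setup() are illustrative
only, not taken from the SOF sources.

#include <zephyr/kernel.h>

#define IPC_BUF_GRANULE 4096	/* assumed MMU page / MPU region granularity */

/* The pre-patch pattern: ~100 used bytes still need a full aligned page */
static uint8_t dp_buf[IPC_BUF_GRANULE] __aligned(IPC_BUF_GRANULE);

static struct k_mem_partition dp_part = {
	.start = (uintptr_t)dp_buf,
	.size = sizeof(dp_buf),
	.attr = K_MEM_PARTITION_P_RW_U_RW,	/* same attribute the patch used */
};

static struct k_mem_domain dp_domain;

/* Each user thread's domain needs the partition added and the thread
 * attached; embedding the buffer in already-mapped task memory, as
 * patch 3/4 does, makes all of this unnecessary. */
void dp_domain_setup(k_tid_t user_thread)
{
	k_mem_domain_init(&dp_domain, 0, NULL);
	k_mem_domain_add_partition(&dp_domain, &dp_part);
	k_mem_domain_add_thread(&dp_domain, user_thread);
}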