From 9a0b8c4a95a679c263749a2215ba0e2f97589650 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 10 Feb 2026 15:17:16 +0200 Subject: [PATCH 01/54] ipc: move standalone-test check later in ipc_init() If CONFIG_SOF_BOOT_TEST_STANDALONE is set, ipc_init() is terminated early. This ensures SOF will not start to generate or respond to IPC messages that could potentially interfere with standalone test cases (some of which send and receive IPCs). The current implementation leaves the component list uninitialized and this can cause trouble to standalone tests that want to utilize common IPC code to build messages. Fix this problem by executing more of ipc_init() also in the standalone mode. Signed-off-by: Kai Vehmanen --- src/ipc/ipc-common.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index 10f241784625..a62223dc97a2 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -294,11 +294,6 @@ __cold int ipc_init(struct sof *sof) tr_dbg(&ipc_tr, "entry"); -#if CONFIG_SOF_BOOT_TEST_STANDALONE - LOG_INF("SOF_BOOT_TEST_STANDALONE, disabling IPC."); - return 0; -#endif - /* init ipc data */ sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*sof->ipc)); if (!sof->ipc) { @@ -329,6 +324,11 @@ __cold int ipc_init(struct sof *sof) io_perf_monitor_init_data(&sof->ipc->io_perf_out_msg_count, &init_data); #endif +#if CONFIG_SOF_BOOT_TEST_STANDALONE + LOG_INF("SOF_BOOT_TEST_STANDALONE, disabling IPC."); + return 0; +#endif + #ifdef __ZEPHYR__ struct k_thread *thread = &sof->ipc->ipc_send_wq.thread; From 83dfe00e37f3d578794d74709bc10db17d54f52b Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Feb 2026 14:54:58 +0200 Subject: [PATCH 02/54] audio: component: add comp_grant_access_to_thread() Add a method to add access to a component object to a particular thread. 
This is required as component object state includes kernel objects and to use these from user-space, access needs to be granted. Signed-off-by: Kai Vehmanen --- src/audio/component.c | 8 ++++++++ src/include/sof/audio/component.h | 10 ++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/audio/component.c b/src/audio/component.c index 941be20f8534..83d35bc18a1c 100644 --- a/src/audio/component.c +++ b/src/audio/component.c @@ -699,3 +699,11 @@ void comp_update_ibs_obs_cpc(struct comp_dev *dev) #endif } +#ifdef CONFIG_SOF_USERSPACE_LL +void comp_grant_access_to_thread(const struct comp_dev *dev, struct k_thread *th) +{ + assert(dev->list_mutex); + tr_dbg(&ipc_tr, "grant access to mutex %p for thread %p", dev->list_mutex, th); + k_thread_access_grant(th, dev->list_mutex); +} +#endif diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h index b6695b9cd312..4bc65046fe2e 100644 --- a/src/include/sof/audio/component.h +++ b/src/include/sof/audio/component.h @@ -1228,6 +1228,16 @@ void comp_init_performance_data(struct comp_dev *dev); */ bool comp_update_performance_data(struct comp_dev *dev, uint32_t cycles_used); +/** + * Grant access to component to a thread. + * + * Must be called from kernel context. + * + * @param dev Component to update. + * @param th thread to give access to + */ +void comp_grant_access_to_thread(const struct comp_dev *dev, struct k_thread *th); + static inline int user_get_buffer_memory_region(const struct comp_driver *drv) { #if CONFIG_SOF_USERSPACE_USE_DRIVER_HEAP From ba22d89e6ccdf6a10f180e1146a94b316018e8b6 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 12 Feb 2026 21:16:34 +0200 Subject: [PATCH 03/54] pipeline: protect component connections with a mutex In userspace LL builds, use a mutex to protect component connections. This code should work for kernel builds, but at least now add new code under an ifdef to avoid the cost of additional mutex object. 
Signed-off-by: Kai Vehmanen --- src/audio/pipeline/pipeline-graph.c | 22 ++++++++++++++++------ src/include/sof/audio/component.h | 9 +++++++++ src/include/sof/audio/component_ext.h | 4 ++++ 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 89bb3574289b..39125b6346e3 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -178,24 +178,34 @@ static void buffer_set_comp(struct comp_buffer *buffer, struct comp_dev *comp, comp_buffer_set_sink_component(buffer, comp); } +#ifdef CONFIG_SOF_USERSPACE_LL +#define PPL_LOCK_DECLARE +#define PPL_LOCK(x) k_mutex_lock(comp->list_mutex, K_FOREVER) +#define PPL_UNLOCK(x) k_mutex_unlock(comp->list_mutex) +#else +#define PPL_LOCK_DECLARE uint32_t flags +#define PPL_LOCK(x) irq_local_disable(flags) +#define PPL_UNLOCK(x) irq_local_enable(flags) +#endif + int pipeline_connect(struct comp_dev *comp, struct comp_buffer *buffer, int dir) { struct list_item *comp_list; - uint32_t flags; + PPL_LOCK_DECLARE; if (dir == PPL_CONN_DIR_COMP_TO_BUFFER) comp_info(comp, "connect buffer %d as sink", buf_get_id(buffer)); else comp_info(comp, "connect buffer %d as source", buf_get_id(buffer)); - irq_local_disable(flags); + PPL_LOCK(); comp_list = comp_buffer_list(comp, dir); buffer_attach(buffer, comp_list, dir); buffer_set_comp(buffer, comp, dir); - irq_local_enable(flags); + PPL_UNLOCK(); return 0; } @@ -203,20 +213,20 @@ int pipeline_connect(struct comp_dev *comp, struct comp_buffer *buffer, void pipeline_disconnect(struct comp_dev *comp, struct comp_buffer *buffer, int dir) { struct list_item *comp_list; - uint32_t flags; + PPL_LOCK_DECLARE; if (dir == PPL_CONN_DIR_COMP_TO_BUFFER) comp_dbg(comp, "disconnect buffer %d as sink", buf_get_id(buffer)); else comp_dbg(comp, "disconnect buffer %d as source", buf_get_id(buffer)); - irq_local_disable(flags); + PPL_LOCK(); comp_list = comp_buffer_list(comp, dir); 
buffer_detach(buffer, comp_list, dir); buffer_set_comp(buffer, NULL, dir); - irq_local_enable(flags); + PPL_UNLOCK(); } /* pipelines must be inactive */ diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h index 4bc65046fe2e..a69be34b15a1 100644 --- a/src/include/sof/audio/component.h +++ b/src/include/sof/audio/component.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -679,6 +680,10 @@ struct comp_dev { struct list_item bsource_list; /**< list of source buffers */ struct list_item bsink_list; /**< list of sink buffers */ +#ifdef CONFIG_SOF_USERSPACE_LL + struct k_mutex *list_mutex; /**< protect lists of source/sinks */ +#endif + /* performance data*/ struct comp_perf_data perf_data; /* Input Buffer Size for pin 0, add array for other pins if needed */ @@ -863,6 +868,10 @@ static inline void comp_init(const struct comp_driver *drv, dev->state = COMP_STATE_INIT; list_init(&dev->bsink_list); list_init(&dev->bsource_list); +#ifdef CONFIG_SOF_USERSPACE_LL + dev->list_mutex = k_object_alloc(K_OBJ_MUTEX); + k_mutex_init(dev->list_mutex); +#endif memcpy_s(&dev->tctx, sizeof(dev->tctx), trace_comp_drv_get_tr_ctx(dev->drv), sizeof(struct tr_ctx)); } diff --git a/src/include/sof/audio/component_ext.h b/src/include/sof/audio/component_ext.h index d2bbf87a7764..d0afe4be3ad8 100644 --- a/src/include/sof/audio/component_ext.h +++ b/src/include/sof/audio/component_ext.h @@ -53,6 +53,10 @@ static inline void comp_free(struct comp_dev *dev) * be freed after this. */ drv->ops.free(dev); + +#ifdef CONFIG_SOF_USERSPACE_LL + k_object_free(dev->list_mutex); +#endif } /** From 246fa7649468e57712c574b677bb0faeec80588d Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Feb 2026 16:28:55 +0200 Subject: [PATCH 04/54] zephyr: lib: make sof_heap_alloc/free system calls Add a build option to make sof_heap_alloc/free available as system calls to user-space. 
Add a test case for the functions that runs in a user-space thread. Signed-off-by: Kai Vehmanen --- zephyr/CMakeLists.txt | 2 + zephyr/Kconfig | 7 +++ zephyr/include/rtos/alloc.h | 15 ++++-- zephyr/lib/alloc.c | 6 +-- zephyr/syscall/alloc.c | 20 +++++++ zephyr/test/CMakeLists.txt | 3 ++ zephyr/test/userspace/test_heap_alloc.c | 72 +++++++++++++++++++++++++ 7 files changed, 119 insertions(+), 6 deletions(-) create mode 100644 zephyr/syscall/alloc.c create mode 100644 zephyr/test/userspace/test_heap_alloc.c diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt index 233c93f3e070..0c70b3929c49 100644 --- a/zephyr/CMakeLists.txt +++ b/zephyr/CMakeLists.txt @@ -545,6 +545,8 @@ zephyr_library_sources_ifdef(CONFIG_SHELL zephyr_syscall_header(${SOF_SRC_PATH}/include/sof/audio/module_adapter/module/generic.h) zephyr_syscall_header(${SOF_SRC_PATH}/include/sof/lib/fast-get.h) +zephyr_syscall_header(include/rtos/alloc.h) +zephyr_library_sources(syscall/alloc.c) zephyr_library_link_libraries(SOF) target_link_libraries(SOF INTERFACE zephyr_interface) diff --git a/zephyr/Kconfig b/zephyr/Kconfig index c744b965a07f..accebae6fa73 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -29,6 +29,13 @@ config SOF_USERSPACE_INTERFACE_DMA help Allow user-space threads to use the SOF DMA interface. +config SOF_USERSPACE_INTERFACE_ALLOC + bool "Enable SOF heap alloc interface to userspace threads" + depends on USERSPACE + help + Allow user-space threads to use sof_heap_alloc/sof_heap_free + as Zephyr system calls. 
+ config SOF_USERSPACE_LL bool "Run Low-Latency pipelines in userspace threads" depends on USERSPACE diff --git a/zephyr/include/rtos/alloc.h b/zephyr/include/rtos/alloc.h index 116789ea6ead..984dc8fa01e5 100644 --- a/zephyr/include/rtos/alloc.h +++ b/zephyr/include/rtos/alloc.h @@ -122,9 +122,16 @@ void rfree(void *ptr); */ void l3_heap_save(void); -void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, - size_t alignment); -void sof_heap_free(struct k_heap *heap, void *addr); +__syscall void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, + size_t alignment); + +void *z_impl_sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, + size_t alignment); + +__syscall void sof_heap_free(struct k_heap *heap, void *addr); + +void z_impl_sof_heap_free(struct k_heap *heap, void *addr); + struct k_heap *sof_sys_heap_get(void); /* TODO: remove - debug only - only needed for linking */ @@ -149,4 +156,6 @@ size_t get_shared_buffer_heap_size(void); #endif +#include + #endif /* __ZEPHYR_RTOS_ALLOC_H__ */ diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c index f95f66fbdb7f..a0dfb465f33a 100644 --- a/zephyr/lib/alloc.c +++ b/zephyr/lib/alloc.c @@ -619,8 +619,8 @@ EXPORT_SYMBOL(rfree); * To match the fall-back SOF main heap all private heaps should also be in the * uncached address range. 
*/ -void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, - size_t alignment) +void *z_impl_sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, + size_t alignment) { if (flags & (SOF_MEM_FLAG_LARGE_BUFFER | SOF_MEM_FLAG_USER_SHARED_BUFFER)) return rballoc_align(flags, bytes, alignment); @@ -634,7 +634,7 @@ void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, return (__sparse_force void *)heap_alloc_aligned_cached(heap, alignment, bytes); } -void sof_heap_free(struct k_heap *heap, void *addr) +void z_impl_sof_heap_free(struct k_heap *heap, void *addr) { if (heap && addr && is_heap_pointer(heap, addr)) heap_free(heap, addr); diff --git a/zephyr/syscall/alloc.c b/zephyr/syscall/alloc.c new file mode 100644 index 000000000000..fad39865b9d2 --- /dev/null +++ b/zephyr/syscall/alloc.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2026 Intel Corporation. + +#include +#include +#include + +static inline void *z_vrfy_sof_heap_alloc(struct k_heap *heap, uint32_t flags, + size_t bytes, size_t alignment) +{ + return z_impl_sof_heap_alloc(heap, flags, bytes, alignment); +} +#include + +static inline void z_vrfy_sof_heap_free(struct k_heap *heap, void *addr) +{ + z_impl_sof_heap_free(heap, addr); +} +#include diff --git a/zephyr/test/CMakeLists.txt b/zephyr/test/CMakeLists.txt index c5b66c83bbaa..b276bb307259 100644 --- a/zephyr/test/CMakeLists.txt +++ b/zephyr/test/CMakeLists.txt @@ -5,6 +5,9 @@ if(CONFIG_SOF_BOOT_TEST) zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace/ksem.c ) + if(CONFIG_USERSPACE AND CONFIG_SOF_USERSPACE_INTERFACE_ALLOC) + zephyr_library_sources(userspace/test_heap_alloc.c) + endif() endif() if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_INTERFACE_DMA) diff --git a/zephyr/test/userspace/test_heap_alloc.c b/zephyr/test/userspace/test_heap_alloc.c new file mode 100644 index 000000000000..8018e4b27b9d --- /dev/null +++ b/zephyr/test/userspace/test_heap_alloc.c 
@@ -0,0 +1,72 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright(c) 2026 Intel Corporation. + */ + +/* + * Test case for sof_heap_alloc() / sof_heap_free() use from a Zephyr + * user-space thread. + */ + +#include +#include +#include + +#include +#include +#include + +LOG_MODULE_DECLARE(sof_boot_test, LOG_LEVEL_DBG); + +#define USER_STACKSIZE 2048 + +static struct k_thread user_thread; +static K_THREAD_STACK_DEFINE(user_stack, USER_STACKSIZE); + +static void user_function(void *p1, void *p2, void *p3) +{ + struct k_heap *heap = (struct k_heap *)p1; + void *ptr; + + __ASSERT(k_is_user_context(), "isn't user"); + + LOG_INF("SOF thread %s (%s)", + k_is_user_context() ? "UserSpace!" : "privileged mode.", + CONFIG_BOARD_TARGET); + + /* allocate a block from the user heap */ + ptr = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, 128, 0); + zassert_not_null(ptr, "sof_heap_alloc returned NULL"); + + LOG_INF("sof_heap_alloc returned %p", ptr); + + /* free the block */ + sof_heap_free(heap, ptr); + + LOG_INF("sof_heap_free done"); +} + +static void test_user_thread_heap_alloc(void) +{ + struct k_heap *heap; + + heap = zephyr_ll_user_heap(); + zassert_not_null(heap, "user heap not found"); + + k_thread_create(&user_thread, user_stack, USER_STACKSIZE, + user_function, heap, NULL, NULL, + -1, K_USER, K_FOREVER); + + /* Add thread to LL memory domain so it can access the user heap */ + k_mem_domain_add_thread(zephyr_ll_mem_domain(), &user_thread); + + k_thread_start(&user_thread); + k_thread_join(&user_thread, K_FOREVER); +} + +ZTEST(sof_boot, user_space_heap_alloc) +{ + test_user_thread_heap_alloc(); + + ztest_test_pass(); +} From e76704caf814f745720845d2bbe6c4fb2e4dea98 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Feb 2026 17:44:14 +0200 Subject: [PATCH 05/54] schedule: zephyr_ll_user: make the heap accessible from user-space Separate the state for LL scheduler memory into kernel and user accessible resources. 
The pointer to the LL heap must be accessible from user-space, so that user space can allocate memory and pass the heap pointer as argument. Signed-off-by: Kai Vehmanen --- src/schedule/zephyr_ll_user.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/schedule/zephyr_ll_user.c b/src/schedule/zephyr_ll_user.c index aa33807b4aa3..9e86b9b58ea9 100644 --- a/src/schedule/zephyr_ll_user.c +++ b/src/schedule/zephyr_ll_user.c @@ -17,15 +17,18 @@ LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL); * * This structure encapsulates the memory management resources required for the * low-latency (LL) scheduler in userspace mode. It provides memory isolation - * and heap management for LL scheduler threads. + * and heap management for LL scheduler threads. Only kernel accessible. */ struct zephyr_ll_mem_resources { struct k_mem_domain mem_domain; /**< Memory domain for LL thread isolation */ - struct k_heap *heap; /**< Heap allocator for LL scheduler memory */ }; static struct zephyr_ll_mem_resources ll_mem_resources; +K_APPMEM_PARTITION_DEFINE(ll_common); +/* Heap allocator for LL scheduler memory (user accessible pointer) */ +K_APP_BMEM(ll_common) static struct k_heap *zephyr_ll_heap; + static struct k_heap *zephyr_ll_heap_init(void) { struct k_heap *heap = module_driver_heap_init(); @@ -61,6 +64,11 @@ static struct k_heap *zephyr_ll_heap_init(void) if (ret) k_panic(); + ret = k_mem_domain_add_partition(&ll_mem_resources.mem_domain, &ll_common); + tr_dbg(&ll_tr, "init ll common %p, ret %d", (void *)&ll_common, ret); + if (ret) + k_panic(); + return heap; } @@ -68,7 +76,7 @@ void zephyr_ll_user_resources_init(void) { k_mem_domain_init(&ll_mem_resources.mem_domain, 0, NULL); - ll_mem_resources.heap = zephyr_ll_heap_init(); + zephyr_ll_heap = zephyr_ll_heap_init(); /* attach common partition to LL domain */ user_memory_attach_common_partition(zephyr_ll_mem_domain()); @@ -76,7 +84,7 @@ void zephyr_ll_user_resources_init(void) struct k_heap 
*zephyr_ll_user_heap(void) { - return ll_mem_resources.heap; + return zephyr_ll_heap; } struct k_mem_domain *zephyr_ll_mem_domain(void) From 6431871230412e286c3b0bab2dad96d223ff1248 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Feb 2026 18:48:43 +0200 Subject: [PATCH 06/54] schedule: zephyr_ll: convert pdata->sem into a dynamic object Turn the pdata->sem into a dynamic object in userspace LL builds. Keep statically allocated semaphore for kernel LL builds. Signed-off-by: Kai Vehmanen --- src/schedule/zephyr_ll.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 575a82d91dda..e2d02f12926b 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -40,7 +40,10 @@ struct zephyr_ll { struct zephyr_ll_pdata { bool run; bool freeing; - struct k_sem sem; +#ifndef CONFIG_SOF_USERSPACE_LL + struct k_sem sem_obj; +#endif + struct k_sem *sem; }; static void zephyr_ll_lock(struct zephyr_ll *sch, uint32_t *flags) @@ -87,7 +90,7 @@ static void zephyr_ll_task_done(struct zephyr_ll *sch, * zephyr_ll_task_free() is trying to free this task. 
Complete * it and signal the semaphore to let the function proceed */ - k_sem_give(&pdata->sem); + k_sem_give(pdata->sem); tr_info(&ll_tr, "task complete %p %pU", task, task->uid); tr_info(&ll_tr, "num_tasks %d total_num_tasks %ld", @@ -454,7 +457,11 @@ static int zephyr_ll_task_free(void *data, struct task *task) if (must_wait) /* Wait for up to 100 periods */ - k_sem_take(&pdata->sem, K_USEC(LL_TIMER_PERIOD_US * 100)); + k_sem_take(pdata->sem, K_USEC(LL_TIMER_PERIOD_US * 100)); + +#ifdef CONFIG_SOF_USERSPACE_LL + k_object_free(pdata->sem); +#endif /* Protect against racing with schedule_task() */ zephyr_ll_lock(sch, &flags); @@ -560,7 +567,12 @@ int zephyr_ll_task_init(struct task *task, memset(pdata, 0, sizeof(*pdata)); - k_sem_init(&pdata->sem, 0, 1); +#ifdef CONFIG_SOF_USERSPACE_LL + pdata->sem = k_object_alloc(K_OBJ_SEM); +#else + pdata->sem = &pdata->sem_obj; +#endif + k_sem_init(pdata->sem, 0, 1); task->priv_data = pdata; From e998fa0174a0becaeebe31405c1daa053833415f Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Feb 2026 18:51:10 +0200 Subject: [PATCH 07/54] schedule: zephyr_ll_user: move user accessible heap to common partition Move the user-accessible heap pointer to common partition defined in userspace_helper.h. 
Signed-off-by: Kai Vehmanen --- src/schedule/zephyr_ll_user.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/schedule/zephyr_ll_user.c b/src/schedule/zephyr_ll_user.c index 9e86b9b58ea9..e64d65ab8ae5 100644 --- a/src/schedule/zephyr_ll_user.c +++ b/src/schedule/zephyr_ll_user.c @@ -25,9 +25,8 @@ struct zephyr_ll_mem_resources { static struct zephyr_ll_mem_resources ll_mem_resources; -K_APPMEM_PARTITION_DEFINE(ll_common); /* Heap allocator for LL scheduler memory (user accessible pointer) */ -K_APP_BMEM(ll_common) static struct k_heap *zephyr_ll_heap; +APP_TASK_DATA static struct k_heap *zephyr_ll_heap; static struct k_heap *zephyr_ll_heap_init(void) { @@ -64,11 +63,6 @@ static struct k_heap *zephyr_ll_heap_init(void) if (ret) k_panic(); - ret = k_mem_domain_add_partition(&ll_mem_resources.mem_domain, &ll_common); - tr_dbg(&ll_tr, "init ll common %p, ret %d", (void *)&ll_common, ret); - if (ret) - k_panic(); - return heap; } From 20620b72f24ff29d06392bdf909545599991f612 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Mon, 16 Feb 2026 16:16:01 +0200 Subject: [PATCH 08/54] dma: dma_sg: allocate on specific heap Add a heap parameter to DMA scatter-gather allocation interface. This makes it possible to control how allocations are done for the DMA buffers. 
Signed-off-by: Kai Vehmanen --- posix/include/sof/lib/dma.h | 6 ++++-- src/audio/dai-legacy.c | 6 +++--- src/audio/dai-zephyr.c | 6 +++--- src/audio/host-legacy.c | 12 ++++++------ src/audio/host-zephyr.c | 12 ++++++------ src/ipc/ipc3/handler.c | 2 +- src/ipc/ipc3/host-page-table.c | 2 +- src/lib/dma.c | 13 ++++++++----- src/probe/probe.c | 4 ++-- src/trace/dma-trace.c | 2 +- zephyr/include/sof/lib/dma.h | 6 ++++-- 11 files changed, 39 insertions(+), 32 deletions(-) diff --git a/posix/include/sof/lib/dma.h b/posix/include/sof/lib/dma.h index 960cfd469215..cb829dcaa21a 100644 --- a/posix/include/sof/lib/dma.h +++ b/posix/include/sof/lib/dma.h @@ -35,6 +35,7 @@ struct comp_buffer; struct comp_dev; +struct k_heap; /** \addtogroup sof_dma_drivers DMA Drivers * DMA Drivers API specification. @@ -511,13 +512,14 @@ static inline void dma_sg_init(struct dma_sg_elem_array *ea) ea->elems = NULL; } -int dma_sg_alloc(struct dma_sg_elem_array *ea, +int dma_sg_alloc(struct k_heap *heap, + struct dma_sg_elem_array *ea, uint32_t flags, uint32_t direction, uint32_t buffer_count, uint32_t buffer_bytes, uintptr_t dma_buffer_addr, uintptr_t external_addr); -void dma_sg_free(struct dma_sg_elem_array *ea); +void dma_sg_free(struct k_heap *heap, struct dma_sg_elem_array *ea); /** * \brief Get the total size of SG buffer diff --git a/src/audio/dai-legacy.c b/src/audio/dai-legacy.c index 11179334e6fe..655fa94afb86 100644 --- a/src/audio/dai-legacy.c +++ b/src/audio/dai-legacy.c @@ -379,7 +379,7 @@ static int dai_playback_params(struct comp_dev *dev, uint32_t period_bytes, comp_info(dev, "fifo 0x%x", fifo); - err = dma_sg_alloc(&config->elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &config->elem_array, SOF_MEM_FLAG_USER, config->direction, period_count, period_bytes, @@ -444,7 +444,7 @@ static int dai_capture_params(struct comp_dev *dev, uint32_t period_bytes, comp_info(dev, "fifo 0x%x", fifo); - err = dma_sg_alloc(&config->elem_array, SOF_MEM_FLAG_USER, + err = 
dma_sg_alloc(NULL, &config->elem_array, SOF_MEM_FLAG_USER, config->direction, period_count, period_bytes, @@ -709,7 +709,7 @@ void dai_common_reset(struct dai_data *dd, struct comp_dev *dev) if (!dd->delayed_dma_stop) dai_dma_release(dd, dev); - dma_sg_free(&config->elem_array); + dma_sg_free(NULL, &config->elem_array); if (dd->dma_buffer) { buffer_free(dd->dma_buffer); diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index 37b0b7c911ea..59bcca3c14a5 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -823,7 +823,7 @@ static int dai_set_sg_config(struct dai_data *dd, struct comp_dev *dev, uint32_t } while (--max_block_count > 0); } - err = dma_sg_alloc(&config->elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &config->elem_array, SOF_MEM_FLAG_USER, config->direction, period_count, period_bytes, @@ -1104,7 +1104,7 @@ int dai_common_params(struct dai_data *dd, struct comp_dev *dev, if (err < 0) { buffer_free(dd->dma_buffer); dd->dma_buffer = NULL; - dma_sg_free(&config->elem_array); + dma_sg_free(NULL, &config->elem_array); rfree(dd->z_config); dd->z_config = NULL; } @@ -1235,7 +1235,7 @@ void dai_common_reset(struct dai_data *dd, struct comp_dev *dev) if (!dd->delayed_dma_stop) dai_dma_release(dd, dev); - dma_sg_free(&config->elem_array); + dma_sg_free(NULL, &config->elem_array); if (dd->z_config) { rfree(dd->z_config->head_block); rfree(dd->z_config); diff --git a/src/audio/host-legacy.c b/src/audio/host-legacy.c index a16b3f74e1c3..36c259b7f23c 100644 --- a/src/audio/host-legacy.c +++ b/src/audio/host-legacy.c @@ -440,7 +440,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, uint32 elem_array = &hd->local.elem_array; /* config buffer will be used as proxy */ - err = dma_sg_alloc(&hd->config.elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &hd->config.elem_array, SOF_MEM_FLAG_USER, dir, 1, 0, 0, 0); if (err < 0) { comp_err(dev, "dma_sg_alloc() failed"); @@ -450,7 +450,7 @@ static int 
create_local_elems(struct host_data *hd, struct comp_dev *dev, uint32 elem_array = &hd->config.elem_array; } - err = dma_sg_alloc(elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, + err = dma_sg_alloc(NULL, elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, buffer_bytes, (uintptr_t)(audio_stream_get_addr(&hd->dma_buffer->stream)), 0); if (err < 0) { @@ -602,7 +602,7 @@ void host_common_free(struct host_data *hd) dma_put(hd->dma); ipc_msg_free(hd->msg); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(NULL, &hd->config.elem_array); } static void host_free(struct comp_dev *dev) @@ -905,9 +905,9 @@ void host_common_reset(struct host_data *hd, uint16_t state) } /* free all DMA elements */ - dma_sg_free(&hd->host.elem_array); - dma_sg_free(&hd->local.elem_array); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(NULL, &hd->host.elem_array); + dma_sg_free(NULL, &hd->local.elem_array); + dma_sg_free(NULL, &hd->config.elem_array); /* It's safe that cleaning out `hd->config` after `dma_sg_free` for config.elem_array */ memset(&hd->config, 0, sizeof(hd->config)); diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index bdb5c5759274..e842fb36a965 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -605,7 +605,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, elem_array = &hd->local.elem_array; /* config buffer will be used as proxy */ - err = dma_sg_alloc(&hd->config.elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &hd->config.elem_array, SOF_MEM_FLAG_USER, dir, 1, 0, 0, 0); if (err < 0) { comp_err(dev, "dma_sg_alloc() failed"); @@ -615,7 +615,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, elem_array = &hd->config.elem_array; } - err = dma_sg_alloc(elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, + err = dma_sg_alloc(NULL, elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, buffer_bytes, (uintptr_t)audio_stream_get_addr(&hd->dma_buffer->stream), 0); if (err < 0) { @@ 
-774,7 +774,7 @@ __cold void host_common_free(struct host_data *hd) sof_dma_put(hd->dma); ipc_msg_free(hd->msg); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(NULL, &hd->config.elem_array); } __cold static void host_free(struct comp_dev *dev) @@ -1146,9 +1146,9 @@ void host_common_reset(struct host_data *hd, uint16_t state) } /* free all DMA elements */ - dma_sg_free(&hd->host.elem_array); - dma_sg_free(&hd->local.elem_array); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(NULL, &hd->host.elem_array); + dma_sg_free(NULL, &hd->local.elem_array); + dma_sg_free(NULL, &hd->config.elem_array); /* free DMA buffer */ if (hd->dma_buffer) { diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index 80ddb2225e43..5bd5293c61b7 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -878,7 +878,7 @@ static int ipc_dma_trace_config(uint32_t header) error: #if CONFIG_HOST_PTABLE - dma_sg_free(&elem_array); + dma_sg_free(NULL, &elem_array); processing_error: #endif diff --git a/src/ipc/ipc3/host-page-table.c b/src/ipc/ipc3/host-page-table.c index 7a3da31caa82..997ee6683352 100644 --- a/src/ipc/ipc3/host-page-table.c +++ b/src/ipc/ipc3/host-page-table.c @@ -239,6 +239,6 @@ int ipc_process_host_buffer(struct ipc *ipc, return 0; error: - dma_sg_free(elem_array); + dma_sg_free(NULL, elem_array); return err; } diff --git a/src/lib/dma.c b/src/lib/dma.c index 608ee091ac25..70ac5c975058 100644 --- a/src/lib/dma.c +++ b/src/lib/dma.c @@ -285,7 +285,8 @@ void dma_put(struct dma *dma) } #endif -int dma_sg_alloc(struct dma_sg_elem_array *elem_array, +int dma_sg_alloc(struct k_heap *heap, + struct dma_sg_elem_array *elem_array, uint32_t flags, uint32_t direction, uint32_t buffer_count, uint32_t buffer_bytes, @@ -293,11 +294,13 @@ int dma_sg_alloc(struct dma_sg_elem_array *elem_array, { int i; - elem_array->elems = rzalloc(SOF_MEM_FLAG_USER, - sizeof(struct dma_sg_elem) * buffer_count); + elem_array->elems = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, + 
sizeof(struct dma_sg_elem) * buffer_count, 0); if (!elem_array->elems) return -ENOMEM; + memset(elem_array->elems, 0, sizeof(struct dma_sg_elem) * buffer_count); + for (i = 0; i < buffer_count; i++) { elem_array->elems[i].size = buffer_bytes; // TODO: may count offsets once @@ -319,9 +322,9 @@ int dma_sg_alloc(struct dma_sg_elem_array *elem_array, return 0; } -void dma_sg_free(struct dma_sg_elem_array *elem_array) +void dma_sg_free(struct k_heap *heap, struct dma_sg_elem_array *elem_array) { - rfree(elem_array->elems); + sof_heap_free(heap, elem_array->elems); dma_sg_init(elem_array); } diff --git a/src/probe/probe.c b/src/probe/probe.c index f15ee84f7daf..088a2eb1773b 100644 --- a/src/probe/probe.c +++ b/src/probe/probe.c @@ -176,7 +176,7 @@ static int probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) dma->config.dest_width = sizeof(uint32_t); dma->config.cyclic = 0; - err = dma_sg_alloc(&dma->config.elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &dma->config.elem_array, SOF_MEM_FLAG_USER, dma->config.direction, elem_num, elem_size, elem_addr, 0); if (err < 0) return err; @@ -255,7 +255,7 @@ static int probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) static int probe_dma_deinit(struct probe_dma_ext *dma) { int err = 0; - dma_sg_free(&dma->config.elem_array); + dma_sg_free(NULL, &dma->config.elem_array); #if CONFIG_ZEPHYR_NATIVE_DRIVERS err = dma_stop(dma->dc.dmac->z_dev, dma->dc.chan->index); #else diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index 0523a362e397..554204ac5c70 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -407,7 +407,7 @@ void dma_trace_disable(struct dma_trace_data *d) #if (CONFIG_HOST_PTABLE) /* Free up the host SG if it is set */ if (d->host_size) { - dma_sg_free(&d->config.elem_array); + dma_sg_free(NULL, &d->config.elem_array); d->host_size = 0; } #endif diff --git a/zephyr/include/sof/lib/dma.h b/zephyr/include/sof/lib/dma.h index b13f3c25221b..e85f4b4d2e80 100644 --- 
a/zephyr/include/sof/lib/dma.h +++ b/zephyr/include/sof/lib/dma.h @@ -34,6 +34,7 @@ struct comp_buffer; struct comp_dev; +struct k_heap; /** \addtogroup sof_dma_drivers DMA Drivers * SOF DMA Drivers API specification (deprecated interface, to be @@ -291,13 +292,14 @@ static inline void dma_sg_init(struct dma_sg_elem_array *ea) ea->elems = NULL; } -int dma_sg_alloc(struct dma_sg_elem_array *ea, +int dma_sg_alloc(struct k_heap *heap, + struct dma_sg_elem_array *ea, uint32_t flags, uint32_t direction, uint32_t buffer_count, uint32_t buffer_bytes, uintptr_t dma_buffer_addr, uintptr_t external_addr); -void dma_sg_free(struct dma_sg_elem_array *ea); +void dma_sg_free(struct k_heap *heap, struct dma_sg_elem_array *ea); /** * \brief Get the total size of SG buffer From c0227d06b20b91803b4ccc9c55acd865da5e947a Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Mon, 16 Feb 2026 19:21:57 +0200 Subject: [PATCH 09/54] buffer: extend ability to allocate on specific heap to all functions Continue the work in commit 9567234c3b6b ("buffer: allocate on specific heap") and add ability to specify the heap to all buffer interface functions. 
Signed-off-by: Kai Vehmanen --- src/audio/buffers/comp_buffer.c | 54 ++++++++++++++------------------- src/include/sof/audio/buffer.h | 3 ++ 2 files changed, 26 insertions(+), 31 deletions(-) diff --git a/src/audio/buffers/comp_buffer.c b/src/audio/buffers/comp_buffer.c index d6562b8d821f..6804bbf71f04 100644 --- a/src/audio/buffers/comp_buffer.c +++ b/src/audio/buffers/comp_buffer.c @@ -161,7 +161,7 @@ static void comp_buffer_free(struct sof_audio_buffer *audio_buffer) struct k_heap *heap = buffer->audio_buffer.heap; - rfree(buffer->stream.addr); + sof_heap_free(heap, buffer->stream.addr); sof_heap_free(heap, buffer); if (heap) { struct dp_heap_user *mod_heap_user = container_of(heap, struct dp_heap_user, heap); @@ -218,6 +218,7 @@ static struct comp_buffer *buffer_alloc_struct(struct k_heap *heap, memset(buffer, 0, sizeof(*buffer)); + buffer->heap = heap; buffer->flags = flags; /* Force channels to 2 for init to prevent bad call to clz in buffer_init_stream */ buffer->stream.runtime_stream_params.channels = 2; @@ -254,7 +255,7 @@ struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flag return NULL; } - stream_addr = rballoc_align(flags, size, align); + stream_addr = sof_heap_alloc(heap, flags, size, align); if (!stream_addr) { tr_err(&buffer_tr, "could not alloc size = %zu bytes of flags = 0x%x", size, flags); @@ -264,9 +265,11 @@ struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flag buffer = buffer_alloc_struct(heap, stream_addr, size, flags, is_shared); if (!buffer) { tr_err(&buffer_tr, "could not alloc buffer structure"); - rfree(stream_addr); + sof_heap_free(heap, stream_addr); } + buffer->heap = heap; + return buffer; } @@ -292,7 +295,7 @@ struct comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_siz preferred_size += minimum_size - preferred_size % minimum_size; for (size = preferred_size; size >= minimum_size; size -= minimum_size) { - stream_addr = rballoc_align(flags, size, align); + 
stream_addr = sof_heap_alloc(heap, flags, size, align); if (stream_addr) break; } @@ -308,9 +311,11 @@ struct comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_siz buffer = buffer_alloc_struct(heap, stream_addr, size, flags, is_shared); if (!buffer) { tr_err(&buffer_tr, "could not alloc buffer structure"); - rfree(stream_addr); + sof_heap_free(heap, stream_addr); } + buffer->heap = heap; + return buffer; } @@ -341,14 +346,8 @@ int buffer_set_size(struct comp_buffer *buffer, uint32_t size, uint32_t alignmen if (size == audio_stream_get_size(&buffer->stream)) return 0; - if (!alignment) - new_ptr = rbrealloc(audio_stream_get_addr(&buffer->stream), - buffer->flags | SOF_MEM_FLAG_NO_COPY, - size, audio_stream_get_size(&buffer->stream)); - else - new_ptr = rbrealloc_align(audio_stream_get_addr(&buffer->stream), - buffer->flags | SOF_MEM_FLAG_NO_COPY, size, - audio_stream_get_size(&buffer->stream), alignment); + new_ptr = sof_heap_alloc(buffer->heap, buffer->flags, size, alignment); + /* we couldn't allocate bigger chunk */ if (!new_ptr && size > audio_stream_get_size(&buffer->stream)) { buf_err(buffer, "resize can't alloc %u bytes of flags 0x%x", @@ -357,8 +356,10 @@ int buffer_set_size(struct comp_buffer *buffer, uint32_t size, uint32_t alignmen } /* use bigger chunk, else just use the old chunk but set smaller */ - if (new_ptr) + if (new_ptr) { + sof_heap_free(buffer->heap, audio_stream_get_addr(&buffer->stream)); buffer->stream.addr = new_ptr; + } buffer_init_stream(buffer, size); @@ -389,22 +390,11 @@ int buffer_set_size_range(struct comp_buffer *buffer, size_t preferred_size, siz if (preferred_size == actual_size) return 0; - if (!alignment) { - for (new_size = preferred_size; new_size >= minimum_size; - new_size -= minimum_size) { - new_ptr = rbrealloc(ptr, buffer->flags | SOF_MEM_FLAG_NO_COPY, - new_size, actual_size); - if (new_ptr) - break; - } - } else { - for (new_size = preferred_size; new_size >= minimum_size; - new_size -= minimum_size) 
{ - new_ptr = rbrealloc_align(ptr, buffer->flags | SOF_MEM_FLAG_NO_COPY, - new_size, actual_size, alignment); - if (new_ptr) - break; - } + for (new_size = preferred_size; new_size >= minimum_size; + new_size -= minimum_size) { + new_ptr = sof_heap_alloc(buffer->heap, buffer->flags, new_size, alignment); + if (new_ptr) + break; } /* we couldn't allocate bigger chunk */ @@ -415,8 +405,10 @@ int buffer_set_size_range(struct comp_buffer *buffer, size_t preferred_size, siz } /* use bigger chunk, else just use the old chunk but set smaller */ - if (new_ptr) + if (new_ptr) { + sof_heap_free(buffer->heap, audio_stream_get_addr(&buffer->stream)); buffer->stream.addr = new_ptr; + } buffer_init_stream(buffer, new_size); diff --git a/src/include/sof/audio/buffer.h b/src/include/sof/audio/buffer.h index 91c09ef2e510..5d5e64e8043d 100644 --- a/src/include/sof/audio/buffer.h +++ b/src/include/sof/audio/buffer.h @@ -33,6 +33,7 @@ #include struct comp_dev; +struct k_heap; /** \name Trace macros * @{ @@ -148,6 +149,8 @@ struct comp_buffer { /* list of buffers, to be used i.e. in raw data processing mode*/ struct list_item buffers_list; + + struct k_heap *heap; }; /* From 4c7b36e61ca5fd656e5f8423acdfe3a055d31a85 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 19 Feb 2026 14:50:56 +0200 Subject: [PATCH 10/54] (section host-zephyr START) From 878a39f62887844458918bec3dc430192a29e711 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 19 Feb 2026 14:37:07 +0200 Subject: [PATCH 11/54] zephyr: userspace: sof_dma: allow circular SG lists Allow a non-null pointer at the end of the DMA transfer block list, if and only if it points to the first entry in the block list. The SOF DAI module sets the DMA transfers blocks like this and this change is required to use DAI module from user-space. 
Signed-off-by: Kai Vehmanen --- zephyr/syscall/sof_dma.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/zephyr/syscall/sof_dma.c b/zephyr/syscall/sof_dma.c index ed69ffc78423..11ee8156f7ff 100644 --- a/zephyr/syscall/sof_dma.c +++ b/zephyr/syscall/sof_dma.c @@ -119,9 +119,15 @@ static inline struct dma_block_config *deep_copy_dma_blk_cfg_list(struct dma_con for (user_next = cfg->head_block, kern_next = kern_cfg; user_next; - user_next = user_next->next_block, kern_next++) { - if (++i > cfg->block_count) - goto err; + user_next = user_next->next_block, kern_next++, i++) { + if (i == cfg->block_count) { + /* last block can point to first one */ + if (user_next != cfg->head_block) + goto err; + + kern_prev->next_block = kern_cfg; + break; + } if (k_usermode_from_copy(kern_next, user_next, sizeof(*kern_next))) goto err; From ade85d3bdd633ba18d86b972fbf64f34822607ca Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Feb 2026 18:54:45 +0200 Subject: [PATCH 12/54] zephyr: lib: dma: make DMA platform data available to user-space The platform data descriptions need to be accessible to all threads. These are e.g. used when setting up host/DAI copiers and they need platform DMA properties. 
Signed-off-by: Kai Vehmanen --- zephyr/lib/dma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zephyr/lib/dma.c b/zephyr/lib/dma.c index 7452459f8b0a..0b8987ed65d0 100644 --- a/zephyr/lib/dma.c +++ b/zephyr/lib/dma.c @@ -15,12 +15,13 @@ #include #include #include +#include #include #define DW_DMA_BUFFER_PERIOD_COUNT 0x4 #define HDA_DMA_BUFFER_PERIOD_COUNT 4 -SHARED_DATA struct sof_dma dma[] = { +APP_TASK_DATA SHARED_DATA struct sof_dma dma[] = { #if DT_NODE_HAS_STATUS(DT_NODELABEL(lpgpdma0), okay) { /* Low Power GP DMAC 0 */ .plat_data = { From 1a9358ac00d7a43ca41cc76eb4619e100a2f55c4 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Feb 2026 18:53:39 +0200 Subject: [PATCH 13/54] audio: host-zephyr: select heap when allocating host buffers Use separate heaps depending whether host copier is run in user or kernel space. Signed-off-by: Kai Vehmanen --- src/audio/host-zephyr.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index e842fb36a965..153a17d1722f 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -855,6 +855,11 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, int i, channel, err; bool is_scheduling_source = dev == dev->pipeline->sched_comp; uint32_t round_up_size; +#ifdef CONFIG_SOF_USERSPACE_LL + struct k_heap *heap = zephyr_ll_user_heap(); +#else + struct k_heap *heap = NULL; +#endif /* host params always installed by pipeline IPC */ hd->host_size = params->buffer.size; @@ -943,7 +948,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, } } else { /* allocate not shared buffer */ - hd->dma_buffer = buffer_alloc_range(NULL, buffer_size_preferred, buffer_size, + hd->dma_buffer = buffer_alloc_range(heap, buffer_size_preferred, buffer_size, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_DMA, addr_align, BUFFER_USAGE_NOT_SHARED); if (!hd->dma_buffer) { From 82d667729db61ec730ca52b133ae0f5d4caa8a5a Mon Sep 17 
00:00:00 2001 From: Kai Vehmanen Date: Mon, 16 Feb 2026 19:27:56 +0200 Subject: [PATCH 14/54] audio: host-zephyr: rework calls to DMA driver, remove channel pointer For historical reasons, host-zephyr has somewhat complicated code to manage the DMA channel instance information. When a DMA channel is allocated, a pointer to the system DMA channel table is acquired and some additional information is stored per channel. This is however redundant as the only piece of information actually needed is the channel index. Simplify the code by not storing the channel pointer anymore, but rather just store the channel index and use that in all calls to the DMA driver. Signed-off-by: Kai Vehmanen --- src/audio/copier/host_copier.h | 3 ++- src/audio/host-zephyr.c | 48 +++++++++++++++------------------- 2 files changed, 23 insertions(+), 28 deletions(-) diff --git a/src/audio/copier/host_copier.h b/src/audio/copier/host_copier.h index 28605ed9c3ab..83e891192690 100644 --- a/src/audio/copier/host_copier.h +++ b/src/audio/copier/host_copier.h @@ -50,10 +50,11 @@ struct host_data { /* local DMA config */ #if CONFIG_ZEPHYR_NATIVE_DRIVERS struct sof_dma *dma; + int chan_index; #else struct dma *dma; -#endif struct dma_chan_data *chan; +#endif struct dma_sg_config config; #ifdef __ZEPHYR__ struct dma_config z_config; diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index 153a17d1722f..0b0656419ea8 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -83,7 +83,7 @@ static int host_dma_set_config_and_copy(struct host_data *hd, struct comp_dev *d local_elem->size = bytes; /* reconfigure transfer */ - ret = sof_dma_config(hd->chan->dma, hd->chan->index, &hd->z_config); + ret = sof_dma_config(hd->dma, hd->chan_index, &hd->z_config); if (ret < 0) { comp_err(dev, "dma_config() failed, ret = %d", ret); @@ -92,7 +92,7 @@ static int host_dma_set_config_and_copy(struct host_data *hd, struct comp_dev *d cb(dev, bytes); - ret = sof_dma_reload(hd->chan->dma, 
hd->chan->index, bytes); + ret = sof_dma_reload(hd->dma, hd->chan_index, bytes); if (ret < 0) { comp_err(dev, "dma_copy() failed, ret = %d", ret); @@ -222,7 +222,7 @@ static int host_copy_one_shot(struct host_data *hd, struct comp_dev *dev, copy_c hd->z_config.head_block->block_size = local_elem->size; /* reconfigure transfer */ - ret = sof_dma_config(hd->chan->dma, hd->chan->index, &hd->z_config); + ret = sof_dma_config(hd->dma, hd->chan_index, &hd->z_config); if (ret < 0) { comp_err(dev, "dma_config() failed, ret = %u", ret); return ret; @@ -230,7 +230,7 @@ static int host_copy_one_shot(struct host_data *hd, struct comp_dev *dev, copy_c cb(dev, copy_bytes); - ret = sof_dma_reload(hd->chan->dma, hd->chan->index, copy_bytes); + ret = sof_dma_reload(hd->dma, hd->chan_index, copy_bytes); if (ret < 0) comp_err(dev, "dma_copy() failed, ret = %u", ret); @@ -364,7 +364,7 @@ static void host_dma_cb(struct comp_dev *dev, size_t bytes) /* get status from dma and check for xrun */ static int host_get_status(struct comp_dev *dev, struct host_data *hd, struct dma_status *stat) { - int ret = sof_dma_get_status(hd->chan->dma, hd->chan->index, stat); + int ret = sof_dma_get_status(hd->dma, hd->chan_index, stat); #if CONFIG_XRUN_NOTIFICATIONS_ENABLE if (ret == -EPIPE && !hd->xrun_notification_sent) { hd->xrun_notification_sent = send_copier_gateway_xrun_notif_msg @@ -551,7 +551,7 @@ static int host_copy_normal(struct host_data *hd, struct comp_dev *dev, copy_cal if (!copy_bytes) { if (hd->partial_size != 0) { if (stream_sync(hd, dev)) { - ret = sof_dma_reload(hd->chan->dma, hd->chan->index, + ret = sof_dma_reload(hd->dma, hd->chan_index, hd->partial_size); if (ret < 0) comp_err(dev, "dma_reload() failed, ret = %u", ret); @@ -578,7 +578,7 @@ static int host_copy_normal(struct host_data *hd, struct comp_dev *dev, copy_cal hd->dma_buffer_size - hd->partial_size <= (2 + threshold) * hd->period_bytes) { if (stream_sync(hd, dev)) { - ret = sof_dma_reload(hd->chan->dma, hd->chan->index, 
+ ret = sof_dma_reload(hd->dma, hd->chan_index, hd->partial_size); if (ret < 0) comp_err(dev, "dma_reload() failed, ret = %u", ret); @@ -646,7 +646,7 @@ int host_common_trigger(struct host_data *hd, struct comp_dev *dev, int cmd) if (cmd != COMP_TRIGGER_START && hd->copy_type == COMP_COPY_ONE_SHOT) return ret; - if (!hd->chan) { + if (hd->chan_index == -1) { comp_err(dev, "no dma channel configured"); return -EINVAL; } @@ -654,14 +654,14 @@ int host_common_trigger(struct host_data *hd, struct comp_dev *dev, int cmd) switch (cmd) { case COMP_TRIGGER_START: hd->partial_size = 0; - ret = sof_dma_start(hd->chan->dma, hd->chan->index); + ret = sof_dma_start(hd->dma, hd->chan_index); if (ret < 0) comp_err(dev, "dma_start() failed, ret = %u", ret); break; case COMP_TRIGGER_STOP: case COMP_TRIGGER_XRUN: - ret = sof_dma_stop(hd->chan->dma, hd->chan->index); + ret = sof_dma_stop(hd->dma, hd->chan_index); if (ret < 0) comp_err(dev, "dma stop failed: %d", ret); @@ -721,7 +721,7 @@ __cold int host_common_new(struct host_data *hd, struct comp_dev *dev, sof_dma_put(hd->dma); return -ENOMEM; } - hd->chan = NULL; + hd->chan_index = -1; hd->copy_type = COMP_COPY_NORMAL; return 0; @@ -852,7 +852,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, uint32_t buffer_size_preferred; uint32_t addr_align; uint32_t align; - int i, channel, err; + int i, err; bool is_scheduling_source = dev == dev->pipeline->sched_comp; uint32_t round_up_size; #ifdef CONFIG_SOF_USERSPACE_LL @@ -993,22 +993,16 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, /* get DMA channel from DMAC * note: stream_tag is ignored by dw-dma */ - channel = sof_dma_request_channel(hd->dma, hda_chan); - if (channel < 0) { + hd->chan_index = sof_dma_request_channel(hd->dma, hda_chan); + if (hd->chan_index < 0) { comp_err(dev, "requested channel %d is busy", hda_chan); return -ENODEV; } - hd->chan = &hd->dma->chan[channel]; uint32_t buffer_addr = 0; uint32_t buffer_bytes = 0; uint32_t 
addr; - hd->chan->direction = config->direction; - hd->chan->desc_count = config->elem_array.count; - hd->chan->is_scheduling_source = config->is_scheduling_source; - hd->chan->period = config->period; - memset(dma_cfg, 0, sizeof(*dma_cfg)); dma_block_cfg = rzalloc(SOF_MEM_FLAG_USER, @@ -1055,7 +1049,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, break; } - err = sof_dma_config(hd->chan->dma, hd->chan->index, dma_cfg); + err = sof_dma_config(hd->dma, hd->chan_index, dma_cfg); if (err < 0) { comp_err(dev, "dma_config() failed"); goto err_free_block_cfg; @@ -1085,8 +1079,8 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, dma_cfg->head_block = NULL; rfree(dma_block_cfg); err_release_channel: - sof_dma_release_channel(hd->dma, hd->chan->index); - hd->chan = NULL; + sof_dma_release_channel(hd->dma, hd->chan_index); + hd->chan_index = -1; return err; } @@ -1144,10 +1138,10 @@ static int host_position(struct comp_dev *dev, void host_common_reset(struct host_data *hd, uint16_t state) { - if (hd->chan) { - sof_dma_stop(hd->chan->dma, hd->chan->index); - sof_dma_release_channel(hd->dma, hd->chan->index); - hd->chan = NULL; + if (hd->chan_index != -1) { + sof_dma_stop(hd->dma, hd->chan_index); + sof_dma_release_channel(hd->dma, hd->chan_index); + hd->chan_index = -1; } /* free all DMA elements */ From a2127555ca0a4df6bc971e30e9c7c626491d4b68 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Mon, 16 Feb 2026 19:41:35 +0200 Subject: [PATCH 15/54] audio: host-zephyr: pass component heap to dma_sg_alloc Make sure we use the same heap to allocate DMA SG buffers as we use for other component resources. 
Signed-off-by: Kai Vehmanen --- src/audio/host-zephyr.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index 0b0656419ea8..a9cfbda7adec 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -605,7 +605,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, elem_array = &hd->local.elem_array; /* config buffer will be used as proxy */ - err = dma_sg_alloc(NULL, &hd->config.elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(hd->heap, &hd->config.elem_array, SOF_MEM_FLAG_USER, dir, 1, 0, 0, 0); if (err < 0) { comp_err(dev, "dma_sg_alloc() failed"); @@ -615,7 +615,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, elem_array = &hd->config.elem_array; } - err = dma_sg_alloc(NULL, elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, + err = dma_sg_alloc(hd->heap, elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, buffer_bytes, (uintptr_t)audio_stream_get_addr(&hd->dma_buffer->stream), 0); if (err < 0) { @@ -774,7 +774,7 @@ __cold void host_common_free(struct host_data *hd) sof_dma_put(hd->dma); ipc_msg_free(hd->msg); - dma_sg_free(NULL, &hd->config.elem_array); + dma_sg_free(hd->heap, &hd->config.elem_array); } __cold static void host_free(struct comp_dev *dev) @@ -1145,9 +1145,9 @@ void host_common_reset(struct host_data *hd, uint16_t state) } /* free all DMA elements */ - dma_sg_free(NULL, &hd->host.elem_array); - dma_sg_free(NULL, &hd->local.elem_array); - dma_sg_free(NULL, &hd->config.elem_array); + dma_sg_free(hd->heap, &hd->host.elem_array); + dma_sg_free(hd->heap, &hd->local.elem_array); + dma_sg_free(hd->heap, &hd->config.elem_array); /* free DMA buffer */ if (hd->dma_buffer) { From 27a4d95e866e39c977ea492b74ae15c967d639df Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Mon, 16 Feb 2026 19:53:13 +0200 Subject: [PATCH 16/54] alloc.h: remove rbrelloac() and rbrealloc_align() These interfaces are no longer used anywhere, 
so they can be safely removed. Signed-off-by: Kai Vehmanen --- posix/include/rtos/alloc.h | 23 ----------------------- test/cmocka/src/common_mocks.c | 10 ---------- zephyr/include/rtos/alloc.h | 23 ----------------------- 3 files changed, 56 deletions(-) diff --git a/posix/include/rtos/alloc.h b/posix/include/rtos/alloc.h index b8efac0d3112..5ba40b258512 100644 --- a/posix/include/rtos/alloc.h +++ b/posix/include/rtos/alloc.h @@ -97,29 +97,6 @@ static inline void *rballoc(uint32_t flags, size_t bytes) return rballoc_align(flags, bytes, PLATFORM_DCACHE_ALIGN); } -/** - * Changes size of the memory block allocated. - * @param ptr Address of the block to resize. - * @param flags Flags, see SOF_MEM_FLAG_... - * @param bytes New size in bytes. - * @param old_bytes Old size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the resized memory of NULL if failed. - */ -void *rbrealloc_align(void *ptr, uint32_t flags, size_t bytes, - size_t old_bytes, uint32_t alignment); - -/** - * Similar to rballoc_align(), returns resized buffer aligned to - * PLATFORM_DCACHE_ALIGN. - */ -static inline void *rbrealloc(void *ptr, uint32_t flags, - size_t bytes, size_t old_bytes) -{ - return rbrealloc_align(ptr, flags, bytes, old_bytes, - PLATFORM_DCACHE_ALIGN); -} - /** * Frees the memory block. * @param ptr Pointer to the memory block. 
diff --git a/test/cmocka/src/common_mocks.c b/test/cmocka/src/common_mocks.c index 60b6215c4cd5..16fee8f2ff48 100644 --- a/test/cmocka/src/common_mocks.c +++ b/test/cmocka/src/common_mocks.c @@ -59,16 +59,6 @@ void WEAK *rzalloc(uint32_t flags, return calloc(bytes, 1); } -void WEAK *rbrealloc_align(void *ptr, uint32_t flags, - size_t bytes, size_t old_bytes, uint32_t alignment) -{ - (void)flags; - (void)old_bytes; - (void)alignment; - - return realloc(ptr, bytes); -} - void WEAK *rmalloc_align(uint32_t flags, size_t bytes, uint32_t alignment) { (void)flags; diff --git a/zephyr/include/rtos/alloc.h b/zephyr/include/rtos/alloc.h index 984dc8fa01e5..6a7fd17aa474 100644 --- a/zephyr/include/rtos/alloc.h +++ b/zephyr/include/rtos/alloc.h @@ -88,29 +88,6 @@ static inline void *rballoc(uint32_t flags, size_t bytes) return rballoc_align(flags, bytes, PLATFORM_DCACHE_ALIGN); } -/** - * Changes size of the memory block allocated. - * @param ptr Address of the block to resize. - * @param flags Flags, see SOF_MEM_FLAG_... - * @param bytes New size in bytes. - * @param old_bytes Old size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the resized memory of NULL if failed. - */ -void *rbrealloc_align(void *ptr, uint32_t flags, size_t bytes, - size_t old_bytes, uint32_t alignment); - -/** - * Similar to rballoc_align(), returns resized buffer aligned to - * PLATFORM_DCACHE_ALIGN. - */ -static inline void *rbrealloc(void *ptr, uint32_t flags, - size_t bytes, size_t old_bytes) -{ - return rbrealloc_align(ptr, flags, bytes, old_bytes, - PLATFORM_DCACHE_ALIGN); -} - /** * Frees the memory block. * @param ptr Pointer to the memory block. From 8b0fe8b54026e1cfd9c5c49b17deeac3dfd244a6 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Mon, 16 Feb 2026 19:56:52 +0200 Subject: [PATCH 17/54] audio: host-zephyr: ensure host data heap is set Force to use user-space LL heap in host_common_new() if CONFIG_SOF_USERSPACE_LL is set. 
Signed-off-by: Kai Vehmanen --- src/audio/host-zephyr.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index a9cfbda7adec..0257a509d6d3 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -724,6 +724,15 @@ __cold int host_common_new(struct host_data *hd, struct comp_dev *dev, hd->chan_index = -1; hd->copy_type = COMP_COPY_NORMAL; +#ifdef CONFIG_SOF_USERSPACE_LL + /* + * copier_host_create() uses mod_zalloc() to allocate + * the 'hd' host data object and does not set hd->heap. + * If LL is run in user-space, assign the 'heap' here. + */ + hd->heap = zephyr_ll_user_heap(); +#endif + return 0; } From 92907cc08df287a4c7f46f18b1be76f0c594e038 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Mon, 16 Feb 2026 20:06:10 +0200 Subject: [PATCH 18/54] audio: host-zephyr: make component usable from user-space Ensure component heap is correctly passed whenever memory is allocated in the component. This allows to run the component both in kernel and user space. 
Signed-off-by: Kai Vehmanen --- src/audio/copier/host_copier.h | 1 + src/audio/host-zephyr.c | 32 +++++++++++++++++++------------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/audio/copier/host_copier.h b/src/audio/copier/host_copier.h index 83e891192690..5192506da42f 100644 --- a/src/audio/copier/host_copier.h +++ b/src/audio/copier/host_copier.h @@ -113,6 +113,7 @@ struct host_data { uint64_t next_sync; uint64_t period_in_cycles; #endif + struct k_heap *heap; }; int host_common_new(struct host_data *hd, struct comp_dev *dev, diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index 0257a509d6d3..7ae35d88a7a9 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -743,6 +744,7 @@ __cold static struct comp_dev *host_new(const struct comp_driver *drv, struct comp_dev *dev; struct host_data *hd; const struct ipc_config_host *ipc_host = spec; + struct k_heap *heap = NULL; int ret; assert_can_be_cold(); @@ -754,10 +756,17 @@ __cold static struct comp_dev *host_new(const struct comp_driver *drv, return NULL; dev->ipc_config = *config; - hd = rzalloc(SOF_MEM_FLAG_USER, sizeof(*hd)); +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif + + hd = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, sizeof(*hd), 0); if (!hd) goto e_data; + memset(hd, 0, sizeof(*hd)); + hd->heap = heap; + hd->nobytes_last_logged = k_uptime_get(); comp_set_drvdata(dev, hd); @@ -770,7 +779,7 @@ __cold static struct comp_dev *host_new(const struct comp_driver *drv, return dev; e_dev: - rfree(hd); + sof_heap_free(heap, hd); e_data: comp_free_device(dev); return NULL; @@ -794,7 +803,7 @@ __cold static void host_free(struct comp_dev *dev) comp_dbg(dev, "entry"); host_common_free(hd); - rfree(hd); + sof_heap_free(hd->heap, hd); comp_free_device(dev); } @@ -864,11 +873,6 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, int i, err; bool 
is_scheduling_source = dev == dev->pipeline->sched_comp; uint32_t round_up_size; -#ifdef CONFIG_SOF_USERSPACE_LL - struct k_heap *heap = zephyr_ll_user_heap(); -#else - struct k_heap *heap = NULL; -#endif /* host params always installed by pipeline IPC */ hd->host_size = params->buffer.size; @@ -957,7 +961,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, } } else { /* allocate not shared buffer */ - hd->dma_buffer = buffer_alloc_range(heap, buffer_size_preferred, buffer_size, + hd->dma_buffer = buffer_alloc_range(hd->heap, buffer_size_preferred, buffer_size, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_DMA, addr_align, BUFFER_USAGE_NOT_SHARED); if (!hd->dma_buffer) { @@ -1014,8 +1018,8 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, memset(dma_cfg, 0, sizeof(*dma_cfg)); - dma_block_cfg = rzalloc(SOF_MEM_FLAG_USER, - sizeof(*dma_block_cfg)); + dma_block_cfg = sof_heap_alloc(hd->heap, SOF_MEM_FLAG_USER, + sizeof(*dma_block_cfg), 0); if (!dma_block_cfg) { comp_err(dev, "dma_block_config allocation failed"); @@ -1023,6 +1027,8 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, goto err_release_channel; } + memset(dma_block_cfg, 0, sizeof(*dma_block_cfg)); + dma_cfg->block_count = 1; dma_cfg->source_data_size = config->src_width; dma_cfg->dest_data_size = config->dest_width; @@ -1086,7 +1092,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, err_free_block_cfg: dma_cfg->head_block = NULL; - rfree(dma_block_cfg); + sof_heap_free(hd->heap, dma_block_cfg); err_release_channel: sof_dma_release_channel(hd->dma, hd->chan_index); hd->chan_index = -1; @@ -1166,7 +1172,7 @@ void host_common_reset(struct host_data *hd, uint16_t state) /* free DMA block configuration */ if (hd->z_config.head_block) - rfree(hd->z_config.head_block); + sof_heap_free(hd->heap, hd->z_config.head_block); /* reset buffer pointers */ hd->local_pos = 0; From 761674354f3b11ec24bd339dbb5a8e692a1ceec0 Mon Sep 17 00:00:00 2001 From: Kai 
Vehmanen Date: Thu, 19 Feb 2026 14:51:14 +0200 Subject: [PATCH 19/54] (section host-zephyr STOP) From 1e93105f66d59614057cf85d7c2e65ff355b9b17 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 19 Feb 2026 14:51:22 +0200 Subject: [PATCH 20/54] (section dai-zephyr START) From 5b4411c779e724cef288077697dc7947bd885b56 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Feb 2026 18:36:07 +0200 Subject: [PATCH 21/54] audio: pcm_converter: make global tables available to user-space The pcm_converter depends on a set of global function tables to set up correct conversion functions. These objects need to be made available to user-space threads, so that pcm_converter can be also run in user-space. No impact to kernel-space use of pcm_converter. Signed-off-by: Kai Vehmanen --- src/audio/pcm_converter/pcm_converter_generic.c | 9 +++++---- src/audio/pcm_converter/pcm_converter_hifi3.c | 9 +++++---- src/audio/pcm_converter/pcm_remap.c | 5 +++-- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/audio/pcm_converter/pcm_converter_generic.c b/src/audio/pcm_converter/pcm_converter_generic.c index fe96e6d1f124..4938be438dbc 100644 --- a/src/audio/pcm_converter/pcm_converter_generic.c +++ b/src/audio/pcm_converter/pcm_converter_generic.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -669,7 +670,7 @@ static int pcm_convert_f_to_s32(const struct audio_stream *source, } #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_S32LE */ -const struct pcm_func_map pcm_func_map[] = { +APP_TASK_DATA const struct pcm_func_map pcm_func_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_U8 { SOF_IPC_FRAME_U8, SOF_IPC_FRAME_U8, just_copy }, #endif /* CONFIG_PCM_CONVERTER_FORMAT_U8 */ @@ -732,7 +733,7 @@ const struct pcm_func_map pcm_func_map[] = { #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_S32LE */ }; -const size_t pcm_func_count = ARRAY_SIZE(pcm_func_map); +APP_TASK_DATA const size_t pcm_func_count 
= ARRAY_SIZE(pcm_func_map); #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 static int pcm_convert_s16_c16_to_s16_c32(const struct audio_stream *source, @@ -1020,7 +1021,7 @@ static int pcm_convert_s24_c32_to_s24_c24_link_gtw(const struct audio_stream *so #endif -const struct pcm_func_vc_map pcm_func_vc_map[] = { +APP_TASK_DATA const struct pcm_func_vc_map pcm_func_vc_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S32_LE, SOF_IPC_FRAME_S16_LE, pcm_convert_s16_c16_to_s16_c32 }, @@ -1101,6 +1102,6 @@ const struct pcm_func_vc_map pcm_func_vc_map[] = { #endif }; -const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); +APP_TASK_DATA const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); #endif diff --git a/src/audio/pcm_converter/pcm_converter_hifi3.c b/src/audio/pcm_converter/pcm_converter_hifi3.c index 2b6ca607415d..7c75e326b1c0 100644 --- a/src/audio/pcm_converter/pcm_converter_hifi3.c +++ b/src/audio/pcm_converter/pcm_converter_hifi3.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -764,7 +765,7 @@ static int pcm_convert_f_to_s32(const struct audio_stream *source, #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_32LE */ #endif /* XCHAL_HAVE_FP */ -const struct pcm_func_map pcm_func_map[] = { +APP_TASK_DATA const struct pcm_func_map pcm_func_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_S16LE { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, just_copy }, #endif /* CONFIG_PCM_CONVERTER_FORMAT_S16LE */ @@ -807,7 +808,7 @@ const struct pcm_func_map pcm_func_map[] = { #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_S32LE */ #endif /* XCHAL_HAVE_FP */ }; -const size_t pcm_func_count = ARRAY_SIZE(pcm_func_map); +APP_TASK_DATA const size_t pcm_func_count = ARRAY_SIZE(pcm_func_map); #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 static int pcm_convert_s16_c16_to_s16_c32(const struct audio_stream *source, 
@@ -1206,7 +1207,7 @@ static int pcm_convert_s24_c32_to_s24_c24(const struct audio_stream *source, */ #endif -const struct pcm_func_vc_map pcm_func_vc_map[] = { +APP_TASK_DATA const struct pcm_func_vc_map pcm_func_vc_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S32_LE, SOF_IPC_FRAME_S16_LE, pcm_convert_s16_c16_to_s16_c32 }, @@ -1283,6 +1284,6 @@ const struct pcm_func_vc_map pcm_func_vc_map[] = { #endif }; -const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); +APP_TASK_DATA const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); #endif diff --git a/src/audio/pcm_converter/pcm_remap.c b/src/audio/pcm_converter/pcm_remap.c index 9204b21ee8ab..4ae300e195c9 100644 --- a/src/audio/pcm_converter/pcm_remap.c +++ b/src/audio/pcm_converter/pcm_remap.c @@ -5,6 +5,7 @@ #include #include +#include static void mute_channel_c16(struct audio_stream *stream, int channel, int frames) { @@ -423,7 +424,7 @@ static int remap_c16_to_c32_no_shift(const struct audio_stream *source, uint32_t /* Unfortunately, all these nice "if"s were commented out to suppress * CI "defined but not used" warnings. 
*/ -const struct pcm_func_map pcm_remap_func_map[] = { +APP_TASK_DATA const struct pcm_func_map pcm_remap_func_map[] = { /* #if CONFIG_PCM_CONVERTER_FORMAT_S16LE */ { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, remap_c16}, /* #endif */ @@ -474,4 +475,4 @@ const struct pcm_func_map pcm_remap_func_map[] = { /* #endif */ }; -const size_t pcm_remap_func_count = ARRAY_SIZE(pcm_remap_func_map); +APP_TASK_DATA const size_t pcm_remap_func_count = ARRAY_SIZE(pcm_remap_func_map); From e75247f930402dd530a664aa5eaab113ad79024e Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Feb 2026 18:39:00 +0200 Subject: [PATCH 22/54] audio: comp_buffer: do not call notifier interface in user-space The notifier feature is not available when running in user-space, so disable the notify calls in comp_buffer_free() in these builds. Signed-off-by: Kai Vehmanen --- src/audio/buffers/comp_buffer.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/audio/buffers/comp_buffer.c b/src/audio/buffers/comp_buffer.c index 6804bbf71f04..56c9ec56f6f4 100644 --- a/src/audio/buffers/comp_buffer.c +++ b/src/audio/buffers/comp_buffer.c @@ -153,11 +153,14 @@ static void comp_buffer_free(struct sof_audio_buffer *audio_buffer) buf_dbg(buffer, "buffer_free()"); +#ifndef CONFIG_SOF_USERSPACE_LL + /* TODO: to be replaced with direct function calls */ notifier_event(buffer, NOTIFIER_ID_BUFFER_FREE, NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data)); /* In case some listeners didn't unregister from buffer's callbacks */ notifier_unregister_all(NULL, buffer); +#endif struct k_heap *heap = buffer->audio_buffer.heap; From f40986a98328d80bb52f15f43e472679bfbdc7cc Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Feb 2026 13:22:08 +0200 Subject: [PATCH 23/54] audio: dai-zephyr: make memory allocations user-space compatible Convert all memory allocations to use the sof_heap_alloc() interface and pass the dai_data specific heap object. 
This makes dai-zephyr code compatible with use from user-space, but does not affect kernel space use. Signed-off-by: Kai Vehmanen --- src/audio/dai-zephyr.c | 52 ++++++++++++++++++++++---------- src/include/sof/lib/dai-zephyr.h | 1 + src/ipc/ipc4/dai.c | 6 ++-- 3 files changed, 41 insertions(+), 18 deletions(-) diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index 59bcca3c14a5..dd25876d46f6 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -505,6 +506,15 @@ __cold int dai_common_new(struct dai_data *dd, struct comp_dev *dev, dd->xrun = 0; dd->chan = NULL; +#ifdef CONFIG_SOF_USERSPACE_LL + /* + * copier_dai_create() uses mod_zalloc() to allocate + * the 'dd' dai data object and does not set dd->heap. + * If LL is run in user-space, assign the 'heap' here. + */ + dd->heap = zephyr_ll_user_heap(); +#endif + /* I/O performance init, keep it last so the function does not reach this in case * of return on error, so that we do not waste a slot */ @@ -557,6 +567,7 @@ __cold static struct comp_dev *dai_new(const struct comp_driver *drv, struct comp_dev *dev; const struct ipc_config_dai *dai_cfg = spec; struct dai_data *dd; + struct k_heap *heap = NULL; int ret; assert_can_be_cold(); @@ -569,10 +580,17 @@ __cold static struct comp_dev *dai_new(const struct comp_driver *drv, dev->ipc_config = *config; - dd = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*dd)); +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif + + dd = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*dd), 0); if (!dd) goto e_data; + memset(dd, 0, sizeof(*dd)); + dd->heap = heap; + comp_set_drvdata(dev, dd); ret = dai_common_new(dd, dev, dai_cfg); @@ -586,7 +604,7 @@ __cold static struct comp_dev *dai_new(const struct comp_driver *drv, return dev; error: - rfree(dd); + sof_heap_free(dd->heap, dd); e_data: comp_free_device(dev); return NULL; @@
-614,7 +632,7 @@ __cold void dai_common_free(struct dai_data *dd) dai_put(dd->dai); - rfree(dd->dai_spec_config); + sof_heap_free(dd->heap, dd->dai_spec_config); } __cold static void dai_free(struct comp_dev *dev) @@ -628,7 +646,7 @@ __cold static void dai_free(struct comp_dev *dev) dai_common_free(dd); - rfree(dd); + sof_heap_free(dd->heap, dd); comp_free_device(dev); } @@ -823,7 +841,7 @@ static int dai_set_sg_config(struct dai_data *dd, struct comp_dev *dev, uint32_t } while (--max_block_count > 0); } - err = dma_sg_alloc(NULL, &config->elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(dd->heap, &config->elem_array, SOF_MEM_FLAG_USER, config->direction, period_count, period_bytes, @@ -849,8 +867,9 @@ static int dai_set_dma_config(struct dai_data *dd, struct comp_dev *dev) comp_dbg(dev, "entry"); - dma_cfg = rballoc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, - sizeof(struct dma_config)); + dma_cfg = sof_heap_alloc(dd->heap, + SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, + sizeof(struct dma_config), 0); if (!dma_cfg) { comp_err(dev, "dma_cfg allocation failed"); return -ENOMEM; @@ -879,10 +898,11 @@ static int dai_set_dma_config(struct dai_data *dd, struct comp_dev *dev) else dma_cfg->dma_slot = config->src_dev; - dma_block_cfg = rballoc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, - sizeof(struct dma_block_config) * dma_cfg->block_count); + dma_block_cfg = sof_heap_alloc(dd->heap, + SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, + sizeof(struct dma_block_config) * dma_cfg->block_count, 0); if (!dma_block_cfg) { - rfree(dma_cfg); + sof_heap_free(dd->heap, dma_cfg); comp_err(dev, "dma_block_config allocation failed"); return -ENOMEM; } @@ -1016,7 +1036,7 @@ static int dai_set_dma_buffer(struct dai_data *dd, struct comp_dev *dev, return err; } } else { - dd->dma_buffer = buffer_alloc_range(NULL, buffer_size_preferred, buffer_size, + dd->dma_buffer = buffer_alloc_range(dd->heap, 
buffer_size_preferred, buffer_size, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_DMA, addr_align, BUFFER_USAGE_NOT_SHARED); if (!dd->dma_buffer) { @@ -1104,8 +1124,8 @@ int dai_common_params(struct dai_data *dd, struct comp_dev *dev, if (err < 0) { buffer_free(dd->dma_buffer); dd->dma_buffer = NULL; - dma_sg_free(NULL, &config->elem_array); - rfree(dd->z_config); + dma_sg_free(dd->heap, &config->elem_array); + sof_heap_free(dd->heap, dd->z_config); dd->z_config = NULL; } @@ -1235,10 +1255,10 @@ void dai_common_reset(struct dai_data *dd, struct comp_dev *dev) if (!dd->delayed_dma_stop) dai_dma_release(dd, dev); - dma_sg_free(NULL, &config->elem_array); + dma_sg_free(dd->heap, &config->elem_array); if (dd->z_config) { - rfree(dd->z_config->head_block); - rfree(dd->z_config); + sof_heap_free(dd->heap, dd->z_config->head_block); + sof_heap_free(dd->heap, dd->z_config); dd->z_config = NULL; } diff --git a/src/include/sof/lib/dai-zephyr.h b/src/include/sof/lib/dai-zephyr.h index 8ff739fa42d5..429f32f52e2c 100644 --- a/src/include/sof/lib/dai-zephyr.h +++ b/src/include/sof/lib/dai-zephyr.h @@ -168,6 +168,7 @@ struct dai_data { #endif /* Copier gain params */ struct copier_gain_params *gain_data; + struct k_heap *heap; }; /* these 3 are here to satisfy clk.c and ssp.h interconnection, will be removed leter */ diff --git a/src/ipc/ipc4/dai.c b/src/ipc/ipc4/dai.c index 9f63fd4196f7..4814f4f6a484 100644 --- a/src/ipc/ipc4/dai.c +++ b/src/ipc/ipc4/dai.c @@ -374,15 +374,17 @@ __cold int dai_config(struct dai_data *dd, struct comp_dev *dev, /* allocated dai_config if not yet */ if (!dd->dai_spec_config) { size = sizeof(*copier_cfg); - dd->dai_spec_config = rzalloc(SOF_MEM_FLAG_USER, size); + dd->dai_spec_config = sof_heap_alloc(dd->heap, SOF_MEM_FLAG_USER, size, 0); if (!dd->dai_spec_config) { comp_err(dev, "No memory for size %d", size); return -ENOMEM; } + memset(dd->dai_spec_config, 0, size); + ret = memcpy_s(dd->dai_spec_config, size, copier_cfg, size); if (ret < 0) { - 
rfree(dd->dai_spec_config); + sof_heap_free(dd->heap, dd->dai_spec_config); dd->dai_spec_config = NULL; return -EINVAL; } From 63e55185ed878eb29e11a54d6396de8594d26b3a Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Feb 2026 13:45:44 +0200 Subject: [PATCH 24/54] lib: dai: make dai_get() and dai_put() compatible with user-space The dai_get()/dai_put() provide a helper to access DAI devices. When used in user-space, the wrapper struct should be created in user-space memory. Signed-off-by: Kai Vehmanen --- src/lib/dai.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/lib/dai.c b/src/lib/dai.c index 8e45f8a4bb97..3f77fe0a4b20 100644 --- a/src/lib/dai.c +++ b/src/lib/dai.c @@ -11,6 +11,7 @@ #include #include #include +#include /* for zephyr_ll_user_heap() */ #include #include #include @@ -298,6 +299,11 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) { const struct device *dev; struct dai *d; + struct k_heap *heap = NULL; + +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif dev = dai_get_device(type, index); if (!dev) { @@ -306,10 +312,12 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) return NULL; } - d = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(struct dai)); + d = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(struct dai), 0); if (!d) return NULL; + memset(d, 0, sizeof(struct dai)); + d->index = index; d->type = type; d->dev = dev; @@ -319,7 +327,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) if (dai_probe(d->dev)) { tr_err(&dai_tr, "dai_get: failed to probe dai with index %d type %d", index, type); - rfree(d); + sof_heap_free(heap, d); return NULL; } @@ -330,6 +338,11 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) void dai_put(struct dai *dai) { int ret; + struct k_heap *heap = NULL; + +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif ret = 
dai_remove(dai->dev); if (ret < 0) { @@ -337,7 +350,7 @@ void dai_put(struct dai *dai) dai->index, ret); } - rfree(dai); + sof_heap_free(heap, dai); } #else static inline const struct dai_type_info *dai_find_type(uint32_t type) From 0287036d6a00d75a04f7c4bf1244f4092bf189e4 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Feb 2026 18:34:48 +0200 Subject: [PATCH 25/54] audio: dai-zephyr: rework calls to DMA driver, remove channel pointer For historical reasons, dai-zephyr has somewhat complicated code to manage the DMA channel instance information. When a DMA channel is allocated, a pointer to the system DMA channel table is acquired and some additional information is stored per channel. This is however redundant as the only piece of information actually needed is the channel index. Simplify the code by not storing the channel pointer anymore, but rather just store the channel index and use that in all calls to the DMA driver. Signed-off-by: Kai Vehmanen --- src/audio/copier/copier_dai.c | 2 + src/audio/dai-zephyr.c | 63 +++++++++++++++----------------- src/include/sof/lib/dai-zephyr.h | 2 +- src/ipc/ipc4/dai.c | 19 +++++----- 4 files changed, 42 insertions(+), 44 deletions(-) diff --git a/src/audio/copier/copier_dai.c b/src/audio/copier/copier_dai.c index ceb93b2a22bf..1e82339556ba 100644 --- a/src/audio/copier/copier_dai.c +++ b/src/audio/copier/copier_dai.c @@ -208,6 +208,8 @@ __cold static int copier_dai_init(struct comp_dev *dev, if (!dd) return -ENOMEM; memset(dd, 0, sizeof(*dd)); + dd->chan_index = -1; + comp_info(dev, "dd %p initialized, index %d", dd, dd->chan_index); ret = dai_common_new(dd, dev, dai); if (ret < 0) diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index dd25876d46f6..7ebe62d10f78 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -504,7 +504,6 @@ __cold int dai_common_new(struct dai_data *dd, struct comp_dev *dev, dma_sg_init(&dd->config.elem_array); dd->xrun = 0; - dd->chan = NULL; #ifdef 
CONFIG_SOF_USERSPACE_LL /* @@ -590,6 +589,8 @@ __cold static struct comp_dev *dai_new(const struct comp_driver *drv, memset(dd, 0, sizeof(*dd)); dd->heap = heap; + dd->chan_index = -1; + comp_info(dev, "dd %p initialized, index %d", dd, dd->chan_index); comp_set_drvdata(dev, dd); @@ -621,10 +622,8 @@ __cold void dai_common_free(struct dai_data *dd) if (dd->group) dai_group_put(dd->group); - if (dd->chan) { - sof_dma_release_channel(dd->dma, dd->chan->index); - dd->chan->dev_data = NULL; - } + if (dd->chan_index != -1) + sof_dma_release_channel(dd->dma, dd->chan_index); sof_dma_put(dd->dma); @@ -1156,9 +1155,9 @@ int dai_common_config_prepare(struct dai_data *dd, struct comp_dev *dev) return -EINVAL; } - if (dd->chan) { + if (dd->chan_index != -1) { comp_info(dev, "dma channel index %d already configured", - dd->chan->index); + dd->chan_index); return 0; } @@ -1172,18 +1171,14 @@ int dai_common_config_prepare(struct dai_data *dd, struct comp_dev *dev) } /* get DMA channel */ - channel = sof_dma_request_channel(dd->dma, channel); - if (channel < 0) { + dd->chan_index = sof_dma_request_channel(dd->dma, channel); + if (dd->chan_index < 0) { comp_err(dev, "dma_request_channel() failed"); - dd->chan = NULL; return -EIO; } - dd->chan = &dd->dma->chan[channel]; - dd->chan->dev_data = dd; - comp_dbg(dev, "new configured dma channel index %d", - dd->chan->index); + dd->chan_index); return 0; } @@ -1194,8 +1189,8 @@ int dai_common_prepare(struct dai_data *dd, struct comp_dev *dev) dd->total_data_processed = 0; - if (!dd->chan) { - comp_err(dev, "Missing dd->chan."); + if (dd->chan_index == -1) { + comp_err(dev, "Missing dd->chan_index."); comp_set_state(dev, COMP_TRIGGER_RESET); return -EINVAL; } @@ -1216,7 +1211,7 @@ int dai_common_prepare(struct dai_data *dd, struct comp_dev *dev) return 0; } - ret = sof_dma_config(dd->chan->dma, dd->chan->index, dd->z_config); + ret = sof_dma_config(dd->dma, dd->chan_index, dd->z_config); if (ret < 0) comp_set_state(dev, 
COMP_TRIGGER_RESET); @@ -1303,7 +1298,7 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, /* only start the DAI if we are not XRUN handling */ if (dd->xrun == 0) { - ret = sof_dma_start(dd->chan->dma, dd->chan->index); + ret = sof_dma_start(dd->dma, dd->chan_index); if (ret < 0) return ret; @@ -1341,16 +1336,16 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, /* only start the DAI if we are not XRUN handling */ if (dd->xrun == 0) { /* recover valid start position */ - ret = sof_dma_stop(dd->chan->dma, dd->chan->index); + ret = sof_dma_stop(dd->dma, dd->chan_index); if (ret < 0) return ret; /* dma_config needed after stop */ - ret = sof_dma_config(dd->chan->dma, dd->chan->index, dd->z_config); + ret = sof_dma_config(dd->dma, dd->chan_index, dd->z_config); if (ret < 0) return ret; - ret = sof_dma_start(dd->chan->dma, dd->chan->index); + ret = sof_dma_start(dd->dma, dd->chan_index); if (ret < 0) return ret; @@ -1378,11 +1373,11 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, * as soon as possible. 
*/ #if CONFIG_COMP_DAI_STOP_TRIGGER_ORDER_REVERSE - ret = sof_dma_stop(dd->chan->dma, dd->chan->index); + ret = sof_dma_stop(dd->dma, dd->chan_index); dai_trigger_op(dd->dai, cmd, dev->direction); #else dai_trigger_op(dd->dai, cmd, dev->direction); - ret = sof_dma_stop(dd->chan->dma, dd->chan->index); + ret = sof_dma_stop(dd->dma, dd->chan_index); if (ret) { comp_warn(dev, "dma was stopped earlier"); ret = 0; @@ -1392,11 +1387,11 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, case COMP_TRIGGER_PAUSE: comp_dbg(dev, "PAUSE"); #if CONFIG_COMP_DAI_STOP_TRIGGER_ORDER_REVERSE - ret = sof_dma_suspend(dd->chan->dma, dd->chan->index); + ret = sof_dma_suspend(dd->dma, dd->chan_index); dai_trigger_op(dd->dai, cmd, dev->direction); #else dai_trigger_op(dd->dai, cmd, dev->direction); - ret = sof_dma_suspend(dd->chan->dma, dd->chan->index); + ret = sof_dma_suspend(dd->dma, dd->chan_index); #endif break; case COMP_TRIGGER_PRE_START: @@ -1494,7 +1489,7 @@ static int dai_comp_trigger(struct comp_dev *dev, int cmd) */ static int dai_get_status(struct comp_dev *dev, struct dai_data *dd, struct dma_status *stat) { - int ret = sof_dma_get_status(dd->chan->dma, dd->chan->index, stat); + int ret = sof_dma_get_status(dd->dma, dd->chan_index, stat); #if CONFIG_XRUN_NOTIFICATIONS_ENABLE if (ret == -EPIPE && !dd->xrun_notification_sent) { dd->xrun_notification_sent = send_copier_gateway_xrun_notif_msg @@ -1599,7 +1594,7 @@ int dai_zephyr_multi_endpoint_copy(struct dai_data **dd, struct comp_dev *dev, #endif for (i = 0; i < num_endpoints; i++) { - ret = sof_dma_reload(dd[i]->chan->dma, dd[i]->chan->index, 0); + ret = sof_dma_reload(dd[i]->dma, dd[i]->chan_index, 0); if (ret < 0) { dai_report_reload_xrun(dd[i], dev, 0); return ret; @@ -1625,10 +1620,10 @@ int dai_zephyr_multi_endpoint_copy(struct dai_data **dd, struct comp_dev *dev, status = dai_dma_multi_endpoint_cb(dd[i], dev, frames, multi_endpoint_buffer); if (status == SOF_DMA_CB_STATUS_END) - 
sof_dma_stop(dd[i]->chan->dma, dd[i]->chan->index); + sof_dma_stop(dd[i]->dma, dd[i]->chan_index); copy_bytes = frames * audio_stream_frame_bytes(&dd[i]->dma_buffer->stream); - ret = sof_dma_reload(dd[i]->chan->dma, dd[i]->chan->index, copy_bytes); + ret = sof_dma_reload(dd[i]->dma, dd[i]->chan_index, copy_bytes); if (ret < 0) { dai_report_reload_xrun(dd[i], dev, copy_bytes); return ret; @@ -1817,7 +1812,7 @@ int dai_common_copy(struct dai_data *dd, struct comp_dev *dev, pcm_converter_fun comp_warn(dev, "nothing to copy, src_frames: %u, sink_frames: %u", src_frames, sink_frames); #endif - sof_dma_reload(dd->chan->dma, dd->chan->index, 0); + sof_dma_reload(dd->dma, dd->chan_index, 0); return 0; } @@ -1827,9 +1822,9 @@ int dai_common_copy(struct dai_data *dd, struct comp_dev *dev, pcm_converter_fun comp_warn(dev, "dai trigger copy failed"); if (dai_dma_cb(dd, dev, copy_bytes, converter) == SOF_DMA_CB_STATUS_END) - sof_dma_stop(dd->chan->dma, dd->chan->index); + sof_dma_stop(dd->dma, dd->chan_index); - ret = sof_dma_reload(dd->chan->dma, dd->chan->index, copy_bytes); + ret = sof_dma_reload(dd->dma, dd->chan_index, copy_bytes); if (ret < 0) { dai_report_reload_xrun(dd, dev, copy_bytes); return ret; @@ -1867,7 +1862,7 @@ int dai_common_ts_config_op(struct dai_data *dd, struct comp_dev *dev) struct dai_ts_cfg *cfg = &dd->ts_config; comp_dbg(dev, "dai_ts_config()"); - if (!dd->chan) { + if (dd->chan_index == -1) { comp_err(dev, "No DMA channel information"); return -EINVAL; } @@ -1890,7 +1885,7 @@ int dai_common_ts_config_op(struct dai_data *dd, struct comp_dev *dev) cfg->direction = dai->direction; cfg->index = dd->dai->index; cfg->dma_id = dd->dma->plat_data.id; - cfg->dma_chan_index = dd->chan->index; + cfg->dma_chan_index = dd->chan_index; cfg->dma_chan_count = dd->dma->plat_data.channels; return dai_ts_config(dd->dai->dev, cfg); diff --git a/src/include/sof/lib/dai-zephyr.h b/src/include/sof/lib/dai-zephyr.h index 429f32f52e2c..c446a1ac1edd 100644 --- 
a/src/include/sof/lib/dai-zephyr.h +++ b/src/include/sof/lib/dai-zephyr.h @@ -117,7 +117,7 @@ typedef int (*channel_copy_func)(const struct audio_stream *src, unsigned int sr */ struct dai_data { /* local DMA config */ - struct dma_chan_data *chan; + int chan_index; uint32_t stream_id; struct dma_sg_config config; struct dma_config *z_config; diff --git a/src/ipc/ipc4/dai.c b/src/ipc/ipc4/dai.c index 4814f4f6a484..025f23e2e7a7 100644 --- a/src/ipc/ipc4/dai.c +++ b/src/ipc/ipc4/dai.c @@ -202,7 +202,7 @@ void dai_dma_release(struct dai_data *dd, struct comp_dev *dev) } /* put the allocated DMA channel first */ - if (dd->chan) { + if (dd->chan_index != -1) { struct ipc4_llp_reading_slot slot; if (dd->slot_info.node_id) { @@ -224,15 +224,16 @@ void dai_dma_release(struct dai_data *dd, struct comp_dev *dev) */ #if CONFIG_ZEPHYR_NATIVE_DRIVERS /* if reset is after pause dma has already been stopped */ - dma_stop(dd->chan->dma->z_dev, dd->chan->index); + dma_stop(dd->dma->z_dev, dd->chan_index); - dma_release_channel(dd->chan->dma->z_dev, dd->chan->index); + dma_release_channel(dd->dma->z_dev, dd->chan_index); #else + /* TODO: to remove this, no longer works! */ dma_stop_legacy(dd->chan); dma_channel_put_legacy(dd->chan); -#endif - dd->chan->dev_data = NULL; dd->chan = NULL; +#endif + } } @@ -351,9 +352,9 @@ __cold int dai_config(struct dai_data *dd, struct comp_dev *dev, return 0; } - if (dd->chan) { + if (dd->chan_index != -1) { comp_info(dev, "Configured. 
dma channel index %d, ignore...", - dd->chan->index); + dd->chan_index); return 0; } @@ -414,7 +415,7 @@ int dai_common_position(struct dai_data *dd, struct comp_dev *dev, platform_dai_wallclock(dev, &dd->wallclock); posn->wallclock = dd->wallclock; - ret = dma_get_status(dd->dma->z_dev, dd->chan->index, &status); + ret = dma_get_status(dd->dma->z_dev, dd->chan_index, &status); if (ret < 0) return ret; @@ -439,7 +440,7 @@ void dai_dma_position_update(struct dai_data *dd, struct comp_dev *dev) if (!dd->slot_info.node_id) return; - ret = dma_get_status(dd->dma->z_dev, dd->chan->index, &status); + ret = dma_get_status(dd->dma->z_dev, dd->chan_index, &status); if (ret < 0) return; From a32d6b892f1dbf86b19744806f326a4ce7e680d4 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Feb 2026 21:41:58 +0200 Subject: [PATCH 26/54] audio: dai-zephyr: convert spinlock into mutex for properties The spinlock used to protect access to DAI properties can be converted to a mutex as this is only accessed from IPC and LL threads and both are normal Zephyr threads. As an additional benefit, use of mutex allows to run the dai-zephyr module in user-space. 
Signed-off-by: Kai Vehmanen --- src/audio/dai-zephyr.c | 53 +++++++++++++++++++------------- src/include/sof/lib/dai-zephyr.h | 5 ++- 2 files changed, 36 insertions(+), 22 deletions(-) diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index 7ebe62d10f78..c9001fd62edf 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -202,12 +202,13 @@ __cold int dai_set_config(struct dai *dai, struct ipc_config_dai *common_config, /* called from ipc/ipc3/dai.c */ int dai_get_handshake(struct dai *dai, int direction, int stream_id) { - k_spinlock_key_t key = k_spin_lock(&dai->lock); - const struct dai_properties *props = dai_get_properties(dai->dev, direction, - stream_id); - int hs_id = props->dma_hs_id; + const struct dai_properties *props; + int hs_id; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + props = dai_get_properties(dai->dev, direction, stream_id); + hs_id = props->dma_hs_id; + k_mutex_unlock(dai->lock); return hs_id; } @@ -216,39 +217,41 @@ int dai_get_handshake(struct dai *dai, int direction, int stream_id) int dai_get_fifo_depth(struct dai *dai, int direction) { const struct dai_properties *props; - k_spinlock_key_t key; int fifo_depth; if (!dai) return 0; - key = k_spin_lock(&dai->lock); + k_mutex_lock(dai->lock, K_FOREVER); props = dai_get_properties(dai->dev, direction, 0); fifo_depth = props->fifo_depth; - k_spin_unlock(&dai->lock, key); + k_mutex_unlock(dai->lock); return fifo_depth; } int dai_get_stream_id(struct dai *dai, int direction) { - k_spinlock_key_t key = k_spin_lock(&dai->lock); - const struct dai_properties *props = dai_get_properties(dai->dev, direction, 0); - int stream_id = props->stream_id; + const struct dai_properties *props; + int stream_id; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + props = dai_get_properties(dai->dev, direction, 0); + stream_id = props->stream_id; + k_mutex_unlock(dai->lock); return stream_id; } static int dai_get_fifo(struct dai *dai, int 
direction, int stream_id) { - k_spinlock_key_t key = k_spin_lock(&dai->lock); - const struct dai_properties *props = dai_get_properties(dai->dev, direction, - stream_id); - int fifo_address = props->fifo_address; + const struct dai_properties *props; + int fifo_address; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + props = dai_get_properties(dai->dev, direction, stream_id); + fifo_address = props->fifo_address; + k_mutex_unlock(dai->lock); return fifo_address; } @@ -500,7 +503,12 @@ __cold int dai_common_new(struct dai_data *dd, struct comp_dev *dev, return -ENODEV; } - k_spinlock_init(&dd->dai->lock); +#ifdef CONFIG_SOF_USERSPACE_LL + dd->dai->lock = k_object_alloc(K_OBJ_MUTEX); +#else + dd->dai->lock = &dd->dai->lock_obj; +#endif + k_mutex_init(dd->dai->lock); dma_sg_init(&dd->config.elem_array); dd->xrun = 0; @@ -629,6 +637,10 @@ __cold void dai_common_free(struct dai_data *dd) dai_release_llp_slot(dd); +#ifdef CONFIG_SOF_USERSPACE_LL + k_object_free(dd->dai->lock); +#endif + dai_put(dd->dai); sof_heap_free(dd->heap, dd->dai_spec_config); @@ -1944,16 +1956,15 @@ static int dai_ts_stop_op(struct comp_dev *dev) uint32_t dai_get_init_delay_ms(struct dai *dai) { const struct dai_properties *props; - k_spinlock_key_t key; uint32_t init_delay; if (!dai) return 0; - key = k_spin_lock(&dai->lock); + k_mutex_lock(dai->lock, K_FOREVER); props = dai_get_properties(dai->dev, 0, 0); init_delay = props->reg_init_delay; - k_spin_unlock(&dai->lock, key); + k_mutex_unlock(dai->lock); return init_delay; } diff --git a/src/include/sof/lib/dai-zephyr.h b/src/include/sof/lib/dai-zephyr.h index c446a1ac1edd..76e9de96c325 100644 --- a/src/include/sof/lib/dai-zephyr.h +++ b/src/include/sof/lib/dai-zephyr.h @@ -52,7 +52,10 @@ struct dai { uint32_t dma_dev; const struct device *dev; const struct dai_data *dd; - struct k_spinlock lock; /* protect properties */ + struct k_mutex *lock; /* protect properties */ +#ifndef CONFIG_SOF_USERSPACE_LL + struct k_mutex 
lock_obj; +#endif }; union hdalink_cfg { From 5b41923901adc4f9d4ba2f14f4906883d6ad9cb3 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Wed, 18 Feb 2026 15:49:22 +0200 Subject: [PATCH 27/54] audio: dai-zephyr: migrate to use dai_get_properties_copy() Modify code to allocate DAI properties object on stack and use dai_get_properties_copy(). This is required when DAI code is run in user-space and a syscall is needed to talk to the DAI driver. It's not possible to return a pointer to kernel memory, so instead data needs to be copied to caller stack. Signed-off-by: Kai Vehmanen --- src/audio/dai-zephyr.c | 54 +++++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index c9001fd62edf..d12ca2f86061 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -202,58 +202,62 @@ __cold int dai_set_config(struct dai *dai, struct ipc_config_dai *common_config, /* called from ipc/ipc3/dai.c */ int dai_get_handshake(struct dai *dai, int direction, int stream_id) { - const struct dai_properties *props; - int hs_id; + struct dai_properties props; + int ret; k_mutex_lock(dai->lock, K_FOREVER); - props = dai_get_properties(dai->dev, direction, stream_id); - hs_id = props->dma_hs_id; + ret = dai_get_properties_copy(dai->dev, direction, stream_id, &props); k_mutex_unlock(dai->lock); + if (ret < 0) + return ret; - return hs_id; + return props.dma_hs_id; } /* called from ipc/ipc3/dai.c and ipc/ipc4/dai.c */ int dai_get_fifo_depth(struct dai *dai, int direction) { - const struct dai_properties *props; - int fifo_depth; + struct dai_properties props; + int ret; if (!dai) return 0; k_mutex_lock(dai->lock, K_FOREVER); - props = dai_get_properties(dai->dev, direction, 0); - fifo_depth = props->fifo_depth; + ret = dai_get_properties_copy(dai->dev, direction, 0, &props); k_mutex_unlock(dai->lock); + if (ret < 0) + return 0; - return fifo_depth; + return props.fifo_depth; } int 
dai_get_stream_id(struct dai *dai, int direction) { - const struct dai_properties *props; - int stream_id; + struct dai_properties props; + int ret; k_mutex_lock(dai->lock, K_FOREVER); - props = dai_get_properties(dai->dev, direction, 0); - stream_id = props->stream_id; + ret = dai_get_properties_copy(dai->dev, direction, 0, &props); k_mutex_unlock(dai->lock); + if (ret < 0) + return ret; - return stream_id; + return props.stream_id; } static int dai_get_fifo(struct dai *dai, int direction, int stream_id) { - const struct dai_properties *props; - int fifo_address; + struct dai_properties props; + int ret; k_mutex_lock(dai->lock, K_FOREVER); - props = dai_get_properties(dai->dev, direction, stream_id); - fifo_address = props->fifo_address; + ret = dai_get_properties_copy(dai->dev, direction, stream_id, &props); k_mutex_unlock(dai->lock); + if (ret < 0) + return ret; - return fifo_address; + return props.fifo_address; } /* this is called by DMA driver every time descriptor has completed */ @@ -1955,15 +1959,17 @@ static int dai_ts_stop_op(struct comp_dev *dev) uint32_t dai_get_init_delay_ms(struct dai *dai) { - const struct dai_properties *props; - uint32_t init_delay; + struct dai_properties props; + uint32_t init_delay = 0; + int ret; if (!dai) return 0; k_mutex_lock(dai->lock, K_FOREVER); - props = dai_get_properties(dai->dev, 0, 0); - init_delay = props->reg_init_delay; + ret = dai_get_properties_copy(dai->dev, 0, 0, &props); + if (!ret) + init_delay = props.reg_init_delay; k_mutex_unlock(dai->lock); return init_delay; From 2049d7ceaf3795830b0e4731e85233321deca520 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 19 Feb 2026 15:05:22 +0200 Subject: [PATCH 28/54] (section dai-zephyr STOP) From dcbac4675d2a2de2ceb7803e2943fd87c363db00 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 3 Mar 2026 15:54:12 +0200 Subject: [PATCH 29/54] (section audio module infra START) From 5ffe6a9a2683cbd55d5cbcf4b9408dfc79098f23 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen 
Date: Thu, 12 Feb 2026 20:38:50 +0200 Subject: [PATCH 30/54] audio: module_adapter: alloc from LL user heap if LL run in user When SOF is built with LL pipes in user-space, module adapter should allocate all resources from the LL user heap. Signed-off-by: Kai Vehmanen --- src/audio/module_adapter/module_adapter.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/audio/module_adapter/module_adapter.c b/src/audio/module_adapter/module_adapter.c index 9ccbcf33dca5..79da6dc4b96e 100644 --- a/src/audio/module_adapter/module_adapter.c +++ b/src/audio/module_adapter/module_adapter.c @@ -111,7 +111,12 @@ static struct processing_module *module_adapter_mem_alloc(const struct comp_driv } mod_heap = &mod_heap_user->heap; } else { +#ifdef CONFIG_SOF_USERSPACE_LL + mod_heap = zephyr_ll_user_heap(); + comp_cl_dbg(drv, "using ll user heap for module"); +#else mod_heap = drv->user_heap; +#endif mod_heap_user = NULL; heap_size = 0; } From 8b010f874e7098830dbb034e2997f31d975d8b9f Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:11:56 +0200 Subject: [PATCH 31/54] audio: copier: export copier endpoint ops to user-space Allow the copier endpoint interface ops to be used also from user-space. 
Signed-off-by: Kai Vehmanen --- src/audio/copier/copier.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/audio/copier/copier.c b/src/audio/copier/copier.c index 0468664c0634..b25a7d69056a 100644 --- a/src/audio/copier/copier.c +++ b/src/audio/copier/copier.c @@ -1187,7 +1187,7 @@ __cold static int copier_unbind(struct processing_module *mod, struct bind_info return 0; } -static struct module_endpoint_ops copier_endpoint_ops = { +static APP_TASK_DATA const struct module_endpoint_ops copier_endpoint_ops = { .get_total_data_processed = copier_get_processed_data, .position = copier_position, .dai_ts_config = copier_dai_ts_config_op, @@ -1198,7 +1198,7 @@ .trigger = copier_comp_trigger }; -static const struct module_interface copier_interface = { +static APP_TASK_DATA const struct module_interface copier_interface = { .init = copier_init, .prepare = copier_prepare, .process_audio_stream = copier_process, From 7f22668c69ec5c3c75807512bb84d1507579dc70 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 3 Mar 2026 15:54:37 +0200 Subject: [PATCH 32/54] (section scheduler changes START) From 759b05868ac6a751b5421b8477a7c89c0e614a2f Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:42:13 +0200 Subject: [PATCH 33/54] schedule: zephyr_ll: add zephyr_ll_task_free() Add counterpart to zephyr_ll_task_alloc() to allow freeing the task with correct heap.
Signed-off-by: Kai Vehmanen --- src/include/sof/schedule/ll_schedule_domain.h | 1 + src/schedule/zephyr_ll.c | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index dc28e43c7461..d3a8200d0085 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -99,6 +99,7 @@ static inline struct ll_schedule_domain *dma_domain_get(void) #ifdef CONFIG_SOF_USERSPACE_LL struct task *zephyr_ll_task_alloc(void); +void zephyr_ll_task_free(struct task *task); struct k_heap *zephyr_ll_user_heap(void); void zephyr_ll_user_resources_init(void); #endif /* CONFIG_SOF_USERSPACE_LL */ diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index e2d02f12926b..0dacea6c0a9a 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -406,7 +406,7 @@ static int zephyr_ll_task_schedule_after(void *data, struct task *task, uint64_t * This is synchronous - after this returns the object can be destroyed! * Assertion: under Zephyr this is always called from a thread context! 
*/ -static int zephyr_ll_task_free(void *data, struct task *task) +static int zephyr_ll_task_sched_free(void *data, struct task *task) { struct zephyr_ll *sch = data; uint32_t flags; @@ -522,7 +522,7 @@ static const struct scheduler_ops zephyr_ll_ops = { .schedule_task = zephyr_ll_task_schedule, .schedule_task_before = zephyr_ll_task_schedule_before, .schedule_task_after = zephyr_ll_task_schedule_after, - .schedule_task_free = zephyr_ll_task_free, + .schedule_task_free = zephyr_ll_task_sched_free, .schedule_task_cancel = zephyr_ll_task_cancel, .scheduler_free = zephyr_ll_scheduler_free, }; @@ -533,6 +533,12 @@ struct task *zephyr_ll_task_alloc(void) return sof_heap_alloc(zephyr_ll_user_heap(), SOF_MEM_FLAG_USER, sizeof(struct task), sizeof(void *)); } + +void zephyr_ll_task_free(struct task *task) +{ + sof_heap_free(zephyr_ll_user_heap(), task); +} + #endif /* CONFIG_SOF_USERSPACE_LL */ int zephyr_ll_task_init(struct task *task, From 4b76f90009fc7417d1a570e6670a215e62e4051a Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:43:48 +0200 Subject: [PATCH 34/54] schedule: zephyr_ll: add zephyr_ll_grant_access() Add function zephyr_ll_grant_access() to allow other threads to access the scheduler mutex. This is needed if work is submitted from other threads to the scheduler. 
Signed-off-by: Kai Vehmanen --- src/include/sof/schedule/ll_schedule_domain.h | 1 + src/schedule/zephyr_ll.c | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index d3a8200d0085..ec1e093879f1 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -102,6 +102,7 @@ struct task *zephyr_ll_task_alloc(void); void zephyr_ll_task_free(struct task *task); struct k_heap *zephyr_ll_user_heap(void); void zephyr_ll_user_resources_init(void); +void zephyr_ll_grant_access(struct k_thread *thread); #endif /* CONFIG_SOF_USERSPACE_LL */ static inline struct ll_schedule_domain *domain_init diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 0dacea6c0a9a..88e9ae0b33fc 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -539,6 +539,13 @@ void zephyr_ll_task_free(struct task *task) sof_heap_free(zephyr_ll_user_heap(), task); } +void zephyr_ll_grant_access(struct k_thread *thread) +{ + struct zephyr_ll *ll_sch = (struct zephyr_ll *)scheduler_get_data(SOF_SCHEDULE_LL_TIMER); + + k_thread_access_grant(thread, ll_sch->lock); +} + #endif /* CONFIG_SOF_USERSPACE_LL */ int zephyr_ll_task_init(struct task *task, From b5ad4b8102d3b5dc830011f6800197139361f11a Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Wed, 4 Mar 2026 19:29:08 +0200 Subject: [PATCH 35/54] schedule: zephyr_ll_user: make double-mapping conditional Only double-map the LL resources if CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS is set. 
Signed-off-by: Kai Vehmanen --- src/schedule/zephyr_ll_user.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/schedule/zephyr_ll_user.c b/src/schedule/zephyr_ll_user.c index e64d65ab8ae5..011a3be020ec 100644 --- a/src/schedule/zephyr_ll_user.c +++ b/src/schedule/zephyr_ll_user.c @@ -55,6 +55,7 @@ static struct k_heap *zephyr_ll_heap_init(void) if (ret) k_panic(); +#ifdef CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS mem_partition.start = (uintptr_t)sys_cache_uncached_ptr_get(heap->heap.init_mem); mem_partition.attr = K_MEM_PARTITION_P_RW_U_RW; ret = k_mem_domain_add_partition(&ll_mem_resources.mem_domain, &mem_partition); @@ -62,6 +63,7 @@ static struct k_heap *zephyr_ll_heap_init(void) (void *)mem_partition.start, heap->heap.init_bytes, ret); if (ret) k_panic(); +#endif return heap; } From e4ced86edc4f6b6bd5efb0a9c50c721c0a2d835d Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:45:16 +0200 Subject: [PATCH 36/54] schedule: allocate the scheduler objects with sof_heap_alloc Ensure the scheduler objects and lists of schedulers are allocated such that they can be used with both kernel and user-space LL scheduler implementations. 
Signed-off-by: Kai Vehmanen --- src/schedule/schedule.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/schedule/schedule.c b/src/schedule/schedule.c index 5e56b6c490e0..3b80ab4bf552 100644 --- a/src/schedule/schedule.c +++ b/src/schedule/schedule.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -47,15 +48,20 @@ int schedule_task_init(struct task *task, static void scheduler_register(struct schedule_data *scheduler) { struct schedulers **sch = arch_schedulers_get(); + struct k_heap *heap = NULL; +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif if (!*sch) { /* init schedulers list */ - *sch = rzalloc(SOF_MEM_FLAG_KERNEL, - sizeof(**sch)); + *sch = sof_heap_alloc(heap, SOF_MEM_FLAG_KERNEL, + sizeof(**sch), 0); if (!*sch) { tr_err(&sch_tr, "allocation failed"); return; } + memset(*sch, 0, sizeof(**sch)); list_init(&(*sch)->list); } @@ -65,16 +71,21 @@ static void scheduler_register(struct schedule_data *scheduler) void scheduler_init(int type, const struct scheduler_ops *ops, void *data) { struct schedule_data *sch; + struct k_heap *heap = NULL; +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif if (!ops || !ops->schedule_task || !ops->schedule_task_cancel || !ops->schedule_task_free) return; - sch = rzalloc(SOF_MEM_FLAG_KERNEL, sizeof(*sch)); + sch = sof_heap_alloc(heap, SOF_MEM_FLAG_KERNEL, sizeof(*sch), 0); if (!sch) { tr_err(&sch_tr, "allocation failed"); sof_panic(SOF_IPC_PANIC_IPC); } + memset(sch, 0, sizeof(*sch)); list_init(&sch->list); sch->type = type; sch->ops = ops; From 6d616b95b3e20ff7ebb7960af5d65b24b660850c Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:47:29 +0200 Subject: [PATCH 37/54] WIP: schedule: limit user-LL to core0 Multiple temporary limitations to limit user LL scheduler to only work on core 0. 
This is mostly related to privileged cpu_get_id() that is used in many places to check the id of the core code is running on. This is not available to user-space at the moment, so temporary measures are needed. Signed-off-by: Kai Vehmanen --- src/schedule/zephyr_domain.c | 4 ++-- zephyr/schedule.c | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index 6a5812353d9e..612d14dbdf38 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -294,7 +294,7 @@ static int zephyr_domain_register_user(struct ll_schedule_domain *domain, void (*handler)(void *arg), void *arg) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); - int core = cpu_get_id(); + int core = 0; /* cpu_get_id(); */ struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; char thread_name[] = "ll_thread0"; k_tid_t thread; @@ -407,7 +407,7 @@ static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain, struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); - int core = cpu_get_id(); + int core = 0; /* cpu_get_id(); */ struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; tr_dbg(&ll_tr, "entry"); diff --git a/zephyr/schedule.c b/zephyr/schedule.c index 75155b5d4913..886132e1c04f 100644 --- a/zephyr/schedule.c +++ b/zephyr/schedule.c @@ -21,6 +21,11 @@ static struct schedulers *_schedulers[CONFIG_CORE_COUNT]; */ struct schedulers **arch_schedulers_get(void) { + if (k_is_user_context()) { + printk("FIXME: using core0 scheduler\n"); + return _schedulers; + } + return _schedulers + cpu_get_id(); } EXPORT_SYMBOL(arch_schedulers_get); From a5e4df0b2e9332f617d9fbdad1b12f35b011f900 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:59:32 +0200 Subject: [PATCH 38/54] zephyr: schedule: allow user-space to access scheduler list Make the 
scheduler list available to all user-space threads. Signed-off-by: Kai Vehmanen --- zephyr/schedule.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zephyr/schedule.c b/zephyr/schedule.c index 886132e1c04f..94623b2ed5db 100644 --- a/zephyr/schedule.c +++ b/zephyr/schedule.c @@ -10,10 +10,11 @@ #include #include #include +#include #include #include -static struct schedulers *_schedulers[CONFIG_CORE_COUNT]; +static APP_TASK_BSS struct schedulers *_schedulers[CONFIG_CORE_COUNT]; /** * Retrieves registered schedulers. From 1f1ee4ea3b9920cc80cd96d295cede36aa830e26 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 18:00:10 +0200 Subject: [PATCH 39/54] zephyr: wrapper: modify platform_dai_wallclock() for user-space Don't use sof_cycle_get_64() if SOF built for user-space LL. Signed-off-by: Kai Vehmanen --- zephyr/wrapper.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index 2e302f22c94b..3e75363ceb58 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -265,7 +265,11 @@ void platform_dai_timestamp(struct comp_dev *dai, /* get current wallclock for componnent */ void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock) { +#ifndef CONFIG_SOF_USERSPACE_LL *wallclock = sof_cycle_get_64(); +#else + *wallclock = k_uptime_get(); +#endif } /* From bd095b3967b7dc868f4095fb70e4826767568e75 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 27 Feb 2026 16:59:02 +0200 Subject: [PATCH 40/54] schedule: add scheduler_init_context() and scheduler_free_context() Add two optional ops that allow the schedule.h user to get access to the thread context that will be used for scheduling. This is critical when the callbacks are run in user-space context and schedule.h client needs to grant access to objects like locks to the callback thread. 
Signed-off-by: Kai Vehmanen --- src/include/sof/schedule/schedule.h | 54 +++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h index bbdcbbecf3b4..9925e8d9b273 100644 --- a/src/include/sof/schedule/schedule.h +++ b/src/include/sof/schedule/schedule.h @@ -158,6 +158,25 @@ struct scheduler_ops { * This operation is optional. */ int (*scheduler_restore)(void *data); + + /** + * Initializes context + * @param data Private data of selected scheduler. + * @param task task that needs to be scheduled + * @return thread that will be used to run the scheduled task + * + * This operation is optional. + */ + struct k_thread *(*scheduler_init_context)(void *data, struct task *task); + + /** + * Frees scheduler context + * @param data Private data of selected scheduler. + * + * This operation is optional. + */ + void (*scheduler_free_context)(void *data); + }; /** \brief Holds information about scheduler. */ @@ -379,6 +398,41 @@ static inline int schedulers_restore(void) return 0; } + +/** See scheduler_ops::scheduler_init_context */ +static inline struct k_thread *scheduler_init_context(struct task *task) +{ + struct schedulers *schedulers = *arch_schedulers_get(); + struct schedule_data *sch; + struct list_item *slist; + + assert(schedulers); + + list_for_item(slist, &schedulers->list) { + sch = container_of(slist, struct schedule_data, list); + if (sch->ops->scheduler_init_context) + return sch->ops->scheduler_init_context(sch->data, task); + } + + return 0; +} + +/** See scheduler_ops::scheduler_free_context */ +static inline void scheduler_free_context(void) +{ + struct schedulers *schedulers = *arch_schedulers_get(); + struct schedule_data *sch; + struct list_item *slist; + + assert(schedulers); + + list_for_item(slist, &schedulers->list) { + sch = container_of(slist, struct schedule_data, list); + if (sch->ops->scheduler_free_context) + 
sch->ops->scheduler_free_context(sch->data); + } +} + /** * Initializes scheduling task. * @param task Task to be initialized. From cb3eaef45c679245ad83718399717ac0cf4c7803 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 18:02:34 +0200 Subject: [PATCH 41/54] schedule: zephyr_ll: implement scheduler_init_context() Implement the optional scheduler_init_context() and scheduler_free_context() ops for user-space LL scheduler. Signed-off-by: Kai Vehmanen --- src/schedule/zephyr_domain.c | 12 ++++++--- src/schedule/zephyr_ll.c | 48 +++++++++++++++++++++++++++--------- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index 612d14dbdf38..0cf96ce0a1e5 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -294,18 +294,24 @@ static int zephyr_domain_register_user(struct ll_schedule_domain *domain, void (*handler)(void *arg), void *arg) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); - int core = 0; /* cpu_get_id(); */ - struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; + struct zephyr_domain_thread *dt; char thread_name[] = "ll_thread0"; k_tid_t thread; + int core; tr_dbg(&ll_tr, "entry"); + if (task->core < 0 || task->core >= CONFIG_CORE_COUNT) + return -EINVAL; + + dt = zephyr_domain->domain_thread + task->core; + /* domain work only needs registered once on each core */ if (dt->handler) return 0; - __ASSERT_NO_MSG(task->core == core); + /* safety check executed in kernel mode */ + __ASSERT_NO_MSG(cpu_get_id() == core); dt->handler = handler; dt->arg = arg; diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 88e9ae0b33fc..0b2c91c4be3d 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -363,17 +363,7 @@ static int zephyr_ll_task_schedule_common(struct zephyr_ll *sch, struct task *ta ret = domain_register(sch->ll_domain, task, &schedule_ll_callback, sch); if (ret < 0) - 
tr_err(&ll_tr, "cannot register domain %d", - ret); - -#if CONFIG_SOF_USERSPACE_LL - k_thread_access_grant(zephyr_domain_thread_tid(sch->ll_domain), sch->lock); - - tr_dbg(&ll_tr, "granting access to lock %p for thread %p", sch->lock, - zephyr_domain_thread_tid(sch->ll_domain)); - tr_dbg(&ll_tr, "granting access to domain lock %p for thread %p", &sch->ll_domain->lock, - zephyr_domain_thread_tid(sch->ll_domain)); -#endif + tr_err(&ll_tr, "cannot register domain %d", ret); return 0; } @@ -518,6 +508,38 @@ static void zephyr_ll_scheduler_free(void *data, uint32_t flags) sch->n_tasks); } +#if CONFIG_SOF_USERSPACE_LL +struct k_thread *zephyr_ll_init_context(void *data, struct task *task) +{ + struct zephyr_ll *sch = data; + int ret; + + ret = domain_register(sch->ll_domain, task, &schedule_ll_callback, sch); + if (ret < 0) { + tr_err(&ll_tr, "cannot init_context %d", ret); + return NULL; + } + + if (!k_is_user_context()) { + k_thread_access_grant(zephyr_domain_thread_tid(sch->ll_domain), sch->lock); + + tr_dbg(&ll_tr, "granting access to lock %p for thread %p", sch->lock, + zephyr_domain_thread_tid(sch->ll_domain)); + tr_dbg(&ll_tr, "granting access to domain lock %p for thread %p", &sch->ll_domain->lock, + zephyr_domain_thread_tid(sch->ll_domain)); + } + + return zephyr_domain_thread_tid(sch->ll_domain); +} + +void zephyr_ll_free_context(void *data) +{ + struct zephyr_ll *sch = data; + + (void *)sch; +} +#endif + static const struct scheduler_ops zephyr_ll_ops = { .schedule_task = zephyr_ll_task_schedule, .schedule_task_before = zephyr_ll_task_schedule_before, @@ -525,6 +547,10 @@ static const struct scheduler_ops zephyr_ll_ops = { .schedule_task_free = zephyr_ll_task_sched_free, .schedule_task_cancel = zephyr_ll_task_cancel, .scheduler_free = zephyr_ll_scheduler_free, +#if CONFIG_SOF_USERSPACE_LL + .scheduler_init_context = zephyr_ll_init_context, + .scheduler_free_context = zephyr_ll_free_context, +#endif }; #if CONFIG_SOF_USERSPACE_LL From 
947c647c0cee227cd085ba7eb9556a31ebeb63ee Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Wed, 4 Mar 2026 19:25:43 +0200 Subject: [PATCH 42/54] schedule: ll_schedule_domain: add domain_thread_init/free ops Add new domain ops that are called from privileged context and are used to set up resources like threads and initialize other kernel objects. Signed-off-by: Kai Vehmanen --- src/include/sof/schedule/ll_schedule_domain.h | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index ec1e093879f1..08c4626ef5a6 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -43,6 +43,20 @@ struct ll_schedule_domain_ops { void (*handler)(void *arg), void *arg); int (*domain_unregister)(struct ll_schedule_domain *domain, struct task *task, uint32_t num_tasks); +#if CONFIG_SOF_USERSPACE_LL + /* + * Initialize the scheduling thread and perform all privileged setup + * (thread creation, timer init, access grants). Called once from + * kernel context before any user-space domain_register() calls. + */ + int (*domain_thread_init)(struct ll_schedule_domain *domain, + struct task *task); + /* Free resources acquired by domain_thread_init(). Called from + * kernel context when the scheduling context is being torn down. + */ + void (*domain_thread_free)(struct ll_schedule_domain *domain, + uint32_t num_tasks); +#endif void (*domain_enable)(struct ll_schedule_domain *domain, int core); void (*domain_disable)(struct ll_schedule_domain *domain, int core); #if CONFIG_CROSS_CORE_STREAM @@ -179,6 +193,31 @@ static inline void domain_task_cancel(struct ll_schedule_domain *domain, domain->ops->domain_task_cancel(domain, task); } +#if CONFIG_SOF_USERSPACE_LL +/* + * Initialize the scheduling thread and do all privileged setup. + * Must be called from kernel context before user-space tasks register. 
+ */ +static inline int domain_thread_init(struct ll_schedule_domain *domain, + struct task *task) +{ + assert(domain->ops->domain_thread_init); + + return domain->ops->domain_thread_init(domain, task); +} + +/* + * Free resources acquired by domain_thread_init(). + * Must be called from kernel context. + */ +static inline void domain_thread_free(struct ll_schedule_domain *domain, + uint32_t num_tasks) +{ + if (domain->ops->domain_thread_free) + domain->ops->domain_thread_free(domain, num_tasks); +} +#endif + static inline int domain_register(struct ll_schedule_domain *domain, struct task *task, void (*handler)(void *arg), void *arg) From dafc7656edb193979851d36091a20178a05fab50 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Wed, 4 Mar 2026 19:27:31 +0200 Subject: [PATCH 43/54] schedule: zephyr_ll: implement thread_init/free domain ops Implement the new domain_thread_init/free() ops for the Zephyr LL scheduler implementation. Move all privileged operations to these methods. After this change, the domain_register() and domain_unregister() are now safe to call from user-space context. 
Signed-off-by: Kai Vehmanen --- src/schedule/zephyr_domain.c | 159 +++++++++++++++++++++++++---------- src/schedule/zephyr_ll.c | 10 ++- 2 files changed, 122 insertions(+), 47 deletions(-) diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index 0cf96ce0a1e5..70631ab2cf76 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -126,7 +126,8 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3) } #endif - dt->handler(dt->arg); + if (dt->handler) + dt->handler(dt->arg); #ifdef CONFIG_SCHEDULE_LL_STATS_LOG cycles1 = k_cycle_get_32(); @@ -287,11 +288,14 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, #else /* CONFIG_SOF_USERSPACE_LL */ -/* User-space implementation for register/unregister */ - -static int zephyr_domain_register_user(struct ll_schedule_domain *domain, - struct task *task, - void (*handler)(void *arg), void *arg) +/* + * Privileged thread initialization for userspace LL scheduling. + * Creates the scheduling thread, sets up timer, grants access to kernel + * objects. Must be called from kernel context before any user-space + * domain_register() calls. 
+ */ +static int zephyr_domain_thread_init(struct ll_schedule_domain *domain, + struct task *task) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); struct zephyr_domain_thread *dt; @@ -299,55 +303,50 @@ static int zephyr_domain_register_user(struct ll_schedule_domain *domain, k_tid_t thread; int core; - tr_dbg(&ll_tr, "entry"); + tr_dbg(&ll_tr, "thread_init entry"); if (task->core < 0 || task->core >= CONFIG_CORE_COUNT) return -EINVAL; - dt = zephyr_domain->domain_thread + task->core; + core = task->core; + dt = zephyr_domain->domain_thread + core; - /* domain work only needs registered once on each core */ - if (dt->handler) + /* thread only needs to be created once per core */ + if (dt->ll_thread) return 0; - /* safety check executed in kernel mode */ - __ASSERT_NO_MSG(cpu_get_id() == core); - - dt->handler = handler; - dt->arg = arg; + dt->handler = NULL; /* 10 is rather random, we better not accumulate 10 missed timer interrupts */ k_sem_init(dt->sem, 0, 10); thread_name[sizeof(thread_name) - 2] = '0' + core; + /* Allocate thread structure dynamically */ + dt->ll_thread = k_object_alloc(K_OBJ_THREAD); if (!dt->ll_thread) { - /* Allocate thread structure dynamically */ - dt->ll_thread = k_object_alloc(K_OBJ_THREAD); - if (!dt->ll_thread) { - tr_err(&ll_tr, "Failed to allocate thread object for core %d", core); - dt->handler = NULL; - dt->arg = NULL; - return -ENOMEM; - } + tr_err(&ll_tr, "Failed to allocate thread object for core %d", core); + dt->handler = NULL; + dt->arg = NULL; + return -ENOMEM; + } - thread = k_thread_create(dt->ll_thread, ll_sched_stack[core], ZEPHYR_LL_STACK_SIZE, - zephyr_domain_thread_fn, zephyr_domain, - INT_TO_POINTER(core), NULL, CONFIG_LL_THREAD_PRIORITY, - K_USER, K_FOREVER); + thread = k_thread_create(dt->ll_thread, ll_sched_stack[core], ZEPHYR_LL_STACK_SIZE, + zephyr_domain_thread_fn, zephyr_domain, + INT_TO_POINTER(core), NULL, CONFIG_LL_THREAD_PRIORITY, + K_USER, K_FOREVER); - 
k_thread_cpu_mask_clear(thread); - k_thread_cpu_mask_enable(thread, core); - k_thread_name_set(thread, thread_name); + k_thread_cpu_mask_clear(thread); + k_thread_cpu_mask_enable(thread, core); + k_thread_name_set(thread, thread_name); - k_mem_domain_add_thread(zephyr_ll_mem_domain(), thread); - k_thread_access_grant(thread, dt->sem, domain->lock, zephyr_domain->timer); - user_grant_dai_access_all(thread); - user_grant_dma_access_all(thread); - tr_dbg(&ll_tr, "granted LL access to thread %p (core %d)", thread, core); + k_mem_domain_add_thread(zephyr_ll_mem_domain(), thread); + k_thread_access_grant(thread, dt->sem, domain->lock, zephyr_domain->timer); + user_grant_dai_access_all(thread); + user_grant_dma_access_all(thread); + tr_dbg(&ll_tr, "granted LL access to thread %p (core %d)", thread, core); - k_thread_start(thread); - } + k_thread_start(thread); k_mutex_lock(domain->lock, K_FOREVER); if (!k_timer_user_data_get(zephyr_domain->timer)) { @@ -370,6 +369,43 @@ static int zephyr_domain_register_user(struct ll_schedule_domain *domain, return 0; } +/* + * User-space register: bookkeeping only. The privileged thread setup has + * already been done by domain_thread_init() called from kernel context. 
+ */ +static int zephyr_domain_register_user(struct ll_schedule_domain *domain, + struct task *task, + void (*handler)(void *arg), void *arg) +{ + struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); + struct zephyr_domain_thread *dt; + int core; + + tr_dbg(&ll_tr, "register_user entry"); + + if (task->core < 0 || task->core >= CONFIG_CORE_COUNT) + return -EINVAL; + + core = task->core; + dt = zephyr_domain->domain_thread + core; + + if (!dt->ll_thread) { + tr_err(&ll_tr, "domain_thread_init() not called for core %d", core); + return -EINVAL; + } + + __ASSERT_NO_MSG(!dt->handler || dt->handler == handler); + if (dt->handler) + return 0; + + dt->handler = handler; + dt->arg = arg; + + tr_info(&ll_tr, "task registered on core %d", core); + + return 0; +} + static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain, struct task *task, uint32_t num_tasks) { @@ -384,14 +420,6 @@ static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain, k_mutex_lock(domain->lock, K_FOREVER); - if (!atomic_read(&domain->total_num_tasks)) { - /* Disable the watchdog */ - watchdog_disable(core); - - k_timer_stop(zephyr_domain->timer); - k_timer_user_data_set(zephyr_domain->timer, NULL); - } - zephyr_domain->domain_thread[core].handler = NULL; k_mutex_unlock(domain->lock); @@ -410,6 +438,45 @@ static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain, return 0; } +/* + * Free resources acquired by zephyr_domain_thread_init(). + * Stops the timer, aborts the scheduling thread and frees the thread object. + * Must be called from kernel context. 
+ */ +static void zephyr_domain_thread_free(struct ll_schedule_domain *domain, + uint32_t num_tasks) +{ + struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); + int core = cpu_get_id(); + struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; + + tr_dbg(&ll_tr, "thread_free entry, core %d, num_tasks %u", core, num_tasks); + + /* Still tasks on other cores, only clean up this core's thread */ + k_mutex_lock(domain->lock, K_FOREVER); + + if (!num_tasks && !atomic_read(&domain->total_num_tasks)) { + /* Last task globally: stop the timer and watchdog */ + watchdog_disable(core); + + k_timer_stop(zephyr_domain->timer); + k_timer_user_data_set(zephyr_domain->timer, NULL); + } + + dt->handler = NULL; + dt->arg = NULL; + + k_mutex_unlock(domain->lock); + + if (dt->ll_thread) { + k_thread_abort(dt->ll_thread); + k_object_free(dt->ll_thread); + dt->ll_thread = NULL; + } + + tr_info(&ll_tr, "thread_free done, core %d", core); +} + struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); @@ -452,6 +519,8 @@ APP_TASK_DATA static const struct ll_schedule_domain_ops zephyr_domain_ops = { #ifdef CONFIG_SOF_USERSPACE_LL .domain_register = zephyr_domain_register_user, .domain_unregister = zephyr_domain_unregister_user, + .domain_thread_init = zephyr_domain_thread_init, + .domain_thread_free = zephyr_domain_thread_free, #else .domain_register = zephyr_domain_register, .domain_unregister = zephyr_domain_unregister, diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 0b2c91c4be3d..b46105e3d2f4 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -514,7 +514,12 @@ struct k_thread *zephyr_ll_init_context(void *data, struct task *task) struct zephyr_ll *sch = data; int ret; - ret = domain_register(sch->ll_domain, task, &schedule_ll_callback, sch); + /* + * Use domain_thread_init() for privileged setup (thread creation, + * 
 timer, access grants). domain_register() is now bookkeeping only + * and will be called later from user context when scheduling tasks. + */ + ret = domain_thread_init(sch->ll_domain, task); if (ret < 0) { tr_err(&ll_tr, "cannot init_context %d", ret); return NULL; @@ -536,7 +541,8 @@ void zephyr_ll_free_context(void *data) { struct zephyr_ll *sch = data; - (void *)sch; + tr_info(&ll_tr, "free the domain thread"); + domain_thread_free(sch->ll_domain, sch->n_tasks); } #endif From b8345da3cf6a8f8097cd634167620f2df2022f8d Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 3 Mar 2026 15:54:51 +0200 Subject: [PATCH 44/54] (section scheduler changes END) From 4bcd158c26da17ad528c76b0db502fbf864ef774 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 19 Feb 2026 16:24:36 +0200 Subject: [PATCH 45/54] (section test-case START) From ed20b95cd24e3e3dc0c27c3bad88b49fedab14b1 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Mon, 9 Feb 2026 20:03:49 +0200 Subject: [PATCH 46/54] zephyr: test: userspace: add pipeline_two_components test Add a new test to userspace_ll set that takes a step towards running full audio pipelines in user-space. The test creates a pipeline with two components (IPC4 host and DAI copiers), does pipeline prepare, one copy cycle in prepared state and tears down the pipeline. One user-space thread is created to manage the pipelines. This would be equivalent to user IPC handler thread. Another user-space thread is created for the LL scheduler. This test has some limitation, but is a useful test point to test resource allocations in pipeline, component and module adapter code. The code adds a reference test case where the same flow is fully run in kernel space. 
Signed-off-by: Kai Vehmanen --- src/audio/dai-zephyr.c | 1 + zephyr/test/userspace/test_ll_task.c | 425 ++++++++++++++++++++++++++- 2 files changed, 424 insertions(+), 2 deletions(-) diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index d12ca2f86061..b58f464bb100 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -509,6 +509,7 @@ __cold int dai_common_new(struct dai_data *dd, struct comp_dev *dev, #ifdef CONFIG_SOF_USERSPACE_LL dd->dai->lock = k_object_alloc(K_OBJ_MUTEX); + comp_set_drvdata(dev, dd); #else dd->dai->lock = &dd->dai->lock_obj; #endif diff --git a/zephyr/test/userspace/test_ll_task.c b/zephyr/test/userspace/test_ll_task.c index 234423defc60..5a4f5e058131 100644 --- a/zephyr/test/userspace/test_ll_task.c +++ b/zephyr/test/userspace/test_ll_task.c @@ -14,9 +14,19 @@ #include #include #include +#include +#include +#include +#include #include #include #include +#include +#include +#include +#include +#include +#include #include #include @@ -24,6 +34,7 @@ #include #include /* offsetof() */ +#include LOG_MODULE_DECLARE(sof_boot_test, LOG_LEVEL_DBG); @@ -36,10 +47,15 @@ K_APPMEM_PARTITION_DEFINE(userspace_ll_part); /* Global variable for test runs counter, accessible from user-space */ K_APP_BMEM(userspace_ll_part) static int test_runs; +/* User-space thread for pipeline_two_components test */ +#define PPL_USER_STACKSIZE 4096 + +static struct k_thread ppl_user_thread; +static K_THREAD_STACK_DEFINE(ppl_user_stack, PPL_USER_STACKSIZE); + static enum task_state task_callback(void *arg) { LOG_INF("entry"); - if (++test_runs > 3) return SOF_TASK_STATE_COMPLETED; @@ -77,7 +93,7 @@ static void ll_task_test(void) LOG_INF("task scheduled and running"); /* Let the task run for a bit */ - k_sleep(K_MSEC(10)); + k_sleep(K_MSEC(100)); /* Cancel the task to stop any scheduled execution */ ret = schedule_task_cancel(task); @@ -87,6 +103,9 @@ static void ll_task_test(void) ret = schedule_task_free(task); zassert_equal(ret, 0); + 
k_mem_domain_remove_partition(zephyr_ll_mem_domain(), &userspace_ll_part); + zephyr_ll_task_free(task); + LOG_INF("test complete"); } @@ -129,6 +148,408 @@ ZTEST(userspace_ll, pipeline_check) pipeline_check(); } +/* Copier UUID: 9ba00c83-ca12-4a83-943c-1fa2e82f9dda */ +static const uint8_t copier_uuid[16] = { + 0x83, 0x0c, 0xa0, 0x9b, 0x12, 0xca, 0x83, 0x4a, + 0x94, 0x3c, 0x1f, 0xa2, 0xe8, 0x2f, 0x9d, 0xda +}; + +/** + * Find the module_id (manifest entry index) for the copier module + * by iterating the firmware manifest and matching the copier UUID. + */ +static int find_copier_module_id(void) +{ + const struct sof_man_fw_desc *desc = basefw_vendor_get_manifest(); + const struct sof_man_module *mod; + uint32_t i; + + if (!desc) + return -1; + + for (i = 0; i < desc->header.num_module_entries; i++) { + mod = (const struct sof_man_module *)((const char *)desc + + SOF_MAN_MODULE_OFFSET(i)); + if (!memcmp(&mod->uuid, copier_uuid, sizeof(copier_uuid))) + return (int)i; + } + + return -1; +} + +/** + * IPC4 copier module config - used as payload for comp_new_ipc4(). + * Placed at MAILBOX_HOSTBOX_BASE before calling comp_new_ipc4(). + * Layout matches struct ipc4_copier_module_cfg from copier.h. + */ +struct copier_init_data { + struct ipc4_base_module_cfg base; + struct ipc4_audio_format out_fmt; + uint32_t copier_feature_mask; + /* Gateway config (matches struct ipc4_copier_gateway_cfg) */ + union ipc4_connector_node_id node_id; + uint32_t dma_buffer_size; + uint32_t config_length; +} __packed __aligned(4); + +static void fill_audio_format(struct ipc4_audio_format *fmt) +{ + memset(fmt, 0, sizeof(*fmt)); + fmt->sampling_frequency = IPC4_FS_48000HZ; + fmt->depth = IPC4_DEPTH_32BIT; + fmt->ch_cfg = IPC4_CHANNEL_CONFIG_STEREO; + fmt->channels_count = 2; + fmt->valid_bit_depth = 32; + fmt->s_type = IPC4_TYPE_MSB_INTEGER; + fmt->interleaving_style = IPC4_CHANNELS_INTERLEAVED; +} + +/** + * Create a copier component via IPC4. 
+ * + * @param module_id Copier module_id from manifest + * @param instance_id Instance ID for this component + * @param pipeline_id Parent pipeline ID + * @param node_id Gateway node ID (type + virtual DMA index) + */ +static struct comp_dev *create_copier(int module_id, int instance_id, + int pipeline_id, + union ipc4_connector_node_id node_id) +{ + struct ipc4_module_init_instance module_init; + struct copier_init_data cfg; + struct comp_dev *dev; + + /* Prepare copier config payload */ + memset(&cfg, 0, sizeof(cfg)); + fill_audio_format(&cfg.base.audio_fmt); + /* 2 channels * 4 bytes * 48 frames = 384 bytes */ + cfg.base.ibs = 384; + cfg.base.obs = 384; + cfg.base.is_pages = 0; + cfg.base.cpc = 0; + cfg.out_fmt = cfg.base.audio_fmt; + cfg.copier_feature_mask = 0; + cfg.node_id = node_id; + cfg.dma_buffer_size = 768; + cfg.config_length = 0; + + /* Write config data to mailbox hostbox (where comp_new_ipc4 reads it). + * Flush cache so that data is visible in SRAM before comp_new_ipc4() + * invalidates the cache line (in normal IPC flow, host writes via DMA + * directly to SRAM, so the invalidation reads fresh data; here the DSP + * core itself writes, so an explicit flush is needed). 
+ */ + memcpy((void *)MAILBOX_HOSTBOX_BASE, &cfg, sizeof(cfg)); + sys_cache_data_flush_range((void *)MAILBOX_HOSTBOX_BASE, sizeof(cfg)); + + /* Prepare IPC4 module init header */ + memset(&module_init, 0, sizeof(module_init)); + module_init.primary.r.module_id = module_id; + module_init.primary.r.instance_id = instance_id; + module_init.primary.r.type = SOF_IPC4_MOD_INIT_INSTANCE; + module_init.primary.r.msg_tgt = SOF_IPC4_MESSAGE_TARGET_MODULE_MSG; + module_init.primary.r.rsp = SOF_IPC4_MESSAGE_DIR_MSG_REQUEST; + + module_init.extension.r.param_block_size = sizeof(cfg) / sizeof(uint32_t); + module_init.extension.r.ppl_instance_id = pipeline_id; + module_init.extension.r.core_id = 0; + module_init.extension.r.proc_domain = 0; /* LL */ + + dev = comp_new_ipc4(&module_init); + + /* + * We use the IPC code to create the components. This code runs + * in kernel space, so we need to separately assign the created + * components to the user LL and IPC threads before they can be used. + */ + comp_grant_access_to_thread(dev, &ppl_user_thread); + + return dev; +} + +/** + * Context shared between kernel setup and the user-space pipeline thread. + */ +struct ppl_test_ctx { + struct pipeline *p; + struct k_heap *heap; + struct comp_dev *host_comp; + struct comp_dev *dai_comp; + struct comp_buffer *buf; + struct ipc *ipc; + struct ipc_comp_dev *ipc_pipe; +}; + +/** + * Pipeline operations: connect, complete, prepare, copy, verify, and clean up. + * This function is called either directly (kernel mode) or from a user-space + * thread, exercising pipeline_*() calls from the requested context. 
+ */ +static void pipeline_ops(struct ppl_test_ctx *ctx) +{ + struct pipeline *p = ctx->p; + struct comp_dev *host_comp = ctx->host_comp; + struct comp_dev *dai_comp = ctx->dai_comp; + struct comp_buffer *buf = ctx->buf; + int ret; + + LOG_INF("pipeline_ops: user_context=%d", k_is_user_context()); + + /* Step: Connect host -> buffer -> DAI */ + ret = pipeline_connect(host_comp, buf, PPL_CONN_DIR_COMP_TO_BUFFER); + zassert_equal(ret, 0, "connect host to buffer failed"); + + ret = pipeline_connect(dai_comp, buf, PPL_CONN_DIR_BUFFER_TO_COMP); + zassert_equal(ret, 0, "connect buffer to DAI failed"); + + LOG_INF("host -> buffer -> DAI connected"); + + /* Step: Complete the pipeline */ + ret = pipeline_complete(p, host_comp, dai_comp); + zassert_equal(ret, 0, "pipeline complete failed"); + + /* Step: Prepare the pipeline */ + p->sched_comp = host_comp; + + ret = pipeline_prepare(p, host_comp); + zassert_equal(ret, 0, "pipeline prepare failed"); + + ret = pipeline_trigger(p, host_comp, COMP_TRIGGER_PRE_START); + //zassert_equal(ret, 0, "pipeline TRIGGER_START failed"); + + LOG_INF("pipeline complete, status = %d", p->status); + + /* Step: Run copies */ + pipeline_schedule_copy(p, 1000); + + /* Step: let run for 3 msec */ + k_sleep(K_MSEC(3)); + + /* Verify pipeline source and sink assignments */ + zassert_equal(p->source_comp, host_comp, "source comp mismatch"); + zassert_equal(p->sink_comp, dai_comp, "sink comp mismatch"); + + LOG_INF("pipeline_ops done"); +} + +/** + * User-space thread entry point for pipeline_two_components test. + * p1 points to the ppl_test_ctx shared with the kernel launcher. + */ +static void pipeline_user_thread(void *p1, void *p2, void *p3) +{ + struct ppl_test_ctx *ctx = (struct ppl_test_ctx *)p1; + + zassert_true(k_is_user_context(), "expected user context"); + pipeline_ops(ctx); +} + +/** + * Test creating a pipeline with a host copier and a DAI (link) copier, + * connected through a shared buffer. 
+ * + * When run_in_user is true, all pipeline_*() calls are made from a + * separate user-space thread. + */ +static void pipeline_two_components(bool run_in_user) +{ + struct ppl_test_ctx *ctx; + struct k_heap *heap = NULL; + uint32_t pipeline_id = 2; + uint32_t priority = 0; + struct task *task; + uint32_t comp_id; + int copier_module_id; + int host_instance_id = 0; + int dai_instance_id = 1; + int core = 0; + int ret; + + /* Step: Find the copier module_id from the firmware manifest */ + copier_module_id = find_copier_module_id(); + zassert_true(copier_module_id >= 0, "copier module not found in manifest"); + LOG_INF("copier module_id = %d", copier_module_id); + + /* Step: Create pipeline */ + if (run_in_user) { + LOG_INF("running test with user memory domain"); + heap = zephyr_ll_user_heap(); + zassert_not_null(heap, "user heap not found"); + + task = zephyr_ll_task_alloc(); + zassert_not_null(task, "task allocation failed"); + } else { + task = sof_heap_alloc(NULL, SOF_MEM_FLAG_USER, sizeof(struct task), sizeof(void *)); + LOG_INF("running test with kernel memory domain"); + } + + ctx = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, sizeof(*ctx), 0); + ctx->heap = heap; + ctx->ipc = ipc_get(); + + comp_id = IPC4_COMP_ID(copier_module_id, host_instance_id); + ctx->p = pipeline_new(ctx->heap, pipeline_id, priority, comp_id, NULL); + zassert_not_null(ctx->p, "pipeline creation failed"); + + /* create the LL scheduler thread by initializing one task */ + k_mem_domain_add_partition(zephyr_ll_mem_domain(), &userspace_ll_part); + + test_runs = 0; + ret = schedule_task_init_ll(task, SOF_UUID(test_task_uuid), SOF_SCHEDULE_LL_TIMER, + priority, task_callback, + (void *)&test_runs, core, 0); + zassert_equal(ret, 0); + + LOG_INF("task init done"); + + /* Set pipeline period so components get correct dev->period and dev->frames. + * This mirrors what ipc4_create_pipeline() does in normal IPC flow. 
+ */ + ctx->p->time_domain = SOF_TIME_DOMAIN_TIMER; + ctx->p->period = LL_TIMER_PERIOD_US; + + /* Register pipeline in IPC component list so comp_new_ipc4() can + * find it via ipc_get_comp_by_ppl_id() and set dev->period. + */ + ctx->ipc_pipe = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + sizeof(struct ipc_comp_dev)); + zassert_not_null(ctx->ipc_pipe, "ipc_comp_dev alloc failed"); + ctx->ipc_pipe->pipeline = ctx->p; + ctx->ipc_pipe->type = COMP_TYPE_PIPELINE; + ctx->ipc_pipe->id = pipeline_id; + ctx->ipc_pipe->core = 0; + list_item_append(&ctx->ipc_pipe->list, &ctx->ipc->comp_list); + + /* Step: Create host copier with HDA host output gateway */ + union ipc4_connector_node_id host_node_id = { .f = { + .dma_type = ipc4_hda_host_output_class, + .v_index = 0 + }}; + ctx->host_comp = create_copier(copier_module_id, host_instance_id, pipeline_id, + host_node_id); + zassert_not_null(ctx->host_comp, "host copier creation failed"); + + /* Assign pipeline to host component */ + ctx->host_comp->pipeline = ctx->p; + ctx->host_comp->ipc_config.type = SOF_COMP_HOST; + + LOG_INF("host copier created, comp_id = 0x%x", ctx->host_comp->ipc_config.id); + + /* Step: Create link copier with HDA link output gateway */ + union ipc4_connector_node_id link_node_id = { .f = { + .dma_type = ipc4_hda_link_output_class, + .v_index = 0 + }}; + ctx->dai_comp = create_copier(copier_module_id, dai_instance_id, pipeline_id, + link_node_id); + zassert_not_null(ctx->dai_comp, "DAI copier creation failed"); + + /* Assign pipeline to DAI component */ + ctx->dai_comp->pipeline = ctx->p; + ctx->dai_comp->ipc_config.type = SOF_COMP_DAI; + + LOG_INF("DAI copier created, comp_id = 0x%x", ctx->dai_comp->ipc_config.id); + + /* Step: Allocate a buffer to connect host -> DAI */ + ctx->buf = buffer_alloc(ctx->heap, 384, 0, 0, false); + zassert_not_null(ctx->buf, "buffer allocation failed"); + + if (run_in_user) { + struct k_thread *task_thread; + + /* Create a user-space thread to execute pipeline 
operations */ + k_thread_create(&ppl_user_thread, ppl_user_stack, PPL_USER_STACKSIZE, + pipeline_user_thread, ctx, NULL, NULL, + -1, K_USER, K_FOREVER); + + /* Add thread to LL memory domain so it can access pipeline memory */ + k_mem_domain_add_thread(zephyr_ll_mem_domain(), &ppl_user_thread); + + user_grant_dai_access_all(&ppl_user_thread); + user_grant_dma_access_all(&ppl_user_thread); + user_access_to_mailbox(zephyr_ll_mem_domain(), &ppl_user_thread); + zephyr_ll_grant_access(&ppl_user_thread); + + task_thread = scheduler_init_context(task); + zassert_not_null(task_thread); + + /* + * A hack for testing purposes, normally DAI module + * is created in user-space so it gets access + * automatically. Until that works, use dai_dd directly. + */ + struct dai_data *dai_dd = comp_get_drvdata(ctx->dai_comp); + struct k_mutex *dai_lock = dai_dd->dai->lock; + LOG_INF("dai_lock mutex %p", dai_lock); + k_thread_access_grant(task_thread, dai_lock); + k_thread_access_grant(&ppl_user_thread, dai_lock); + comp_grant_access_to_thread(ctx->dai_comp, task_thread); + comp_grant_access_to_thread(ctx->host_comp, task_thread); + + k_thread_start(&ppl_user_thread); + + LOG_INF("user thread started, waiting for completion"); + + k_thread_join(&ppl_user_thread, K_FOREVER); + } else { + /* Run pipeline operations directly in kernel context */ + pipeline_ops(ctx); + } + + /* Step: Clean up - reset, disconnect, free buffer, free components, free pipeline */ + /* Reset pipeline to bring components back to COMP_STATE_READY, + * required before ipc_comp_free() which rejects non-READY components. 
+ */ + ret = pipeline_reset(ctx->p, ctx->host_comp); + zassert_equal(ret, 0, "pipeline reset failed"); + + pipeline_disconnect(ctx->host_comp, ctx->buf, PPL_CONN_DIR_COMP_TO_BUFFER); + pipeline_disconnect(ctx->dai_comp, ctx->buf, PPL_CONN_DIR_BUFFER_TO_COMP); + + buffer_free(ctx->buf); + + /* Free components through IPC to properly remove from IPC device list */ + ret = ipc_comp_free(ctx->ipc, ctx->host_comp->ipc_config.id); + zassert_equal(ret, 0, "host comp free failed"); + + ret = ipc_comp_free(ctx->ipc, ctx->dai_comp->ipc_config.id); + zassert_equal(ret, 0, "DAI comp free failed"); + + /* Unregister pipeline from IPC component list */ + list_item_del(&ctx->ipc_pipe->list); + rfree(ctx->ipc_pipe); + + ret = pipeline_free(ctx->p); + zassert_equal(ret, 0, "pipeline free failed"); + + scheduler_free_context(); + + ret = schedule_task_free(task); + zassert_equal(ret, 0); + + sof_heap_free(heap, ctx); + + if (run_in_user) { + zephyr_ll_task_free(task); + k_mem_domain_remove_partition(zephyr_ll_mem_domain(), &userspace_ll_part); + } else { + sof_heap_free(NULL, task); + } + + LOG_INF("two component pipeline test complete"); +} + +ZTEST(userspace_ll, pipeline_two_components_kernel) +{ + pipeline_two_components(false); +} + +ZTEST(userspace_ll, pipeline_two_components_user) +{ + pipeline_two_components(true); +} + ZTEST_SUITE(userspace_ll, NULL, NULL, NULL, NULL, NULL); /** From b7d0c0cc0568a33f1698ed5a0b59781ef4f637d6 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Mar 2026 20:21:06 +0200 Subject: [PATCH 47/54] (section: IPC user support START) From 2d0f777c78f61f3426c242e033f6da6858f326d9 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 12 Mar 2026 16:33:15 +0200 Subject: [PATCH 48/54] WIP: ipc: implement user-space IPC handling Start a separate user thread to handle application IPC messages when SOF is built with CONFIG_SOF_USERSPACE_LL. This is a in-progress implementation that only handles one IPC message type and is thus not a full implementation. 
This does allow to proceed to test IPC user thread creation and the basic mechanism to handle messages. Signed-off-by: Kai Vehmanen --- src/include/sof/ipc/common.h | 35 +++++ src/ipc/ipc-common.c | 246 ++++++++++++++++++++++++++++++++--- src/ipc/ipc4/handler.c | 7 + src/ipc/ipc4/helper.c | 17 ++- zephyr/include/rtos/sof.h | 2 + 5 files changed, 285 insertions(+), 22 deletions(-) diff --git a/src/include/sof/ipc/common.h b/src/include/sof/ipc/common.h index e46fc10b9521..7f770ac904b4 100644 --- a/src/include/sof/ipc/common.h +++ b/src/include/sof/ipc/common.h @@ -53,6 +53,19 @@ extern struct tr_ctx ipc_tr; #define IPC_TASK_SECONDARY_CORE BIT(2) #define IPC_TASK_POWERDOWN BIT(3) +struct ipc_user { + struct k_thread *thread; + struct k_sem *sem; + struct k_event *event; + /** @brief Copy of IPC4 message primary word forwarded to user thread */ + uint32_t ipc_msg_pri; + /** @brief Copy of IPC4 message extension word forwarded to user thread */ + uint32_t ipc_msg_ext; + /** @brief Result code from user thread processing */ + int result; + struct ipc *ipc; +}; + struct ipc { struct k_spinlock lock; /* locking mechanism */ void *comp_data; @@ -74,6 +87,10 @@ struct ipc { struct task ipc_task; #endif +#ifdef CONFIG_SOF_USERSPACE_LL + struct ipc_user *ipc_user_pdata; +#endif + #ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS /* io performance measurement */ struct io_perf_data_item *io_perf_in_msg_count; @@ -95,6 +112,12 @@ struct ipc { extern struct task_ops ipc_task_ops; +#ifdef CONFIG_SOF_USERSPACE_LL + +struct ipc *ipc_get(void); + +#else + /** * \brief Get the IPC global context. * @return The global IPC context. @@ -104,6 +127,8 @@ static inline struct ipc *ipc_get(void) return sof_get()->ipc; } +#endif /* CONFIG_SOF_USERSPACE_LL */ + /** * \brief Initialise global IPC context. * @param[in,out] sof Global SOF context. 
@@ -250,4 +275,14 @@ void ipc_complete_cmd(struct ipc *ipc); /* GDB stub: should enter GDB after completing the IPC processing */ extern bool ipc_enter_gdb; +#ifdef CONFIG_SOF_USERSPACE_LL +struct ipc4_message_request; +/** + * @brief Forward an IPC4 command to the user-space thread. + * @param ipc4 Pointer to the IPC4 message request + * @return Result from user thread processing + */ +int ipc_user_forward_cmd(struct ipc4_message_request *ipc4); +#endif + #endif /* __SOF_DRIVERS_IPC_H__ */ diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index a62223dc97a2..1b8bdc0fe895 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -35,6 +35,16 @@ #include #include +#ifdef __ZEPHYR__ +#include +#endif + +#ifdef CONFIG_SOF_USERSPACE_LL +#include +#include +#include +#endif + #include LOG_MODULE_REGISTER(ipc, CONFIG_SOF_LOG_LEVEL); @@ -43,6 +53,18 @@ SOF_DEFINE_REG_UUID(ipc); DECLARE_TR_CTX(ipc_tr, SOF_UUID(ipc_uuid), LOG_LEVEL_INFO); +#ifdef CONFIG_SOF_USERSPACE_LL +K_APPMEM_PARTITION_DEFINE(ipc_context_part); + +K_APP_BMEM(ipc_context_part) static struct ipc ipc_context; + +struct ipc *ipc_get(void) +{ + return &ipc_context; +} +EXPORT_SYMBOL(ipc_get); +#endif + int ipc_process_on_core(uint32_t core, bool blocking) { struct ipc *ipc = ipc_get(); @@ -256,7 +278,11 @@ void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority) list_item_append(&msg->list, &ipc->msg_list); } +#ifdef CONFIG_SOF_USERSPACE_LL + LOG_WRN("Skipping IPC worker schedule. 
TODO to fix\n"); +#else schedule_ipc_worker(); +#endif k_spin_unlock(&ipc->lock, key); } @@ -288,29 +314,207 @@ void ipc_schedule_process(struct ipc *ipc) #endif } +#ifdef CONFIG_SOF_USERSPACE_LL +/* User-space thread for pipeline_two_components test */ +#define IPC_USER_STACKSIZE 8192 + +#define IPC_USER_EVENT_CMD BIT(0) +#define IPC_USER_EVENT_STOP BIT(1) + +static struct k_thread ipc_user_thread; +static K_THREAD_STACK_DEFINE(ipc_user_stack, IPC_USER_STACKSIZE); + +/** + * @brief Forward an IPC4 command to the user-space thread. + * + * Called from kernel context (IPC EDF task) to forward the IPC4 + * message to the user-space thread for processing. Sets + * IPC_TASK_IN_THREAD in task_mask so the host is not signaled + * until the user thread completes. Blocks until the user thread + * finishes processing and returns the result. + * + * @param ipc4 Pointer to the IPC4 message request + * @return Result from user thread processing + */ +int ipc_user_forward_cmd(struct ipc4_message_request *ipc4) +{ + struct ipc *ipc = ipc_get(); + struct ipc_user *pdata = ipc->ipc_user_pdata; + k_spinlock_key_t key; + int ret; + + LOG_DBG("IPC: forward cmd %08x", ipc4->primary.dat); + + /* Copy message words — original buffer may be reused */ + pdata->ipc_msg_pri = ipc4->primary.dat; + pdata->ipc_msg_ext = ipc4->extension.dat; + pdata->ipc = ipc; + + /* Prevent host completion until user thread finishes */ + key = k_spin_lock(&ipc->lock); + ipc->task_mask |= IPC_TASK_IN_THREAD; + k_spin_unlock(&ipc->lock, key); + + /* Wake the user thread */ + k_event_set(pdata->event, IPC_USER_EVENT_CMD); + + /* Wait for user thread to complete */ + ret = k_sem_take(pdata->sem, K_MSEC(10)); + if (ret) { + LOG_ERR("IPC user: sem error %d\n", ret); + return ret; + } + + /* Clear the task mask bit and check for completion */ + key = k_spin_lock(&ipc->lock); + ipc->task_mask &= ~IPC_TASK_IN_THREAD; + ipc_complete_cmd(ipc); + k_spin_unlock(&ipc->lock, key); + + return pdata->result; +} + +/** + * 
User-space thread entry point for handling forwarded IPC4 messages + * when CONFIG_SOF_USERSPACE_LL is enabled. + * p1 points to the struct ipc_user context shared with the kernel side. + */ +static void ipc_user_thread_fn(void *p1, void *p2, void *p3) +{ + struct ipc_user *ipc_user = p1; + + ARG_UNUSED(p2); + ARG_UNUSED(p3); + + __ASSERT(k_is_user_context(), "expected user context"); + + /* Signal startup complete — unblocks init waiting on semaphore */ + k_sem_give(ipc_user->sem); + LOG_INF("IPC user-space thread started"); + + for (;;) { + uint32_t mask = k_event_wait_safe(ipc_user->event, + IPC_USER_EVENT_CMD | IPC_USER_EVENT_STOP, + false, K_MSEC(5000)); + + LOG_DBG("IPC user wake, mask %u", mask); + + if (mask & IPC_USER_EVENT_CMD) { + struct ipc4_pipeline_create pipe_msg; + + /* Reconstruct the IPC4 message from copied words */ + pipe_msg.primary.dat = ipc_user->ipc_msg_pri; + pipe_msg.extension.dat = ipc_user->ipc_msg_ext; + + /* Execute pipeline creation in user context */ + ipc_user->result = ipc_pipeline_new(ipc_user->ipc, (ipc_pipe_new *)&pipe_msg); + + /* Signal completion — kernel side will finish IPC */ + k_sem_give(ipc_user->sem); + } + + if (mask & IPC_USER_EVENT_STOP) + break; + } +} + +__cold int ipc_user_init(void) +{ + struct ipc *ipc = ipc_get(); + struct ipc_user *ipc_user = sof_heap_alloc(zephyr_ll_user_heap(), SOF_MEM_FLAG_USER, + sizeof(*ipc_user), 0); + int ret; + + ipc_user->sem = k_object_alloc(K_OBJ_SEM); + if (!ipc_user->sem) { + LOG_ERR("user IPC sem alloc failed"); + k_panic(); + } + + ret = k_mem_domain_add_partition(zephyr_ll_mem_domain(), &ipc_context_part); + + k_sem_init(ipc_user->sem, 0, 1); + + /* Allocate kernel objects for the user-space thread */ + ipc_user->event = k_object_alloc(K_OBJ_EVENT); + if (!ipc_user->event) { + LOG_ERR("user IPC event alloc failed"); + k_panic(); + } + k_event_init(ipc_user->event); + + k_thread_create(&ipc_user_thread, ipc_user_stack, IPC_USER_STACKSIZE, + ipc_user_thread_fn, ipc_user, NULL, NULL, + -1, K_USER, K_FOREVER); + 
k_thread_access_grant(&ipc_user_thread, ipc_user->sem, ipc_user->event); + user_grant_dai_access_all(&ipc_user_thread); + user_grant_dma_access_all(&ipc_user_thread); + user_access_to_mailbox(zephyr_ll_mem_domain(), &ipc_user_thread); + zephyr_ll_grant_access(&ipc_user_thread); + k_mem_domain_add_thread(zephyr_ll_mem_domain(), &ipc_user_thread); + + k_thread_name_set(&ipc_user_thread, __func__); + + /* Store references in ipc struct so kernel handler can forward commands */ + ipc->ipc_user_pdata = ipc_user; + + k_thread_start(&ipc_user_thread); + + /* Wait for user thread startup — consumes the initial k_sem_give from thread */ + k_sem_take(ipc->ipc_user_pdata->sem, K_FOREVER); + + return 0; +} +#else +static int ipc_user_init(void) +{ + return 0; +} +#endif /* CONFIG_SOF_USERSPACE_LL */ + __cold int ipc_init(struct sof *sof) { + struct k_heap *heap; + struct ipc *ipc; + assert_can_be_cold(); tr_dbg(&ipc_tr, "entry"); +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); + + ipc = ipc_get(); + memset(ipc, 0, sizeof(*ipc)); +#else + heap = NULL; + /* init ipc data */ - sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*sof->ipc)); - if (!sof->ipc) { + sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*ipc)); + if (!ipc) { tr_err(&ipc_tr, "Unable to allocate IPC data"); return -ENOMEM; } - sof->ipc->comp_data = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - SOF_IPC_MSG_MAX_SIZE); - if (!sof->ipc->comp_data) { + ipc = sof->ipc; +#endif + + /* works? 
yes */ + //return 0; + + printk("ipc %p\n", ipc); + + ipc->comp_data = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + SOF_IPC_MSG_MAX_SIZE, 0); + if (!ipc->comp_data) { tr_err(&ipc_tr, "Unable to allocate IPC component data"); - rfree(sof->ipc); + sof_heap_free(heap, ipc); return -ENOMEM; } + memset(ipc->comp_data, 0, SOF_IPC_MSG_MAX_SIZE); - k_spinlock_init(&sof->ipc->lock); - list_init(&sof->ipc->msg_list); - list_init(&sof->ipc->comp_list); + k_spinlock_init(&ipc->lock); + list_init(&ipc->msg_list); + list_init(&ipc->comp_list); #ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS struct io_perf_data_item init_data = {IO_PERF_IPC_ID, @@ -319,20 +523,17 @@ __cold int ipc_init(struct sof *sof) IO_PERF_POWERED_UP_ENABLED, IO_PERF_D0IX_POWER_MODE, 0, 0, 0 }; - io_perf_monitor_init_data(&sof->ipc->io_perf_in_msg_count, &init_data); + io_perf_monitor_init_data(&ipc->io_perf_in_msg_count, &init_data); init_data.direction = IO_PERF_OUTPUT_DIRECTION; - io_perf_monitor_init_data(&sof->ipc->io_perf_out_msg_count, &init_data); + io_perf_monitor_init_data(&ipc->io_perf_out_msg_count, &init_data); #endif -#if CONFIG_SOF_BOOT_TEST_STANDALONE - LOG_INF("SOF_BOOT_TEST_STANDALONE, disabling IPC."); - return 0; -#endif #ifdef __ZEPHYR__ - struct k_thread *thread = &sof->ipc->ipc_send_wq.thread; + struct k_thread *thread = &ipc->ipc_send_wq.thread; - k_work_queue_start(&sof->ipc->ipc_send_wq, ipc_send_wq_stack, + k_work_queue_init(&ipc->ipc_send_wq); + k_work_queue_start(&ipc->ipc_send_wq, ipc_send_wq_stack, K_THREAD_STACK_SIZEOF(ipc_send_wq_stack), 1, NULL); k_thread_suspend(thread); @@ -342,10 +543,17 @@ __cold int ipc_init(struct sof *sof) k_thread_resume(thread); - k_work_init_delayable(&sof->ipc->z_delayed_work, ipc_work_handler); + k_work_init_delayable(&ipc->z_delayed_work, ipc_work_handler); +#endif + + ipc_user_init(); + +#if CONFIG_SOF_BOOT_TEST_STANDALONE + LOG_INF("SOF_BOOT_TEST_STANDALONE, skipping platform IPC init."); + return 0; #endif - return 
platform_ipc_init(sof->ipc); + return platform_ipc_init(ipc); } /* Locking: call with ipc->lock held and interrupts disabled */ diff --git a/src/ipc/ipc4/handler.c b/src/ipc/ipc4/handler.c index fc64c904ef80..39c972294740 100644 --- a/src/ipc/ipc4/handler.c +++ b/src/ipc/ipc4/handler.c @@ -127,6 +127,7 @@ static inline const struct ipc4_pipeline_set_state_data *ipc4_get_pipeline_data( /* * Global IPC Operations. */ +#ifndef CONFIG_SOF_USERSPACE_LL __cold static int ipc4_new_pipeline(struct ipc4_message_request *ipc4) { struct ipc *ipc = ipc_get(); @@ -135,6 +136,7 @@ __cold static int ipc4_new_pipeline(struct ipc4_message_request *ipc4) return ipc_pipeline_new(ipc, (ipc_pipe_new *)ipc4); } +#endif __cold static int ipc4_delete_pipeline(struct ipc4_message_request *ipc4) { @@ -805,7 +807,12 @@ static int ipc4_process_glb_message(struct ipc4_message_request *ipc4) /* pipeline settings */ case SOF_IPC4_GLB_CREATE_PIPELINE: + /* Implementation in progress: forward only CREATE_PIPELINE for now */ +#ifdef CONFIG_SOF_USERSPACE_LL + ret = ipc_user_forward_cmd(ipc4); +#else ret = ipc4_new_pipeline(ipc4); +#endif break; case SOF_IPC4_GLB_DELETE_PIPELINE: ret = ipc4_delete_pipeline(ipc4); diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c index 3404906b9771..0bbc2567f1d6 100644 --- a/src/ipc/ipc4/helper.c +++ b/src/ipc/ipc4/helper.c @@ -340,9 +340,16 @@ __cold static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc, struct ipc_comp_dev *ipc_pipe; struct pipeline *pipe; struct ipc *ipc = ipc_get(); + struct k_heap *heap = NULL; assert_can_be_cold(); +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif + + LOG_INF("pipe_desc %x, instance %u", pipe_desc, pipe_desc->primary.r.instance_id); + /* check whether pipeline id is already taken or in use */ ipc_pipe = ipc_get_pipeline_by_id(ipc, pipe_desc->primary.r.instance_id); if (ipc_pipe) { @@ -352,8 +359,9 @@ __cold static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc, } 
/* create the pipeline */ - pipe = pipeline_new(NULL, pipe_desc->primary.r.instance_id, + pipe = pipeline_new(heap, pipe_desc->primary.r.instance_id, pipe_desc->primary.r.ppl_priority, 0, pparams); + LOG_INF("pipeline_new() -> %p", pipe); if (!pipe) { tr_err(&ipc_tr, "ipc: pipeline_new() failed"); return IPC4_OUT_OF_MEMORY; @@ -368,12 +376,13 @@ pipe->core = pipe_desc->extension.r.core_id; /* allocate the IPC pipeline container */ - ipc_pipe = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - sizeof(struct ipc_comp_dev)); + ipc_pipe = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + sizeof(struct ipc_comp_dev), 0); if (!ipc_pipe) { pipeline_free(pipe); return IPC4_OUT_OF_MEMORY; } + memset(ipc_pipe, 0, sizeof(*ipc_pipe)); ipc_pipe->pipeline = pipe; ipc_pipe->type = COMP_TYPE_PIPELINE; @@ -384,6 +393,8 @@ /* add new pipeline to the list */ list_item_append(&ipc_pipe->list, &ipc->comp_list); + LOG_INF("success"); + return IPC4_SUCCESS; } diff --git a/zephyr/include/rtos/sof.h b/zephyr/include/rtos/sof.h index 1de60c5cd6ff..2c55d08a9d80 100644 --- a/zephyr/include/rtos/sof.h +++ b/zephyr/include/rtos/sof.h @@ -46,8 +46,10 @@ struct sof { int argc; char **argv; +#ifndef CONFIG_SOF_USERSPACE_LL /* ipc */ struct ipc *ipc; +#endif /* system agent */ struct sa *sa; From 71fbedd9c8e0e65ccf8df4203aa87bba82d6482c Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Mar 2026 19:47:17 +0200 Subject: [PATCH 49/54] WIP: audio: pipeline: disable pipeline position registers Disable the code to set up pipeline position registers using the sof_get() singleton instance. This design is not feasible when pipelines are running in user-space and an alternative implementation is needed. This is not required in initial LL user testing as position reporting via pipeline register is not used in all configurations. 
Signed-off-by: Kai Vehmanen --- src/audio/pipeline/pipeline-graph.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 39125b6346e3..24bde434b484 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -138,19 +138,28 @@ struct pipeline *pipeline_new(struct k_heap *heap, uint32_t pipeline_id, uint32_ p->pipeline_id = pipeline_id; p->status = COMP_STATE_INIT; p->trigger.cmd = COMP_TRIGGER_NO_ACTION; + +#ifdef CONFIG_SOF_USERSPACE_LL + LOG_WRN("pipeline trace settings cannot be copied"); +#else ret = memcpy_s(&p->tctx, sizeof(struct tr_ctx), &pipe_tr, sizeof(struct tr_ctx)); if (ret < 0) { pipe_err(p, "failed to copy trace settings"); goto free; } +#endif +#ifdef CONFIG_SOF_USERSPACE_LL + LOG_WRN("pipeline position reporting not available"); +#else ret = pipeline_posn_offset_get(&p->posn_offset); if (ret < 0) { pipe_err(p, "pipeline_posn_offset_get failed %d", ret); goto free; } +#endif /* just for retrieving valid ipc_msg header */ ipc_build_stream_posn(&posn, SOF_IPC_STREAM_TRIG_XRUN, p->comp_id); From e8fd63107a36d2b27bb1d2016461d104fe757e3f Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 13 Mar 2026 15:56:40 +0200 Subject: [PATCH 50/54] zephyr: test: userspace: add ipc4_create_pipeline_check Add a test that sends SOF_IPC4_GLB_CREATE_PIPELINE message via ipc_cmd() interface. This test can be used to test SOF when built with CONFIG_SOF_USERSPACE_LL. Handling of audio pipeline IPC messages (like CREATE_PIPELINE) will be routed to a user-space IPC thread. 
Signed-off-by: Kai Vehmanen --- zephyr/test/userspace/test_ll_task.c | 78 ++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/zephyr/test/userspace/test_ll_task.c b/zephyr/test/userspace/test_ll_task.c index 5a4f5e058131..4bef2a953f61 100644 --- a/zephyr/test/userspace/test_ll_task.c +++ b/zephyr/test/userspace/test_ll_task.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -148,6 +149,83 @@ ZTEST(userspace_ll, pipeline_check) pipeline_check(); } +/** + * Test creating a pipeline via IPC4 GLB_CREATE_PIPELINE message. + * + * Unlike pipeline_check() which calls pipeline_new() directly, + * this test constructs an ipc4_pipeline_create message and sends + * it through ipc_cmd(), exercising the full IPC4 command dispatch + * path: ipc_cmd() -> ipc4_process_glb_message() -> + * ipc_user_forward_cmd() (userspace) or ipc4_new_pipeline(). + */ +static void ipc4_create_pipeline_check(void) +{ + struct ipc4_pipeline_create pipe_desc = {0}; + struct ipc *ipc = ipc_get(); + struct ipc_cmd_hdr *hdr; + struct ipc_comp_dev *ipc_pipe; + uint32_t pipeline_id = 10; + uint32_t priority = 3; + int ret; + + /* Construct IPC4 CREATE_PIPELINE message */ + pipe_desc.primary.r.type = SOF_IPC4_GLB_CREATE_PIPELINE; + pipe_desc.primary.r.msg_tgt = SOF_IPC4_MESSAGE_TARGET_FW_GEN_MSG; + pipe_desc.primary.r.rsp = SOF_IPC4_MESSAGE_DIR_MSG_REQUEST; + pipe_desc.primary.r.instance_id = pipeline_id; + pipe_desc.primary.r.ppl_priority = priority; + pipe_desc.primary.r.ppl_mem_size = 0; + + pipe_desc.extension.r.core_id = 0; + pipe_desc.extension.r.lp = 0; + pipe_desc.extension.r.payload = 0; + + /* + * Populate handler.c's internal IPC message buffer. + * ipc_compact_read_msg() returns a pointer to the static + * msg_data.msg_in used by ipc_cmd() via ipc4_get_message_request(). + * Overwriting through this pointer sets up the message for dispatch. 
+ */ + hdr = ipc_compact_read_msg(); + hdr->pri = pipe_desc.primary.dat; + hdr->ext = pipe_desc.extension.dat; + + /* Send through the full IPC command dispatch path */ + ipc_cmd(hdr); + + LOG_INF("ipc_cmd() returned for pipeline id=%u", pipeline_id); + + /* Verify pipeline is registered in IPC component list */ + ipc_pipe = ipc_get_pipeline_by_id(ipc, pipeline_id); + zassert_not_null(ipc_pipe, "pipeline not found in IPC comp list"); + zassert_equal(ipc_pipe->type, COMP_TYPE_PIPELINE, "wrong comp type"); + zassert_equal(ipc_pipe->id, pipeline_id, "pipeline id mismatch"); + zassert_not_null(ipc_pipe->pipeline, "pipeline struct is NULL"); + zassert_equal(ipc_pipe->pipeline->pipeline_id, pipeline_id, + "pipeline->pipeline_id mismatch"); + zassert_equal(ipc_pipe->pipeline->priority, priority, + "pipeline priority mismatch"); + zassert_equal(ipc_pipe->pipeline->time_domain, SOF_TIME_DOMAIN_TIMER, + "time_domain not set"); + + LOG_INF("pipeline verified in IPC comp list"); + + /* Clean up through IPC free path */ + ret = ipc_pipeline_free(ipc, pipeline_id); + zassert_equal(ret, 0, "ipc_pipeline_free failed: %%d", ret); + + /* Verify pipeline is removed from IPC component list */ + ipc_pipe = ipc_get_pipeline_by_id(ipc, pipeline_id); + zassert_is_null(ipc_pipe, "pipeline still in IPC comp list after free"); + + LOG_INF("ipc4 create pipeline test complete"); +} + +ZTEST(userspace_ll, ipc4_create_pipeline_check) +{ + ipc4_create_pipeline_check(); +} + /* Copier UUID: 9ba00c83-ca12-4a83-943c-1fa2e82f9dda */ static const uint8_t copier_uuid[16] = { 0x83, 0x0c, 0xa0, 0x9b, 0x12, 0xca, 0x83, 0x4a, From 945b06b7cc5cfb1e95b238d3e15d90a62efd79bb Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 19 Feb 2026 16:25:31 +0200 Subject: [PATCH 51/54] (section WIP mandatory changes START) From 2e65960c4aa8054090b4d76fd9f0cc259581ca12 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Tue, 17 Feb 2026 18:40:27 +0200 Subject: [PATCH 52/54] WIP: audio: comp_buffer: do not free the module 
heap in free() The logic in comp_buffer_free() that checks the module heap reference count and, when it drops to zero, frees the heap itself does not belong in the buffer free method. TODO: needs cleanup before submission. Signed-off-by: Kai Vehmanen --- src/audio/buffers/comp_buffer.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/audio/buffers/comp_buffer.c b/src/audio/buffers/comp_buffer.c index 56c9ec56f6f4..328b4ba571f6 100644 --- a/src/audio/buffers/comp_buffer.c +++ b/src/audio/buffers/comp_buffer.c @@ -166,12 +166,18 @@ static void comp_buffer_free(struct sof_audio_buffer *audio_buffer) sof_heap_free(heap, buffer->stream.addr); sof_heap_free(heap, buffer); +#ifndef CONFIG_SOF_USERSPACE_LL + /* + * TODO: this is not the correct place to free up + * mod_heap_user + */ if (heap) { struct dp_heap_user *mod_heap_user = container_of(heap, struct dp_heap_user, heap); if (!--mod_heap_user->client_count) rfree(mod_heap_user); } +#endif } APP_TASK_DATA static const struct source_ops comp_buffer_source_ops = { From 48d8ea90df75ccd3af7010e300c1386424939235 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:14:39 +0200 Subject: [PATCH 53/54] HACK: audio: collection of feature limitations to run LL in user This is a set of temporary changes to audio code to remove calls to privileged interfaces that are not mandatory to run simple audio tests. These need proper solutions before all use-cases can run in the user-space LL version. 
Signed-off-by: Kai Vehmanen --- src/audio/pipeline/pipeline-graph.c | 6 +++++- src/audio/pipeline/pipeline-schedule.c | 4 ++++ zephyr/include/sof/lib/cpu.h | 4 ++++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 24bde434b484..1a2f5bf3f833 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -506,6 +506,10 @@ struct comp_dev *pipeline_get_dai_comp(uint32_t pipeline_id, int dir) */ struct comp_dev *pipeline_get_dai_comp_latency(uint32_t pipeline_id, uint32_t *latency) { +#ifdef CONFIG_SOF_USERSPACE_LL + LOG_WRN("latency cannot be computed in user-space pipelines!"); + *latency = 0; +#else struct ipc_comp_dev *ipc_sink; struct ipc_comp_dev *ipc_source; struct comp_dev *source; @@ -573,7 +577,7 @@ struct comp_dev *pipeline_get_dai_comp_latency(uint32_t pipeline_id, uint32_t *l /* Get a next sink component */ ipc_sink = ipc_get_ppl_sink_comp(ipc, source->pipeline->pipeline_id); } - +#endif return NULL; } EXPORT_SYMBOL(pipeline_get_dai_comp_latency); diff --git a/src/audio/pipeline/pipeline-schedule.c b/src/audio/pipeline/pipeline-schedule.c index 45fd1eed639c..76b509e2c648 100644 --- a/src/audio/pipeline/pipeline-schedule.c +++ b/src/audio/pipeline/pipeline-schedule.c @@ -154,7 +154,11 @@ static enum task_state pipeline_task_cmd(struct pipeline *p, p->trigger.cmd = COMP_TRIGGER_NO_ACTION; +#ifndef CONFIG_SOF_USERSPACE_LL ipc_msg_reply(reply); +#else + LOG_WRN("IPC reply from pipeline thread missing in user-space."); +#endif return err; } diff --git a/zephyr/include/sof/lib/cpu.h b/zephyr/include/sof/lib/cpu.h index c23405e85121..533cb29f3602 100644 --- a/zephyr/include/sof/lib/cpu.h +++ b/zephyr/include/sof/lib/cpu.h @@ -55,7 +55,11 @@ static inline bool cpu_is_primary(int id) static inline bool cpu_is_me(int id) { +#ifdef CONFIG_SOF_USERSPACE_LL + return true; +#else return id == cpu_get_id(); +#endif } int cpu_enable_core(int id); 
From 7317b18a86e7a5d023ccaf56f47c6118b368017c Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Thu, 26 Feb 2026 17:39:22 +0200 Subject: [PATCH 54/54] WIP: audio: pipeline: do not disable irqs if scheduling in user-space pipeline_schedule_triggered() cannot use irq_local_disable() and irq_local_enable() when running in user-space. Comment this out for now, but this needs a better solution for multicore scenarios. Signed-off-by: Kai Vehmanen --- src/audio/pipeline/pipeline-schedule.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/audio/pipeline/pipeline-schedule.c b/src/audio/pipeline/pipeline-schedule.c index 76b509e2c648..0e9a721f8180 100644 --- a/src/audio/pipeline/pipeline-schedule.c +++ b/src/audio/pipeline/pipeline-schedule.c @@ -286,6 +286,8 @@ void pipeline_schedule_triggered(struct pipeline_walk_context *ctx, struct pipeline_data *ppl_data = ctx->comp_data; struct list_item *tlist; struct pipeline *p; + +#ifndef CONFIG_SOF_USERSPACE_LL uint32_t flags; /* @@ -294,6 +296,7 @@ * immediately before all pipelines achieved a consistent state. */ irq_local_disable(flags); +#endif switch (cmd) { case COMP_TRIGGER_PAUSE: @@ -349,8 +352,9 @@ p->xrun_bytes = 1; } } - +#ifndef CONFIG_SOF_USERSPACE_LL irq_local_enable(flags); +#endif } int pipeline_comp_ll_task_init(struct pipeline *p)