Browse Source

tests: ipc_service: Test restarting session in different scenarios

This commit adds a test that checks if disconnecting
and restarting the IPC session works correctly. The
test is also focused on the "unbound" callback.

Signed-off-by: Dominik Kilian <Dominik.Kilian@nordicsemi.no>
Co-authored-by: Radoslaw Koppel <radoslaw.koppel@nordicsemi.no>
pull/85789/head
Dominik Kilian 6 months ago committed by Benjamin Cabé
parent
commit
0e4efdb2a6
  1. 2
      scripts/ci/check_compliance.py
  2. 16
      tests/subsys/ipc/ipc_sessions/CMakeLists.txt
  3. 37
      tests/subsys/ipc/ipc_sessions/Kconfig
  4. 13
      tests/subsys/ipc/ipc_sessions/Kconfig.sysbuild
  5. 7
      tests/subsys/ipc/ipc_sessions/boards/nrf5340dk_nrf5340_cpuapp.conf
  6. 37
      tests/subsys/ipc/ipc_sessions/boards/nrf5340dk_nrf5340_cpuapp.overlay
  7. 12
      tests/subsys/ipc/ipc_sessions/boards/nrf54h20dk_nrf54h20_cpuapp.overlay
  8. 26
      tests/subsys/ipc/ipc_sessions/boards/nrf54h20dk_nrf54h20_cpuapp_cpuppr.overlay
  9. 81
      tests/subsys/ipc/ipc_sessions/common/test_commands.h
  10. 29
      tests/subsys/ipc/ipc_sessions/interoperability/Kconfig
  11. 90
      tests/subsys/ipc/ipc_sessions/interoperability/Kconfig.icmsg_v1
  12. 392
      tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.c
  13. 168
      tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.h
  14. 86
      tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.c
  15. 5
      tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.h
  16. 255
      tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.c
  17. 234
      tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.h
  18. 10
      tests/subsys/ipc/ipc_sessions/prj.conf
  19. 19
      tests/subsys/ipc/ipc_sessions/remote/CMakeLists.txt
  20. 11
      tests/subsys/ipc/ipc_sessions/remote/Kconfig
  21. 36
      tests/subsys/ipc/ipc_sessions/remote/boards/nrf5340dk_nrf5340_cpunet.overlay
  22. 1
      tests/subsys/ipc/ipc_sessions/remote/boards/nrf54h20dk_nrf54h20_cpuppr.conf
  23. 31
      tests/subsys/ipc/ipc_sessions/remote/boards/nrf54h20dk_nrf54h20_cpuppr.overlay
  24. 15
      tests/subsys/ipc/ipc_sessions/remote/boards/nrf54h20dk_nrf54h20_cpurad.overlay
  25. 23
      tests/subsys/ipc/ipc_sessions/remote/prj.conf
  26. 452
      tests/subsys/ipc/ipc_sessions/remote/src/remote.c
  27. 65
      tests/subsys/ipc/ipc_sessions/src/data_queue.c
  28. 25
      tests/subsys/ipc/ipc_sessions/src/data_queue.h
  29. 469
      tests/subsys/ipc/ipc_sessions/src/main.c
  30. 26
      tests/subsys/ipc/ipc_sessions/sysbuild.cmake
  31. 4
      tests/subsys/ipc/ipc_sessions/sysbuild_cpuppr.conf
  32. 50
      tests/subsys/ipc/ipc_sessions/testcase.yaml
  33. 12
      tests/subsys/ipc/pbuf/src/main.c

2
scripts/ci/check_compliance.py

@ -1059,6 +1059,8 @@ flagged. @@ -1059,6 +1059,8 @@ flagged.
"FOO_SETTING_2",
"HEAP_MEM_POOL_ADD_SIZE_", # Used as an option matching prefix
"HUGETLBFS", # Linux, in boards/xtensa/intel_adsp_cavs25/doc
"IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS", # Used in ICMsg tests for intercompatibility
# with older versions of the ICMsg.
"LIBGCC_RTLIB",
"LLVM_USE_LD", # Both LLVM_USE_* are in cmake/toolchain/llvm/Kconfig
"LLVM_USE_LLD", # which are only included if LLVM is selected but

16
tests/subsys/ipc/ipc_sessions/CMakeLists.txt

@ -0,0 +1,16 @@ @@ -0,0 +1,16 @@
# Copyright 2021 Google LLC
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(ipc_service)
zephyr_include_directories(./common)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_ICMSG_V1 interoperability/icmsg_v1.c)
zephyr_sources_ifdef(CONFIG_PBUF_V1 interoperability/pbuf_v1.c)
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_BACKEND_ICMSG_V1 interoperability/ipc_icmsg_v1.c)

37
tests/subsys/ipc/ipc_sessions/Kconfig

@ -0,0 +1,37 @@ @@ -0,0 +1,37 @@
#
# Copyright (c) 2024 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
rsource "interoperability/Kconfig"
menu "Zephyr"
source "Kconfig.zephyr"
endmenu
config IPC_TEST_MSG_HEAP_SIZE
int "The heap to copy processed messages"
default 512
help
Internal heap where all the message data will be copied to be processed
linearly in tests.
config IPC_TEST_SKIP_CORE_RESET
bool "Skip the tests that includes core resetting"
help
Some of the cores cannot be safely restarted.
Skip the tests that require it in such cases.
config IPC_TEST_BLOCK_SIZE
int "Block size for multiple transfers test"
default 32
config IPC_TEST_BLOCK_CNT
int "Number of blocks for multiple transfers test"
default 8000
config IPC_TEST_SKIP_UNBOUND
bool "Skip unbound tests"
help
Whether to skip tests that require unbound callback functionality.

13
tests/subsys/ipc/ipc_sessions/Kconfig.sysbuild

@ -0,0 +1,13 @@ @@ -0,0 +1,13 @@
#
# Copyright (c) 2024 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
source "${ZEPHYR_BASE}/share/sysbuild/Kconfig"
config REMOTE_BOARD
string "The board used for remote target"
default "nrf5340dk/nrf5340/cpunet" if BOARD_NRF5340DK_NRF5340_CPUAPP
default "nrf5340dk/nrf5340/cpunet" if BOARD_NRF5340DK_NRF5340_CPUAPP_NS
default "nrf54h20dk/nrf54h20/cpurad" if BOARD_NRF54H20DK_NRF54H20_CPUAPP

7
tests/subsys/ipc/ipc_sessions/boards/nrf5340dk_nrf5340_cpuapp.conf

@ -0,0 +1,7 @@ @@ -0,0 +1,7 @@
#
# Copyright (c) 2024 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
CONFIG_SOC_NRF53_CPUNET_ENABLE=y

37
tests/subsys/ipc/ipc_sessions/boards/nrf5340dk_nrf5340_cpuapp.overlay

@ -0,0 +1,37 @@ @@ -0,0 +1,37 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
/delete-node/ &ipc0;
/ {
chosen {
/delete-property/ zephyr,ipc_shm;
/delete-property/ zephyr,bt-hci;
};
reserved-memory {
/delete-node/ memory@20070000;
sram_tx: memory@20070000 {
reg = <0x20070000 0x8000>;
};
sram_rx: memory@20078000 {
reg = <0x20078000 0x8000>;
};
};
ipc0: ipc0 {
compatible = "zephyr,ipc-icmsg";
tx-region = <&sram_tx>;
rx-region = <&sram_rx>;
mboxes = <&mbox 0>, <&mbox 1>;
mbox-names = "tx", "rx";
dcache-alignment = <8>;
unbound = "detect";
status = "okay";
};
};

12
tests/subsys/ipc/ipc_sessions/boards/nrf54h20dk_nrf54h20_cpuapp.overlay

@ -0,0 +1,12 @@ @@ -0,0 +1,12 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
* SPDX-License-Identifier: Apache-2.0
*/
/* Replace default ipc0 instance */
&ipc0 {
compatible = "zephyr,ipc-icmsg";
/delete-property/ tx-blocks;
/delete-property/ rx-blocks;
unbound = "enable";
};

26
tests/subsys/ipc/ipc_sessions/boards/nrf54h20dk_nrf54h20_cpuapp_cpuppr.overlay

@ -0,0 +1,26 @@ @@ -0,0 +1,26 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
* SPDX-License-Identifier: Apache-2.0
*/
/* Replace default ipc0 instance */
/delete-node/ &ipc0;
ipc0: &cpuapp_cpuppr_ipc {
status = "okay";
unbound = "detect";
};
&cpuppr_vevif {
status = "okay";
};
&cpuapp_bellboard {
status = "okay";
};
/ {
chosen {
/delete-property/ zephyr,bt-hci;
};
};

81
tests/subsys/ipc/ipc_sessions/common/test_commands.h

@ -0,0 +1,81 @@ @@ -0,0 +1,81 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef TEST_COMMANDS_H
#include <stdint.h>
/**
 * @brief Test commands executable by remote
 */
enum ipc_test_commands {
	IPC_TEST_CMD_NONE,     /**< Command to be ignored */
	IPC_TEST_CMD_PING,     /**< Respond with the @ref IPC_TEST_CMD_PONG message */
	IPC_TEST_CMD_PONG,     /**< Expected response to IPC_TEST_CMD_PING */
	IPC_TEST_CMD_ECHO,     /**< Respond with the same data */
	IPC_TEST_CMD_ECHO_RSP, /**< Echo response */
	IPC_TEST_CMD_REBOND,   /**< Unbond and rebond back the whole interface */
	IPC_TEST_CMD_REBOOT,   /**< Restart remote CPU after a given delay */
	/* Commands used for data transfer test */
	IPC_TEST_CMD_RXSTART,  /**< Start receiving data */
	IPC_TEST_CMD_TXSTART,  /**< Start sending data */
	IPC_TEST_CMD_RXGET,    /**< Get rx status */
	IPC_TEST_CMD_TXGET,    /**< Get tx status */
	IPC_TEST_CMD_XSTAT,    /**< rx/tx status response */
	IPC_TEST_CMD_XDATA,    /**< Transfer data block */
	/* End of commands used for data transfer test */
};
/**
* @brief Base command structure
*/
struct ipc_test_cmd {
uint32_t cmd; /**< The command of @ref ipc_test_command type */
uint8_t data[]; /**< Command data depending on the command itself */
};
/**
* @brief Rebond command structure
*/
struct ipc_test_cmd_rebond {
struct ipc_test_cmd base;
uint32_t timeout_ms;
};
/**
* @brief Reboot command structure
*/
struct ipc_test_cmd_reboot {
struct ipc_test_cmd base;
uint32_t timeout_ms;
};
/**
* @brief Start the rx or tx transfer
*/
struct ipc_test_cmd_xstart {
struct ipc_test_cmd base;
uint32_t blk_size;
uint32_t blk_cnt;
uint32_t seed;
};
/**
* @brief Get the status of rx or tx transfer
*/
struct ipc_test_cmd_xstat {
struct ipc_test_cmd base;
uint32_t blk_cnt; /**< Transfers left */
int32_t result; /**< Current result */
};
/**
* @brief The result of rx or tx transfer
*/
struct ipc_test_cmd_xrsp {
struct ipc_test_cmd base;
int32_t result;
};
#endif /* TEST_COMMANDS_H */

29
tests/subsys/ipc/ipc_sessions/interoperability/Kconfig

@ -0,0 +1,29 @@ @@ -0,0 +1,29 @@
#
# Copyright (c) 2024 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
config IPC_SERVICE_BACKEND_ICMSG
default n if IPC_SERVICE_BACKEND_ICMSG_V1
config IPC_SERVICE_ICMSG
default n if IPC_SERVICE_ICMSG_V1
config IPC_SERVICE_BACKEND_ICMSG_V1
bool "ICMSG backend with SPSC packet buffer (old implementation)"
depends on MBOX
select IPC_SERVICE_ICMSG_V1
help
Choosing this backend results in a single endpoint implementation based
on a circular packet buffer.
menuconfig IPC_SERVICE_ICMSG_V1
bool "icmsg IPC library (old implementation)"
select PBUF_V1
help
Icmsg library
if IPC_SERVICE_ICMSG_V1
rsource "Kconfig.icmsg_v1"
endif

90
tests/subsys/ipc/ipc_sessions/interoperability/Kconfig.icmsg_v1

@ -0,0 +1,90 @@ @@ -0,0 +1,90 @@
# Copyright (c) 2022 Nordic Semiconductor (ASA)
# SPDX-License-Identifier: Apache-2.0
config IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1
bool "Synchronize access to shared memory"
depends on MULTITHREADING
default y
help
Provide synchronization access to shared memory at a library level.
This option is enabled by default to allow to use sending API from
multiple contexts. Mutex is used to guard access to the memory.
This option can be safely disabled if an application ensures data
are sent from single context.
config IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS_V1
int "Mutex lock timeout in milliseconds"
depends on IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1
range 1 5
default 1
help
Maximum time to wait, in milliseconds, for access to send data with
backends basing on icmsg library. This time should be relatively low.
config IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS_V1
int "Bond notification timeout in milliseconds"
range 1 100
default 1
help
Time to wait for remote bonding notification before the
notification is repeated.
config IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1
bool "Use dedicated workqueue"
depends on MULTITHREADING
default y
help
Enable dedicated workqueue thread for the ICMsg backend.
Disabling this configuration will cause the ICMsg backend to
process incoming data through the system workqueue context, and
therefore reduces the RAM footprint of the backend.
Disabling this config may result in deadlocks in certain usage
scenarios, such as when synchronous IPC is executed from the system
workqueue context.
The callbacks coming from the backend are executed from the workqueue
context.
When the option is disabled, the user must obey the restrictions
imposed by the system workqueue, such as never performing blocking
operations from within the callback.
if IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1
config IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE_V1
int "Size of RX work queue stack"
default 1280
help
Size of stack used by work queue RX thread. This work queue is
created to prevent notifying service users about received data
from the system work queue. The queue is shared among instances.
config IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY_V1
int "Priority of RX work queue thread"
default -1
range -256 -1
help
Priority of the ICMSG RX work queue thread.
The ICMSG library in its simplicity requires the workqueue to execute
at a cooperative priority.
endif
# The Icmsg library in its simplicity requires the system workqueue to execute
# at a cooperative priority.
config SYSTEM_WORKQUEUE_PRIORITY
range -256 -1 if !IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1
config PBUF_V1
bool "Packed buffer support library (old implementation)"
help
The packet buffer implements lightweight unidirectional packet buffer
with read/write semantics on top of a memory region shared by the
reader and writer. It optionally embeds cache and memory barrier
management to ensure correct data access.
if PBUF_V1
config PBUF_RX_READ_BUF_SIZE_V1
int "Size of PBUF read buffer in bytes"
default 128
endif # PBUF

392
tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.c

@ -0,0 +1,392 @@ @@ -0,0 +1,392 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "icmsg_v1.h"
#include <string.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/sys/atomic.h>
#include "pbuf_v1.h"
#include <zephyr/init.h>
#define BOND_NOTIFY_REPEAT_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS)
#define SHMEM_ACCESS_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS)
static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34};
#ifdef CONFIG_MULTITHREADING
#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)
static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE);
static struct k_work_q icmsg_workq;
static struct k_work_q *const workq = &icmsg_workq;
#else
static struct k_work_q *const workq = &k_sys_work_q;
#endif
static void mbox_callback_process(struct k_work *item);
#else
static void mbox_callback_process(struct icmsg_data_t *dev_data);
#endif
/* Disable and unregister the RX mailbox channel and cancel pending work
 * items so that no further callbacks can fire for this instance.
 *
 * @param conf      Instance configuration (provides the RX mbox spec).
 * @param dev_data  Run-time data of the instance being torn down.
 *
 * @retval 0 on success, negative errno from the mbox driver otherwise.
 */
static int mbox_deinit(const struct icmsg_config_t *conf,
		       struct icmsg_data_t *dev_data)
{
	int err;

	err = mbox_set_enabled_dt(&conf->mbox_rx, 0);
	if (err != 0) {
		return err;
	}

	err = mbox_register_callback_dt(&conf->mbox_rx, NULL, NULL);
	if (err != 0) {
		return err;
	}

#ifdef CONFIG_MULTITHREADING
	/* Cancellation is best-effort: an already-running work item will
	 * finish, but nothing new is submitted once the mbox is disabled.
	 */
	(void)k_work_cancel(&dev_data->mbox_work);
	(void)k_work_cancel_delayable(&dev_data->notify_work);
#endif

	return 0;
}
static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
{
return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
}
#ifdef CONFIG_MULTITHREADING
/* Delayable work handler: keep signalling the remote side via the TX
 * mailbox and reschedule itself every BOND_NOTIFY_REPEAT_TO until the
 * instance state becomes READY (handshake completed).
 */
static void notify_process(struct k_work *item)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(item);
	struct icmsg_data_t *dev_data =
		CONTAINER_OF(dwork, struct icmsg_data_t, notify_work);

	(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);

	atomic_t state = atomic_get(&dev_data->state);

	if (state != ICMSG_STATE_READY) {
		int ret;

		ret = k_work_reschedule_for_queue(workq, dwork, BOND_NOTIFY_REPEAT_TO);
		__ASSERT_NO_MSG(ret >= 0);
		(void)ret;
	}
}
#else
/* Single-threaded variant: busy-poll the RX path, re-sending the bond
 * notification whenever the repeat timeout elapses (when a system clock
 * exists), until the endpoint reports ready.
 */
static void notify_process(struct icmsg_data_t *dev_data)
{
	(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
#if defined(CONFIG_SYS_CLOCK_EXISTS)
	int64_t start = k_uptime_get();
#endif

	while (false == is_endpoint_ready(dev_data)) {
		mbox_callback_process(dev_data);

#if defined(CONFIG_SYS_CLOCK_EXISTS)
		if ((k_uptime_get() - start) > CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS) {
#endif
			(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
#if defined(CONFIG_SYS_CLOCK_EXISTS)
			start = k_uptime_get();
		};
#endif
	}
}
#endif
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
/* Guard the TX packet buffer with a mutex so icmsg_send() may be called
 * from multiple threads. Waits up to SHMEM_ACCESS_TO for the lock.
 *
 * @retval 0 on success, negative value when the mutex could not be taken.
 */
static int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
{
	int ret = k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);

	if (ret < 0) {
		return ret;
	}

	return 0;
}

/* Release the TX buffer mutex taken by reserve_tx_buffer_if_unused(). */
static int release_tx_buffer(struct icmsg_data_t *dev_data)
{
	return k_mutex_unlock(&dev_data->tx_lock);
}
#endif
/* Peek the RX packet buffer: pbuf_read() with a NULL buffer returns the
 * length of the next pending packet (0 when the buffer is empty).
 */
static uint32_t data_available(struct icmsg_data_t *dev_data)
{
	return pbuf_read(dev_data->rx_pb, NULL, 0);
}
#ifdef CONFIG_MULTITHREADING
/* Queue RX processing on the ICMsg work queue. */
static void submit_mbox_work(struct icmsg_data_t *dev_data)
{
	if (k_work_submit_to_queue(workq, &dev_data->mbox_work) < 0) {
		/* The mbox processing work is never canceled.
		 * The negative error code should never be seen.
		 */
		__ASSERT_NO_MSG(false);
	}
}

/* Unconditionally schedule RX processing (name kept for symmetry with
 * the data-available variant below).
 */
static void submit_work_if_buffer_free(struct icmsg_data_t *dev_data)
{
	submit_mbox_work(dev_data);
}

/* Schedule RX processing only when the RX buffer holds a packet. */
static void submit_work_if_buffer_free_and_data_available(
	struct icmsg_data_t *dev_data)
{
	if (!data_available(dev_data)) {
		return;
	}

	submit_mbox_work(dev_data);
}
#else
/* Single-threaded variant: process the RX path inline. */
static void submit_if_buffer_free(struct icmsg_data_t *dev_data)
{
	mbox_callback_process(dev_data);
}

/* Single-threaded variant: process inline only when data is pending. */
static void submit_if_buffer_free_and_data_available(
	struct icmsg_data_t *dev_data)
{
	if (!data_available(dev_data)) {
		return;
	}

	mbox_callback_process(dev_data);
}
#endif
/* Core RX handler: reads one packet from the RX packet buffer and either
 * dispatches it to the user's received() callback (READY state) or treats
 * it as the bonding magic sequence (BUSY state), moving the instance to
 * READY and firing the bound() callback.
 *
 * In multithreading builds it runs as a work item; otherwise it is called
 * directly with the instance data.
 */
#ifdef CONFIG_MULTITHREADING
static void mbox_callback_process(struct k_work *item)
#else
static void mbox_callback_process(struct icmsg_data_t *dev_data)
#endif
{
#ifdef CONFIG_MULTITHREADING
	struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);
#endif
	uint8_t rx_buffer[CONFIG_PBUF_RX_READ_BUF_SIZE] __aligned(4);

	atomic_t state = atomic_get(&dev_data->state);

	uint32_t len = data_available(dev_data);

	if (len == 0) {
		/* Unlikely, no data in buffer. */
		return;
	}

	__ASSERT_NO_MSG(len <= sizeof(rx_buffer));

	if (sizeof(rx_buffer) < len) {
		/* Packet does not fit in the local read buffer; drop it. */
		return;
	}

	len = pbuf_read(dev_data->rx_pb, rx_buffer, sizeof(rx_buffer));

	if (state == ICMSG_STATE_READY) {
		if (dev_data->cb->received) {
			dev_data->cb->received(rx_buffer, len, dev_data->ctx);
		}
	} else {
		__ASSERT_NO_MSG(state == ICMSG_STATE_BUSY);

		/* Allow magic number longer than sizeof(magic) for future protocol version. */
		bool endpoint_invalid = (len < sizeof(magic) ||
					memcmp(magic, rx_buffer, sizeof(magic)));

		if (endpoint_invalid) {
			__ASSERT_NO_MSG(false);
			return;
		}

		if (dev_data->cb->bound) {
			dev_data->cb->bound(dev_data->ctx);
		}

		atomic_set(&dev_data->state, ICMSG_STATE_READY);
	}

	/* Keep draining the RX buffer while packets remain. */
#ifdef CONFIG_MULTITHREADING
	submit_work_if_buffer_free_and_data_available(dev_data);
#else
	submit_if_buffer_free_and_data_available(dev_data);
#endif
}
/* MBOX ISR callback: defer packet processing out of interrupt context
 * (or run it inline in single-threaded builds).
 */
static void mbox_callback(const struct device *instance, uint32_t channel,
			  void *user_data, struct mbox_msg *msg_data)
{
	struct icmsg_data_t *dev_data = user_data;

#ifdef CONFIG_MULTITHREADING
	submit_work_if_buffer_free(dev_data);
#else
	submit_if_buffer_free(dev_data);
#endif
}
/* Initialize the work items (multithreading builds), register the RX
 * mailbox callback and enable the RX channel.
 *
 * @retval 0 on success, negative errno from the mbox driver otherwise.
 */
static int mbox_init(const struct icmsg_config_t *conf,
		     struct icmsg_data_t *dev_data)
{
	int err;

#ifdef CONFIG_MULTITHREADING
	k_work_init(&dev_data->mbox_work, mbox_callback_process);
	k_work_init_delayable(&dev_data->notify_work, notify_process);
#endif

	err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, dev_data);
	if (err != 0) {
		return err;
	}

	return mbox_set_enabled_dt(&conf->mbox_rx, 1);
}
/* Open an icmsg instance: initialize both packet buffers, write the magic
 * handshake sequence into TX and start notifying the remote side.
 * See the header for the full contract.
 */
int icmsg_open(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const struct ipc_service_cb *cb, void *ctx)
{
	/* The OFF -> BUSY transition also guards against double open. */
	if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF, ICMSG_STATE_BUSY)) {
		/* Already opened. */
		return -EALREADY;
	}

	dev_data->cb = cb;
	dev_data->ctx = ctx;
	dev_data->cfg = conf;

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	k_mutex_init(&dev_data->tx_lock);
#endif

	int ret = pbuf_tx_init(dev_data->tx_pb);

	if (ret < 0) {
		__ASSERT(false, "Incorrect configuration");
		return ret;
	}

	(void)pbuf_rx_init(dev_data->rx_pb);

	/* Write the magic sequence as the first TX packet; the remote side
	 * uses it to detect that this end is alive and compatible.
	 */
	ret = pbuf_write(dev_data->tx_pb, magic, sizeof(magic));

	if (ret < 0) {
		__ASSERT_NO_MSG(false);
		return ret;
	}

	if (ret < (int)sizeof(magic)) {
		__ASSERT_NO_MSG(ret == sizeof(magic));
		return ret;
	}

	ret = mbox_init(conf, dev_data);
	if (ret) {
		return ret;
	}

#ifdef CONFIG_MULTITHREADING
	/* Kick off the repeated bond notifications. */
	ret = k_work_schedule_for_queue(workq, &dev_data->notify_work, K_NO_WAIT);
	if (ret < 0) {
		return ret;
	}
#else
	notify_process(dev_data);
#endif
	return 0;
}
/* Close an icmsg instance: shut down the mailbox path and mark the
 * instance as closed so it can be opened again later.
 */
int icmsg_close(const struct icmsg_config_t *conf,
		struct icmsg_data_t *dev_data)
{
	int err = mbox_deinit(conf, dev_data);

	if (err != 0) {
		return err;
	}

	atomic_set(&dev_data->state, ICMSG_STATE_OFF);
	return 0;
}
/* Send a message over an open icmsg instance.
 * Returns the number of sent bytes or a negative errno (see header).
 */
int icmsg_send(const struct icmsg_config_t *conf,
	       struct icmsg_data_t *dev_data,
	       const void *msg, size_t len)
{
	int ret;
	int write_ret;
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	int release_ret;
#endif
	int sent_bytes;

	if (!is_endpoint_ready(dev_data)) {
		return -EBUSY;
	}

	/* Empty message is not allowed */
	if (len == 0) {
		return -ENODATA;
	}

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	ret = reserve_tx_buffer_if_unused(dev_data);
	if (ret < 0) {
		return -ENOBUFS;
	}
#endif

	write_ret = pbuf_write(dev_data->tx_pb, msg, len);

#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
	release_ret = release_tx_buffer(dev_data);
	__ASSERT_NO_MSG(!release_ret);
#endif

	if (write_ret < 0) {
		return write_ret;
	} else if (write_ret < len) {
		/* Partial write should not happen; treat as malformed message. */
		return -EBADMSG;
	}

	sent_bytes = write_ret;

	__ASSERT_NO_MSG(conf->mbox_tx.dev != NULL);

	/* Signal the remote side that new data is available. */
	ret = mbox_send_dt(&conf->mbox_tx, NULL);
	if (ret) {
		return ret;
	}

	return sent_bytes;
}
#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)
/* Start the dedicated ICMsg RX work queue at boot; the queue is shared
 * by all icmsg instances.
 */
static int work_q_init(void)
{
	struct k_work_queue_config cfg = {
		.name = "icmsg_workq",
	};

	k_work_queue_start(&icmsg_workq,
			   icmsg_stack,
			   K_KERNEL_STACK_SIZEOF(icmsg_stack),
			   CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY, &cfg);
	return 0;
}

SYS_INIT(work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif

168
tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.h

@ -0,0 +1,168 @@ @@ -0,0 +1,168 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_IPC_ICMSG_H_
#define ZEPHYR_INCLUDE_IPC_ICMSG_H_
#include <stddef.h>
#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/ipc/ipc_service.h>
#include "pbuf_v1.h"
#include <zephyr/sys/atomic.h>
/* Config aliases that prevent config collisions: */
#undef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1
#define CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1
#endif
#undef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS_V1
#define CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS_V1
#endif
#undef CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS
#ifdef CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS_V1
#define CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS \
CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS_V1
#endif
#undef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE
#ifdef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1
#define CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1
#endif
#undef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE
#ifdef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE_V1
#define CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE \
CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE_V1
#endif
#undef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY
#ifdef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY_V1
#define CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY_V1
#endif
#undef CONFIG_PBUF_RX_READ_BUF_SIZE
#ifdef CONFIG_PBUF_RX_READ_BUF_SIZE_V1
#define CONFIG_PBUF_RX_READ_BUF_SIZE CONFIG_PBUF_RX_READ_BUF_SIZE_V1
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Icmsg IPC library API
* @defgroup ipc_icmsg_api Icmsg IPC library API
* @ingroup ipc
* @{
*/
enum icmsg_state {
ICMSG_STATE_OFF,
ICMSG_STATE_BUSY,
ICMSG_STATE_READY,
};
struct icmsg_config_t {
struct mbox_dt_spec mbox_tx;
struct mbox_dt_spec mbox_rx;
};
struct icmsg_data_t {
/* Tx/Rx buffers. */
struct pbuf *tx_pb;
struct pbuf *rx_pb;
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
struct k_mutex tx_lock;
#endif
/* Callbacks for an endpoint. */
const struct ipc_service_cb *cb;
void *ctx;
/* General */
const struct icmsg_config_t *cfg;
#ifdef CONFIG_MULTITHREADING
struct k_work_delayable notify_work;
struct k_work mbox_work;
#endif
atomic_t state;
};
/** @brief Open an icmsg instance
*
* Open an icmsg instance to be able to send and receive messages to a remote
* instance.
* This function is blocking until the handshake with the remote instance is
* completed.
* This function is intended to be called late in the initialization process,
* possibly from a thread which can be safely blocked while handshake with the
* remote instance is being performed.
*
* @param[in] conf Structure containing configuration parameters for the icmsg
* instance.
* @param[inout] dev_data Structure containing run-time data used by the icmsg
* instance.
* @param[in] cb Structure containing callback functions to be called on
* events generated by this icmsg instance. The pointed memory
* must be preserved while the icmsg instance is active.
* @param[in] ctx Pointer to context passed as an argument to callbacks.
*
*
* @retval 0 on success.
* @retval -EALREADY when the instance is already opened.
* @retval other errno codes from dependent modules.
*/
int icmsg_open(const struct icmsg_config_t *conf,
struct icmsg_data_t *dev_data,
const struct ipc_service_cb *cb, void *ctx);
/** @brief Close an icmsg instance
*
* Closing an icmsg instance results in releasing all resources used by given
* instance including the shared memory regions and mbox devices.
*
* @param[in] conf Structure containing configuration parameters for the icmsg
* instance being closed. Its content must be the same as used
* for creating this instance with @ref icmsg_open.
* @param[inout] dev_data Structure containing run-time data used by the icmsg
* instance.
*
* @retval 0 on success.
* @retval other errno codes from dependent modules.
*/
int icmsg_close(const struct icmsg_config_t *conf,
struct icmsg_data_t *dev_data);
/** @brief Send a message to the remote icmsg instance.
*
* @param[in] conf Structure containing configuration parameters for the icmsg
* instance.
* @param[inout] dev_data Structure containing run-time data used by the icmsg
* instance.
* @param[in] msg Pointer to a buffer containing data to send.
* @param[in] len Size of data in the @p msg buffer.
*
*
* @retval Number of sent bytes.
* @retval -EBUSY when the instance has not finished handshake with the remote
* instance.
* @retval -ENODATA when the requested data to send is empty.
* @retval -EBADMSG when the requested data to send is too big.
* @retval -ENOBUFS when there are no TX buffers available.
* @retval other errno codes from dependent modules.
*/
int icmsg_send(const struct icmsg_config_t *conf,
struct icmsg_data_t *dev_data,
const void *msg, size_t len);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_IPC_ICMSG_H_ */

86
tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.c

@ -0,0 +1,86 @@ @@ -0,0 +1,86 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "ipc_icmsg_v1.h"
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include "icmsg_v1.h"
#include <zephyr/ipc/ipc_service_backend.h>
#define DT_DRV_COMPAT zephyr_ipc_icmsg
/* Register the single supported endpoint: opening the endpoint opens the
 * underlying icmsg instance and starts the bonding handshake.
 */
static int register_ept(const struct device *instance, void **token,
			const struct ipc_ept_cfg *cfg)
{
	const struct icmsg_config_t *conf = instance->config;
	struct icmsg_data_t *dev_data = instance->data;

	/* Only one endpoint is supported. No need for a token. */
	*token = NULL;

	return icmsg_open(conf, dev_data, &cfg->cb, cfg->priv);
}
/* Tear down the single endpoint by closing the underlying icmsg instance. */
static int deregister_ept(const struct device *instance, void *token)
{
	return icmsg_close(instance->config, instance->data);
}
/* Forward a send request to the underlying icmsg instance. */
static int send(const struct device *instance, void *token,
		const void *msg, size_t len)
{
	return icmsg_send(instance->config, instance->data, msg, len);
}
/* Single-endpoint backend: only register/deregister/send are supported. */
const static struct ipc_service_backend backend_ops = {
	.register_endpoint = register_ept,
	.deregister_endpoint = deregister_ept,
	.send = send,
};
/* No backend-level initialization is needed; everything happens at
 * endpoint registration time.
 */
static int backend_init(const struct device *instance)
{
	(void)instance;

	return 0;
}
#define DEFINE_BACKEND_DEVICE(i) \
static const struct icmsg_config_t backend_config_##i = { \
.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \
.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
}; \
\
PBUF_DEFINE(tx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
PBUF_DEFINE(rx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
\
static struct icmsg_data_t backend_data_##i = { \
.tx_pb = &tx_pb_##i, \
.rx_pb = &rx_pb_##i, \
}; \
\
DEVICE_DT_INST_DEFINE(i, \
&backend_init, \
NULL, \
&backend_data_##i, \
&backend_config_##i, \
POST_KERNEL, \
CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY, \
&backend_ops);
DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)

5
tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.h

@ -0,0 +1,5 @@ @@ -0,0 +1,5 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/

255
tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.c

@ -0,0 +1,255 @@ @@ -0,0 +1,255 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <string.h>
#include <errno.h>
#include <zephyr/cache.h>
#include "pbuf_v1.h"
#include <zephyr/sys/byteorder.h>
#if defined(CONFIG_ARCH_POSIX)
#include <soc.h>
#endif
/* Helper function for getting the number of bytes written to the buffer. */
/* Number of bytes currently stored in the circular buffer.
 * It is implicitly assumed wr_idx and rd_idx cannot differ by more than len.
 */
static uint32_t idx_occupied(uint32_t len, uint32_t wr_idx, uint32_t rd_idx)
{
	uint32_t used;

	if (rd_idx > wr_idx) {
		/* The write index has wrapped past the end of the buffer. */
		used = len - (rd_idx - wr_idx);
	} else {
		used = wr_idx - rd_idx;
	}

	return used;
}
/* Helper function for wrapping the index from the beginning if above buffer len. */
/* Wrap an index back into [0, len) once it runs past the buffer length. */
static uint32_t idx_wrap(uint32_t len, uint32_t idx)
{
	if (idx < len) {
		return idx;
	}

	return idx % len;
}
/* Sanity-check a packet buffer configuration before use.
 *
 * Verifies that all location pointers are set, that they are aligned for
 * the index word size (and data cache line where applicable), that the
 * data area is long enough, and that the rd/wr index words and data area
 * are laid out in the expected order.
 *
 * @retval 0 when the configuration is usable, -EINVAL otherwise.
 */
static int validate_cfg(const struct pbuf_cfg *cfg)
{
	/* Validate pointers. */
	if (!cfg || !cfg->rd_idx_loc || !cfg->wr_idx_loc || !cfg->data_loc) {
		return -EINVAL;
	}

	/* Validate pointer alignment. */
	if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
	    !IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
	    !IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	/* Validate len. */
	if (cfg->len < _PBUF_MIN_DATA_LEN || !IS_PTR_ALIGNED_BYTES(cfg->len, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	/* Validate pointer values: rd_idx < wr_idx < data, with wr_idx placed
	 * exactly one (cache-aligned) slot after rd_idx.
	 */
	if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) ||
	    !((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) ||
	    !(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) ==
	      (uint8_t *)cfg->wr_idx_loc)) {
		return -EINVAL;
	}

	return 0;
}
#if defined(CONFIG_ARCH_POSIX)
/* On the POSIX (native simulator) architecture, remap the shared-memory
 * locations from the embedded address space to host pointers.
 */
void pbuf_native_addr_remap(struct pbuf *pb)
{
	native_emb_addr_remap((void **)&pb->cfg->rd_idx_loc);
	native_emb_addr_remap((void **)&pb->cfg->wr_idx_loc);
	native_emb_addr_remap((void **)&pb->cfg->data_loc);
}
#endif
/* Initialize the transmit side of a packet buffer: validate the layout,
 * zero both index words in shared memory and flush them so the reader
 * starts from a consistent, empty state.
 *
 * @retval 0 on success, -EINVAL on invalid configuration.
 */
int pbuf_tx_init(struct pbuf *pb)
{
	if (validate_cfg(pb->cfg) != 0) {
		return -EINVAL;
	}
#if defined(CONFIG_ARCH_POSIX)
	pbuf_native_addr_remap(pb);
#endif

	/* Initialize local copy of indexes. */
	pb->data.wr_idx = 0;
	pb->data.rd_idx = 0;

	/* Clear shared memory. */
	*(pb->cfg->wr_idx_loc) = pb->data.wr_idx;
	*(pb->cfg->rd_idx_loc) = pb->data.rd_idx;

	__sync_synchronize();

	/* Take care cache. */
	sys_cache_data_flush_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
	sys_cache_data_flush_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));

	return 0;
}
/* Initialize the receive side of a packet buffer: validate the layout and
 * reset the local index copies. The shared index words are owned by the
 * writer, so they are not touched here.
 *
 * @retval 0 on success, -EINVAL on invalid configuration.
 */
int pbuf_rx_init(struct pbuf *pb)
{
	if (validate_cfg(pb->cfg) != 0) {
		return -EINVAL;
	}
#if defined(CONFIG_ARCH_POSIX)
	pbuf_native_addr_remap(pb);
#endif

	/* Initialize local copy of indexes. */
	pb->data.wr_idx = 0;
	pb->data.rd_idx = 0;

	return 0;
}
/* Write a single packet (16-bit big-endian length header followed by the
 * payload) into the shared circular buffer, flushing the cache after each
 * write and publishing the new write index only at the very end.
 *
 * @retval len on success,
 * @retval -EINVAL on invalid arguments or a misaligned remote rd_idx,
 * @retval -ENOMEM when the packet does not fit in the free space.
 */
int pbuf_write(struct pbuf *pb, const char *data, uint16_t len)
{
	if (pb == NULL || len == 0 || data == NULL) {
		/* Incorrect call. */
		return -EINVAL;
	}

	/* Invalidate rd_idx only, local wr_idx is used to increase buffer security. */
	sys_cache_data_invd_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc)));
	__sync_synchronize();

	uint8_t *const data_loc = pb->cfg->data_loc;
	const uint32_t blen = pb->cfg->len;
	uint32_t rd_idx = *(pb->cfg->rd_idx_loc);
	uint32_t wr_idx = pb->data.wr_idx;

	/* wr_idx must always be aligned. */
	__ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE));
	/* rd_idx shall always be aligned, but its value is received from the reader.
	 * Can not assert.
	 */
	if (!IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	/* One index slot is kept free to distinguish full from empty. */
	uint32_t free_space = blen - idx_occupied(blen, wr_idx, rd_idx) - _PBUF_IDX_SIZE;

	/* Packet length, data + packet length size. */
	uint32_t plen = len + PBUF_PACKET_LEN_SZ;

	/* Check if packet will fit into the buffer. */
	if (free_space < plen) {
		return -ENOMEM;
	}

	/* Clear packet len with zeros and update. Clearing is done for possible versioning in the
	 * future. Writing is allowed now, because shared wr_idx value is updated at the very end.
	 */
	*((uint32_t *)(&data_loc[wr_idx])) = 0;
	sys_put_be16(len, &data_loc[wr_idx]);
	__sync_synchronize();
	sys_cache_data_flush_range(&data_loc[wr_idx], PBUF_PACKET_LEN_SZ);

	wr_idx = idx_wrap(blen, wr_idx + PBUF_PACKET_LEN_SZ);

	/* Write until end of the buffer, if data will be wrapped. */
	uint32_t tail = MIN(len, blen - wr_idx);

	memcpy(&data_loc[wr_idx], data, tail);
	sys_cache_data_flush_range(&data_loc[wr_idx], tail);

	if (len > tail) {
		/* Copy remaining data to buffer front. */
		memcpy(&data_loc[0], data + tail, len - tail);
		sys_cache_data_flush_range(&data_loc[0], len - tail);
	}

	/* Advance the write index past the payload, keeping it aligned. */
	wr_idx = idx_wrap(blen, ROUND_UP(wr_idx + len, _PBUF_IDX_SIZE));

	/* Update wr_idx. */
	pb->data.wr_idx = wr_idx;
	*(pb->cfg->wr_idx_loc) = wr_idx;
	__sync_synchronize();
	sys_cache_data_flush_range((void *)pb->cfg->wr_idx_loc, sizeof(*(pb->cfg->wr_idx_loc)));

	return len;
}
/* Read one packet from the ring buffer. Only the writer-owned wr_idx is
 * re-read from shared memory; the reader's own rd_idx is taken from the local
 * copy. With buf == NULL the pending packet length is returned without
 * consuming the packet.
 * Returns bytes read (0 when empty), -EINVAL on bad/misaligned shared wr_idx,
 * -ENOMEM when buf is too small, -EAGAIN when the packet is not complete yet.
 */
int pbuf_read(struct pbuf *pb, char *buf, uint16_t len)
{
	if (pb == NULL) {
		/* Incorrect call. */
		return -EINVAL;
	}

	/* Invalidate wr_idx only, local rd_idx is used to increase buffer security. */
	sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
	__sync_synchronize();

	uint8_t *const data_loc = pb->cfg->data_loc;
	const uint32_t blen = pb->cfg->len;
	uint32_t wr_idx = *(pb->cfg->wr_idx_loc);
	uint32_t rd_idx = pb->data.rd_idx;

	/* rd_idx must always be aligned. */
	__ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE));
	/* wr_idx shall always be aligned, but its value is received from the
	 * writer. Can not assert.
	 */
	if (!IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)) {
		return -EINVAL;
	}

	if (rd_idx == wr_idx) {
		/* Buffer is empty. */
		return 0;
	}

	/* Get packet len.*/
	sys_cache_data_invd_range(&data_loc[rd_idx], PBUF_PACKET_LEN_SZ);
	uint16_t plen = sys_get_be16(&data_loc[rd_idx]);

	if (!buf) {
		/* Peek mode: report the pending packet size, do not consume. */
		return (int)plen;
	}

	if (plen > len) {
		return -ENOMEM;
	}

	uint32_t occupied_space = idx_occupied(blen, wr_idx, rd_idx);

	if (occupied_space < plen + PBUF_PACKET_LEN_SZ) {
		/* This should never happen. */
		return -EAGAIN;
	}

	rd_idx = idx_wrap(blen, rd_idx + PBUF_PACKET_LEN_SZ);

	/* Packet will fit into provided buffer, truncate len if provided len
	 * is bigger than necessary.
	 */
	len = MIN(plen, len);

	/* Read until end of the buffer, if data are wrapped. */
	uint32_t tail = MIN(blen - rd_idx, len);

	sys_cache_data_invd_range(&data_loc[rd_idx], tail);
	memcpy(buf, &data_loc[rd_idx], tail);

	if (len > tail) {
		/* Remainder of a wrapped packet starts at the buffer front. */
		sys_cache_data_invd_range(&data_loc[0], len - tail);
		memcpy(&buf[tail], &pb->cfg->data_loc[0], len - tail);
	}

	/* Update rd_idx: round up so the next header read is aligned, then publish. */
	rd_idx = idx_wrap(blen, ROUND_UP(rd_idx + len, _PBUF_IDX_SIZE));
	pb->data.rd_idx = rd_idx;
	*(pb->cfg->rd_idx_loc) = rd_idx;
	__sync_synchronize();
	sys_cache_data_flush_range((void *)pb->cfg->rd_idx_loc, sizeof(*(pb->cfg->rd_idx_loc)));

	return len;
}

234
tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.h

@ -0,0 +1,234 @@ @@ -0,0 +1,234 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_IPC_PBUF_H_
#define ZEPHYR_INCLUDE_IPC_PBUF_H_
#include <zephyr/cache.h>
#include <zephyr/devicetree.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Packed buffer API
 * @defgroup pbuf Packed Buffer API
 * @ingroup ipc
 * @{
 */
/** @brief Size of packet length field. */
#define PBUF_PACKET_LEN_SZ sizeof(uint32_t)
/* Size of a single read/write index; also the amount of data that is left
 * unused to distinguish between empty and full.
 */
#define _PBUF_IDX_SIZE sizeof(uint32_t)
/* Minimal length of the data field in the buffer to store the smallest packet
 * possible.
 * (+1) for at least one byte of data.
 * (+_PBUF_IDX_SIZE) to distinguish buffer full and buffer empty.
 * Rounded up to keep wr/rd indexes pointing to aligned address.
 */
#define _PBUF_MIN_DATA_LEN ROUND_UP(PBUF_PACKET_LEN_SZ + 1 + _PBUF_IDX_SIZE, _PBUF_IDX_SIZE)
#if defined(CONFIG_ARCH_POSIX)
/* For the native simulated boards we need to modify some pointers at init */
#define PBUF_MAYBE_CONST
#else
#define PBUF_MAYBE_CONST const
#endif
/** @brief Control block of packet buffer.
 *
 * The structure contains configuration data fixed at initialization time.
 */
struct pbuf_cfg {
	volatile uint32_t *rd_idx_loc;	/* Address of the variable holding
					 * index value of the first valid byte
					 * in data[].
					 */
	volatile uint32_t *wr_idx_loc;	/* Address of the variable holding
					 * index value of the first free byte
					 * in data[].
					 */
	uint32_t dcache_alignment;	/* CPU data cache line size in bytes.
					 * Used for validation - TODO: To be
					 * replaced by flags.
					 */
	uint32_t len;			/* Length of data[] in bytes. */
	uint8_t *data_loc;		/* Location of the data[]. */
};
/**
 * @brief Data block of the packed buffer.
 *
 * The structure contains local copies of wr and rd indexes used by writer and
 * reader respectively.
 */
struct pbuf_data {
	volatile uint32_t wr_idx;	/* Index of the first free byte
					 * in data[]. Used for writing.
					 */
	volatile uint32_t rd_idx;	/* Index of the first valid byte
					 * in data[]. Used for reading.
					 */
};
/**
 * @brief Secure packed buffer.
 *
 * The packet buffer implements lightweight unidirectional packet
 * buffer with read/write semantics on top of a memory region shared
 * by the reader and writer. It embeds cache and memory barrier management to
 * ensure correct data access.
 *
 * This structure supports single writer and reader. Data stored in the buffer
 * is encapsulated to a message (with length header). The read/write API is
 * written in a way to protect the data from being corrupted.
 */
struct pbuf {
	PBUF_MAYBE_CONST struct pbuf_cfg *const cfg;	/* Configuration of the
							 * buffer.
							 */
	struct pbuf_data data;				/* Data used to read and write
							 * to the buffer
							 */
};
/**
 * @brief Macro for configuration initialization.
 *
 * It is recommended to use this macro to initialize packed buffer
 * configuration. The layout in shared memory is:
 * rd_idx | (padding to cache line) | wr_idx | data[].
 *
 * @param mem_addr Memory address for pbuf.
 * @param size Size of the memory.
 * @param dcache_align Data cache alignment.
 */
#define PBUF_CFG_INIT(mem_addr, size, dcache_align)					\
{											\
	.rd_idx_loc = (uint32_t *)(mem_addr),						\
	.wr_idx_loc = (uint32_t *)((uint8_t *)(mem_addr) +				\
				MAX(dcache_align, _PBUF_IDX_SIZE)),			\
	.data_loc = (uint8_t *)((uint8_t *)(mem_addr) +					\
				MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE),	\
	.len = (uint32_t)((uint32_t)(size) - MAX(dcache_align, _PBUF_IDX_SIZE) -	\
				_PBUF_IDX_SIZE),					\
	.dcache_alignment = (dcache_align),						\
}

/**
 * @brief Macro calculates memory overhead taken by the header in shared memory.
 *
 * It contains the read index, write index and padding.
 *
 * @param dcache_align Data cache alignment.
 */
#define PBUF_HEADER_OVERHEAD(dcache_align)				\
	(MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE)

/**
 * @brief Statically define and initialize pbuf.
 *
 * @param name Name of the pbuf.
 * @param mem_addr Memory address for pbuf.
 * @param size Size of the memory.
 * @param dcache_align Data cache line size.
 */
#define PBUF_DEFINE(name, mem_addr, size, dcache_align)					\
	BUILD_ASSERT(dcache_align >= 0,							\
			"Cache line size must be non negative.");			\
	BUILD_ASSERT((size) > 0 && IS_PTR_ALIGNED_BYTES(size, _PBUF_IDX_SIZE),		\
			"Incorrect size.");						\
	BUILD_ASSERT(IS_PTR_ALIGNED_BYTES(mem_addr, MAX(dcache_align, _PBUF_IDX_SIZE)),	\
			"Misaligned memory.");						\
	BUILD_ASSERT(size >= (MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE +	\
			_PBUF_MIN_DATA_LEN), "Insufficient size.");			\
	static PBUF_MAYBE_CONST struct pbuf_cfg cfg_##name =				\
			PBUF_CFG_INIT(mem_addr, size, dcache_align);			\
	static struct pbuf name = {							\
		.cfg = &cfg_##name,							\
	}
/**
 * @brief Initialize the Tx packet buffer.
 *
 * This function initializes the Tx packet buffer based on provided configuration.
 * If the configuration is incorrect, the function will return error.
 *
 * It is recommended to use PBUF_DEFINE macro for build time initialization.
 *
 * @param pb	Pointer to the packed buffer containing
 *		configuration and data. Configuration has to be
 *		fixed before the initialization.
 * @retval 0 on success.
 * @retval -EINVAL when the input parameter is incorrect.
 */
int pbuf_tx_init(struct pbuf *pb);

/**
 * @brief Initialize the Rx packet buffer.
 *
 * This function initializes the Rx packet buffer.
 * If the configuration is incorrect, the function will return error.
 *
 * It is recommended to use PBUF_DEFINE macro for build time initialization.
 *
 * @param pb	Pointer to the packed buffer containing
 *		configuration and data. Configuration has to be
 *		fixed before the initialization.
 * @retval 0 on success.
 * @retval -EINVAL when the input parameter is incorrect.
 */
int pbuf_rx_init(struct pbuf *pb);

/**
 * @brief Write specified amount of data to the packet buffer.
 *
 * This function call writes specified amount of data to the packet buffer if
 * the buffer will fit the data.
 *
 * @param pb	A buffer to which to write.
 * @param buf	Pointer to the data to be written to the buffer.
 * @param len	Number of bytes to be written to the buffer. Must be positive.
 * @retval int	Number of bytes written, negative error code on fail.
 *		-EINVAL, if any of input parameter is incorrect.
 *		-ENOMEM, if len is bigger than the buffer can fit.
 */
int pbuf_write(struct pbuf *pb, const char *buf, uint16_t len);

/**
 * @brief Read specified amount of data from the packet buffer.
 *
 * Single read allows to read the message sent by a single write.
 * The provided @p buf must be big enough to store the whole message.
 *
 * @param pb	A buffer from which data will be read.
 * @param buf	Data pointer to which read data will be written.
 *		If NULL, len of stored message is returned.
 * @param len	Number of bytes to be read from the buffer.
 * @retval int	Bytes read, negative error code on fail.
 *		Bytes to be read, if buf == NULL.
 *		-EINVAL, if any of input parameter is incorrect.
 *		-ENOMEM, if message can not fit in provided buf.
 *		-EAGAIN, if not whole message is ready yet.
 */
int pbuf_read(struct pbuf *pb, char *buf, uint16_t len);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_IPC_PBUF_H_ */

10
tests/subsys/ipc/ipc_sessions/prj.conf

@ -0,0 +1,10 @@ @@ -0,0 +1,10 @@
# Copyright 2021 Carlo Caione <ccaione@baylibre.com>
# SPDX-License-Identifier: Apache-2.0
# We need rand_r function
CONFIG_GNU_C_EXTENSIONS=y
CONFIG_ZTEST=y
CONFIG_MMU=y
CONFIG_IPC_SERVICE=y
CONFIG_MBOX=y

19
tests/subsys/ipc/ipc_sessions/remote/CMakeLists.txt

@ -0,0 +1,19 @@ @@ -0,0 +1,19 @@
#
# Copyright (c) 2024 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(remote_icmsg)
zephyr_include_directories(../common)
FILE(GLOB remote_sources src/*.c)
target_sources(app PRIVATE ${remote_sources})
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_ICMSG_V1 ../interoperability/icmsg_v1.c)
zephyr_sources_ifdef(CONFIG_PBUF_V1 ../interoperability/pbuf_v1.c)
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_BACKEND_ICMSG_V1 ../interoperability/ipc_icmsg_v1.c)

11
tests/subsys/ipc/ipc_sessions/remote/Kconfig

@ -0,0 +1,11 @@ @@ -0,0 +1,11 @@
#
# Copyright (c) 2024 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
rsource "../interoperability/Kconfig"
menu "Zephyr"
source "Kconfig.zephyr"
endmenu

36
tests/subsys/ipc/ipc_sessions/remote/boards/nrf5340dk_nrf5340_cpunet.overlay

@ -0,0 +1,36 @@ @@ -0,0 +1,36 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
/delete-node/ &ipc0;
/ {
chosen {
/delete-property/ zephyr,ipc_shm;
};
reserved-memory {
/delete-node/ memory@20070000;
sram_rx: memory@20070000 {
reg = <0x20070000 0x8000>;
};
sram_tx: memory@20078000 {
reg = <0x20078000 0x8000>;
};
};
ipc0: ipc0 {
compatible = "zephyr,ipc-icmsg";
tx-region = <&sram_tx>;
rx-region = <&sram_rx>;
mboxes = <&mbox 0>, <&mbox 1>;
mbox-names = "rx", "tx";
dcache-alignment = <8>;
unbound = "detect";
status = "okay";
};
};

1
tests/subsys/ipc/ipc_sessions/remote/boards/nrf54h20dk_nrf54h20_cpuppr.conf

@ -0,0 +1 @@ @@ -0,0 +1 @@
CONFIG_WATCHDOG=y

31
tests/subsys/ipc/ipc_sessions/remote/boards/nrf54h20dk_nrf54h20_cpuppr.overlay

@ -0,0 +1,31 @@ @@ -0,0 +1,31 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
* SPDX-License-Identifier: Apache-2.0
*/
ipc0: &cpuapp_cpuppr_ipc {
status = "okay";
unbound = "detect";
};
&cpuppr_vevif {
status = "okay";
};
&cpuapp_bellboard {
status = "okay";
};
&wdt131 {
status = "okay";
};
/ {
chosen {
/delete-property/ zephyr,bt-hci;
};
aliases {
watchdog0 = &wdt131;
};
};

15
tests/subsys/ipc/ipc_sessions/remote/boards/nrf54h20dk_nrf54h20_cpurad.overlay

@ -0,0 +1,15 @@ @@ -0,0 +1,15 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
* SPDX-License-Identifier: Apache-2.0
*/
&uart135 {
/delete-property/ hw-flow-control;
};
&ipc0 {
compatible = "zephyr,ipc-icmsg";
/delete-property/ tx-blocks;
/delete-property/ rx-blocks;
unbound = "enable";
};

23
tests/subsys/ipc/ipc_sessions/remote/prj.conf

@ -0,0 +1,23 @@ @@ -0,0 +1,23 @@
# Copyright (c) 2024 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
# We need rand_r function
CONFIG_GNU_C_EXTENSIONS=y
CONFIG_PRINTK=y
CONFIG_EVENTS=y
CONFIG_LOG=y
CONFIG_LOG_ALWAYS_RUNTIME=y
CONFIG_LOG_MODE_MINIMAL=y
#CONFIG_LOG_PROCESS_THREAD_PRIORITY=-15
#CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY=y
CONFIG_HEAP_MEM_POOL_SIZE=2048
CONFIG_IPC_SERVICE=y
CONFIG_IPC_SERVICE_LOG_LEVEL_INF=y
CONFIG_MBOX=y
CONFIG_WATCHDOG=y
CONFIG_REBOOT=y

452
tests/subsys/ipc/ipc_sessions/remote/src/remote.c

@ -0,0 +1,452 @@ @@ -0,0 +1,452 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/ipc/ipc_service.h>
#include <zephyr/drivers/watchdog.h>
#include <zephyr/sys/reboot.h>
#include <test_commands.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(remote, LOG_LEVEL_INF);
#define IPC_TEST_EV_REBOND 0x01
#define IPC_TEST_EV_BOND 0x02
#define IPC_TEST_EV_TXTEST 0x04
static const struct device *ipc0_instance = DEVICE_DT_GET(DT_NODELABEL(ipc0));
static volatile bool ipc0_bounded;
K_SEM_DEFINE(bound_sem, 0, 1);
K_EVENT_DEFINE(ipc_ev_req);
struct ipc_xfer_params {
uint32_t blk_size;
uint32_t blk_cnt;
unsigned int seed;
int result;
};
static struct ipc_xfer_params ipc_rx_params;
static struct ipc_xfer_params ipc_tx_params;
static struct k_timer timer_reboot;
static struct k_timer timer_rebond;
static void ep_bound(void *priv);
static void ep_unbound(void *priv);
static void ep_recv(const void *data, size_t len, void *priv);
static void ep_error(const char *message, void *priv);
static struct ipc_ept_cfg ep_cfg = {
.cb = {
.bound = ep_bound,
.unbound = ep_unbound,
.received = ep_recv,
.error = ep_error
},
};
/**
 * @brief Trying to reset by WDT
 *
 * Installs a watchdog timeout with a 10 ms window and WDT_FLAG_RESET_SOC, then
 * tries progressively less restrictive setup options. On the first successful
 * setup the CPU idles until the watchdog fires.
 *
 * @note If this function return, it means it fails
 */
static int reboot_by_wdt(void)
{
	int err;
	/* NULL when the board has no watchdog0 alias - reported as -ENOTSUP. */
	static const struct device *const wdt =
		COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(DT_ALIAS(watchdog0)),
			    (DEVICE_DT_GET(DT_ALIAS(watchdog0))), (NULL));
	static const struct wdt_timeout_cfg m_cfg_wdt = {
		.callback = NULL,
		.flags = WDT_FLAG_RESET_SOC,
		.window.max = 10,
	};
	/* Option sets tried in order; last resort is no options at all. */
	static const uint8_t wdt_options[] = {
		WDT_OPT_PAUSE_HALTED_BY_DBG | WDT_OPT_PAUSE_IN_SLEEP,
		WDT_OPT_PAUSE_IN_SLEEP,
		0
	};

	if (!wdt) {
		return -ENOTSUP;
	}
	if (!device_is_ready(wdt)) {
		LOG_ERR("WDT device is not ready");
		return -EIO;
	}

	err = wdt_install_timeout(wdt, &m_cfg_wdt);
	if (err < 0) {
		LOG_ERR("WDT install error");
		return -EIO;
	}

	for (size_t i = 0; i < ARRAY_SIZE(wdt_options); ++i) {
		err = wdt_setup(wdt, wdt_options[i]);
		if (err < 0) {
			LOG_ERR("Failed WDT setup with options = %u", wdt_options[i]);
		} else {
			/* We are ok with the configuration:
			 * just wait for the WDT to trigger
			 */
			for (;;) {
				k_cpu_idle();
			}
		}
	}
	return -EIO;
}
/**
 * @brief Just force to reboot, anyway you find possible
 *
 * Prefers a watchdog-driven SoC reset; if that is unavailable or fails,
 * falls back to a cold sys_reboot(). Never returns.
 */
FUNC_NORETURN static void reboot_anyway(void)
{
	reboot_by_wdt();
	/* If WDT restart fails - try another way */
	sys_reboot(SYS_REBOOT_COLD);
}
/* IPC endpoint "bound" callback: record the state and wake the waiter. */
static void ep_bound(void *priv)
{
	ipc0_bounded = true;
	k_sem_give(&bound_sem);
	LOG_INF("Endpoint bounded");
}

/* IPC endpoint "unbound" callback: record the state, wake any waiter and
 * request re-binding from the main loop (callbacks must not block).
 */
static void ep_unbound(void *priv)
{
	ipc0_bounded = false;
	k_sem_give(&bound_sem);
	LOG_INF("Endpoint unbounded");
	/* Try to restore the connection */
	k_event_set(&ipc_ev_req, IPC_TEST_EV_BOND);
}
/* IPC endpoint receive callback: dispatch test commands coming from the host
 * side. Runs in IPC callback context, so long-running actions (rebond, reboot,
 * TX test) are deferred to timers or to the main loop via ipc_ev_req.
 */
static void ep_recv(const void *data, size_t len, void *priv)
{
	int ret;
	const struct ipc_test_cmd *cmd = data;
	struct ipc_ept *ep = priv;

	if (len < sizeof(struct ipc_test_cmd)) {
		/* %zu: len and sizeof() are size_t - %u would be a format mismatch. */
		LOG_ERR("The unexpected size of received data: %zu < %zu", len,
			sizeof(struct ipc_test_cmd));
		/* Dropping further processing */
		return;
	}

	switch (cmd->cmd) {
	case IPC_TEST_CMD_NONE:
		LOG_INF("Command processing: NONE");
		/* Ignore */
		break;
	case IPC_TEST_CMD_PING: {
		LOG_INF("Command processing: PING");
		static const struct ipc_test_cmd cmd_pong = {IPC_TEST_CMD_PONG};

		ret = ipc_service_send(ep, &cmd_pong, sizeof(cmd_pong));
		if (ret < 0) {
			LOG_ERR("PONG response failed: %d", ret);
		}
		break;
	}
	case IPC_TEST_CMD_ECHO: {
		LOG_INF("Command processing: ECHO");
		struct ipc_test_cmd *cmd_rsp = k_malloc(len);

		if (!cmd_rsp) {
			LOG_ERR("ECHO response failed: memory allocation");
			break;
		}
		cmd_rsp->cmd = IPC_TEST_CMD_ECHO_RSP;
		memcpy(cmd_rsp->data, cmd->data, len - sizeof(struct ipc_test_cmd));
		ret = ipc_service_send(ep, cmd_rsp, len);
		k_free(cmd_rsp);
		if (ret < 0) {
			LOG_ERR("ECHO response failed: %d", ret);
		}
		break;
	}
	case IPC_TEST_CMD_REBOND: {
		/* Fixed copy-paste: this is the REBOND command, not REBOOT. */
		LOG_INF("Command processing: REBOND");
		struct ipc_test_cmd_rebond *cmd_rebond = (struct ipc_test_cmd_rebond *)cmd;

		k_timer_start(&timer_rebond, K_MSEC(cmd_rebond->timeout_ms), K_FOREVER);
		break;
	}
	case IPC_TEST_CMD_REBOOT: {
		LOG_INF("Command processing: REBOOT");
		struct ipc_test_cmd_reboot *cmd_reboot = (struct ipc_test_cmd_reboot *)cmd;

		k_timer_start(&timer_reboot, K_MSEC(cmd_reboot->timeout_ms), K_FOREVER);
		break;
	}
	case IPC_TEST_CMD_RXSTART: {
		LOG_INF("Command processing: RXSTART");
		struct ipc_test_cmd_xstart *cmd_rxstart = (struct ipc_test_cmd_xstart *)cmd;

		ipc_rx_params.blk_size = cmd_rxstart->blk_size;
		ipc_rx_params.blk_cnt = cmd_rxstart->blk_cnt;
		ipc_rx_params.seed = cmd_rxstart->seed;
		ipc_rx_params.result = 0;
		break;
	}
	case IPC_TEST_CMD_TXSTART: {
		LOG_INF("Command processing: TXSTART");
		struct ipc_test_cmd_xstart *cmd_txstart = (struct ipc_test_cmd_xstart *)cmd;

		ipc_tx_params.blk_size = cmd_txstart->blk_size;
		ipc_tx_params.blk_cnt = cmd_txstart->blk_cnt;
		ipc_tx_params.seed = cmd_txstart->seed;
		ipc_tx_params.result = 0;
		/* Sending is done from the main loop, not from this callback. */
		k_event_set(&ipc_ev_req, IPC_TEST_EV_TXTEST);
		break;
	}
	case IPC_TEST_CMD_RXGET: {
		LOG_INF("Command processing: RXGET");
		struct ipc_test_cmd_xstat cmd_stat = {
			.base.cmd = IPC_TEST_CMD_XSTAT,
			.blk_cnt = ipc_rx_params.blk_cnt,
			.result = ipc_rx_params.result
		};

		ret = ipc_service_send(ep, &cmd_stat, sizeof(cmd_stat));
		if (ret < 0) {
			LOG_ERR("RXGET response send failed");
		}
		break;
	}
	case IPC_TEST_CMD_TXGET: {
		LOG_INF("Command processing: TXGET");
		struct ipc_test_cmd_xstat cmd_stat = {
			.base.cmd = IPC_TEST_CMD_XSTAT,
			.blk_cnt = ipc_tx_params.blk_cnt,
			.result = ipc_tx_params.result
		};

		ret = ipc_service_send(ep, &cmd_stat, sizeof(cmd_stat));
		if (ret < 0) {
			LOG_ERR("TXGET response send failed");
		}
		break;
	}
	case IPC_TEST_CMD_XDATA: {
		if ((ipc_rx_params.blk_cnt % 1000) == 0) {
			/* Logging only every N-th command not to slowdown the transfer too much */
			LOG_INF("Command processing: XDATA (left: %u)", ipc_rx_params.blk_cnt);
		}
		/* Ignore if there is an error */
		if (ipc_rx_params.result) {
			LOG_ERR("There is error in Rx transfer already");
			break;
		}
		if (len != ipc_rx_params.blk_size + offsetof(struct ipc_test_cmd, data)) {
			LOG_ERR("Size mismatch");
			ipc_rx_params.result = -EMSGSIZE;
			break;
		}
		if (ipc_rx_params.blk_cnt <= 0) {
			LOG_ERR("Data not expected");
			ipc_rx_params.result = -EFAULT;
			break;
		}
		/* Check the data against the shared pseudo-random sequence. */
		for (size_t n = 0; n < ipc_rx_params.blk_size; ++n) {
			uint8_t expected = (uint8_t)rand_r(&ipc_rx_params.seed);

			if (cmd->data[n] != expected) {
				LOG_ERR("Data value error at %u", n);
				ipc_rx_params.result = -EINVAL;
				break;
			}
		}
		ipc_rx_params.blk_cnt -= 1;
		break;
	}
	default:
		LOG_ERR("Unhandled command: %u", cmd->cmd);
		break;
	}
}
/* IPC endpoint error callback: log only - the test does not recover here. */
static void ep_error(const char *message, void *priv)
{
	LOG_ERR("EP error: \"%s\"", message);
}
/* Open the IPC instance, register the single test endpoint and block until the
 * binding completes. Returns 0 on success or a negative IPC service error.
 */
static int init_ipc(void)
{
	int ret;
	static struct ipc_ept ep;

	/* Store the pointer to the endpoint so callbacks receive it as priv. */
	ep_cfg.priv = &ep;

	LOG_INF("IPC-sessions test remote started");

	/* -EALREADY is fine: the instance may have been opened by the peer. */
	ret = ipc_service_open_instance(ipc0_instance);
	if ((ret < 0) && (ret != -EALREADY)) {
		LOG_ERR("ipc_service_open_instance() failure: %d", ret);
		return ret;
	}

	ret = ipc_service_register_endpoint(ipc0_instance, &ep, &ep_cfg);
	if (ret < 0) {
		LOG_ERR("ipc_service_register_endpoint() failure: %d", ret);
		return ret;
	}

	/* The semaphore may also be given by ep_unbound(); loop until bound. */
	do {
		k_sem_take(&bound_sem, K_FOREVER);
	} while (!ipc0_bounded);

	/* Fixed typo in the log message ("estabilished"). */
	LOG_INF("IPC connection established");
	return 0;
}
/* Timer callback: defer the rebond to the main loop via the event object. */
static void timer_rebond_cb(struct k_timer *timer)
{
	(void)timer;
	LOG_INF("Setting rebond request");
	k_event_set(&ipc_ev_req, IPC_TEST_EV_REBOND);
}

/* Timer callback: reset the CPU; reboot_anyway() never returns. */
static void timer_reboot_cb(struct k_timer *timer)
{
	(void)timer;
	LOG_INF("Resetting CPU");
	reboot_anyway();
	__ASSERT(0, "Still working after reboot request");
}
/* Remote-side main loop: bind the IPC endpoint, then service deferred requests
 * (rebond, bond-after-unbound, TX throughput test) signalled via ipc_ev_req.
 */
int main(void)
{
	int ret;

	k_timer_init(&timer_rebond, timer_rebond_cb, NULL);
	k_timer_init(&timer_reboot, timer_reboot_cb, NULL);

	ret = init_ipc();
	if (ret) {
		return ret;
	}

	while (1) {
		uint32_t ev;

		/* Wait for any request flag, then consume the ones received. */
		ev = k_event_wait(&ipc_ev_req, ~0U, false, K_FOREVER);
		k_event_clear(&ipc_ev_req, ev);

		if (ev & IPC_TEST_EV_REBOND) {
			/* Rebond now: deregister and register the endpoint again. */
			ret = ipc_service_deregister_endpoint(ep_cfg.priv);
			if (ret) {
				LOG_ERR("ipc_service_deregister_endpoint() failure: %d", ret);
				continue;
			}
			ipc0_bounded = false;
			ret = ipc_service_register_endpoint(ipc0_instance, ep_cfg.priv, &ep_cfg);
			if (ret < 0) {
				LOG_ERR("ipc_service_register_endpoint() failure: %d", ret);
				return ret;
			}
			do {
				k_sem_take(&bound_sem, K_FOREVER);
			} while (!ipc0_bounded);
		}

		if (ev & IPC_TEST_EV_BOND) {
			LOG_INF("Bonding endpoint");
			/* Bond missing endpoint (requested by ep_unbound()). */
			if (!ipc0_bounded) {
				ret = ipc_service_register_endpoint(ipc0_instance, ep_cfg.priv,
								    &ep_cfg);
				if (ret < 0) {
					LOG_ERR("ipc_service_register_endpoint() failure: %d", ret);
					return ret;
				}
				do {
					k_sem_take(&bound_sem, K_FOREVER);
				} while (!ipc0_bounded);
			}
			LOG_INF("Bonding done");
		}

		if (ev & IPC_TEST_EV_TXTEST) {
			LOG_INF("Transfer TX test started");
			size_t cmd_size = ipc_tx_params.blk_size + offsetof(struct ipc_test_cmd,
									    data);
			struct ipc_test_cmd *cmd_data = k_malloc(cmd_size);

			if (!cmd_data) {
				LOG_ERR("Cannot create TX test buffer");
				ipc_tx_params.result = -ENOMEM;
				continue;
			}
			LOG_INF("Initial seed: %u", ipc_tx_params.seed);
			cmd_data->cmd = IPC_TEST_CMD_XDATA;
			for (/* No init */; ipc_tx_params.blk_cnt > 0; --ipc_tx_params.blk_cnt) {
				/* NOTE(review): this inner ret shadows the outer one. */
				int ret;

				if (ipc_tx_params.blk_cnt % 1000 == 0) {
					LOG_INF("Sending: %u blocks left", ipc_tx_params.blk_cnt);
				}
				/* Generate the block data */
				for (size_t n = 0; n < ipc_tx_params.blk_size; ++n) {
					cmd_data->data[n] = (uint8_t)rand_r(&ipc_tx_params.seed);
				}
				/* Busy-retry while the backend is out of buffer space. */
				do {
					ret = ipc_service_send(ep_cfg.priv, cmd_data, cmd_size);
				} while (ret == -ENOMEM);
				if (ret < 0) {
					LOG_ERR("Cannot send TX test buffer: %d", ret);
					ipc_tx_params.result = -EIO;
					/* NOTE(review): `continue` proceeds with the NEXT
					 * block after a failure - confirm a `break` was
					 * not intended here.
					 */
					continue;
				}
			}
			k_free(cmd_data);
			LOG_INF("Transfer TX test finished");
		}
	}
	/* Unreachable: the service loop never exits. */
	return 0;
}

65
tests/subsys/ipc/ipc_sessions/src/data_queue.c

@ -0,0 +1,65 @@ @@ -0,0 +1,65 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "data_queue.h"
/* Alignment of heap allocations backing queued blocks. */
#define DATA_QUEUE_MEMORY_ALIGN sizeof(uint32_t)

/* Internal layout of one queued block: kernel header + payload size + payload. */
struct data_queue_format {
	uint32_t header; /* Required by kernel k_queue_append */
	size_t size;     /* Payload size in bytes. */
	uint32_t data[]; /* Payload (flexible array member). */
};
/* Initialize a data queue: the backing heap uses the caller-provided memory
 * region, and the FIFO starts out empty.
 */
void data_queue_init(struct data_queue *q, void *mem, size_t bytes)
{
	k_queue_init(&q->q);
	k_heap_init(&q->h, mem, bytes);
}
/* Copy a data block into the queue.
 *
 * The block (header + payload) is allocated from the queue's heap, waiting up
 * to @p timeout for memory, then appended to the FIFO.
 *
 * @retval 0 on success, -ENOMEM when the allocation fails.
 */
int data_queue_put(struct data_queue *q, const void *data, size_t bytes, k_timeout_t timeout)
{
	const size_t total_size = bytes + sizeof(struct data_queue_format);
	struct data_queue_format *node;

	node = k_heap_aligned_alloc(&q->h, DATA_QUEUE_MEMORY_ALIGN, total_size, timeout);
	if (node == NULL) {
		return -ENOMEM;
	}

	node->size = bytes;
	memcpy(node->data, data, bytes);
	k_queue_append(&q->q, node);

	return 0;
}
/* Take the oldest block from the queue, waiting up to @p timeout.
 *
 * Returns a pointer to the payload (release with data_queue_release()) or NULL
 * on timeout. When @p size is non-NULL it receives the payload length.
 */
void *data_queue_get(struct data_queue *q, size_t *size, k_timeout_t timeout)
{
	struct data_queue_format *node = k_queue_get(&q->q, timeout);

	if (node == NULL) {
		return NULL;
	}

	if (size != NULL) {
		*size = node->size;
	}

	return node->data;
}
/* Return a payload pointer obtained from data_queue_get() back to the heap.
 * The enclosing block is recovered from the payload pointer.
 */
void data_queue_release(struct data_queue *q, void *data)
{
	struct data_queue_format *node = CONTAINER_OF(data, struct data_queue_format, data);

	k_heap_free(&q->h, node);
}
/* Non-blocking emptiness check; returns non-zero when the queue is empty. */
int data_queue_is_empty(struct data_queue *q)
{
	return k_queue_is_empty(&q->q);
}

25
tests/subsys/ipc/ipc_sessions/src/data_queue.h

@ -0,0 +1,25 @@ @@ -0,0 +1,25 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef DATA_QUEUE_H
/* Fixed: the guard macro was tested but never defined, so the include guard
 * did not protect against multiple inclusion.
 */
#define DATA_QUEUE_H

#include <zephyr/kernel.h>

/* FIFO of variable-size data blocks backed by a caller-provided memory region. */
struct data_queue {
	struct k_queue q; /* FIFO of queued blocks. */
	struct k_heap h;  /* Heap the blocks are allocated from. */
};

/* Initialize @p q; blocks are allocated from the @p bytes-sized region @p mem. */
void data_queue_init(struct data_queue *q, void *mem, size_t bytes);
/* Copy @p bytes of @p data into the queue; returns 0 or -ENOMEM. */
int data_queue_put(struct data_queue *q, const void *data, size_t bytes, k_timeout_t timeout);
/* Get the oldest payload (NULL on timeout); *size receives its length. */
void *data_queue_get(struct data_queue *q, size_t *size, k_timeout_t timeout);
/* Free a payload obtained from data_queue_get(). */
void data_queue_release(struct data_queue *q, void *data);
/* Non-zero when the queue is empty. */
int data_queue_is_empty(struct data_queue *q);

#endif /* DATA_QUEUE_H */

469
tests/subsys/ipc/ipc_sessions/src/main.c

@ -0,0 +1,469 @@ @@ -0,0 +1,469 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/ztest.h>
#include <zephyr/ipc/ipc_service.h>
#include <test_commands.h>
#include "data_queue.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ipc_sessions, LOG_LEVEL_INF);
/* IPC lifecycle events forwarded from the endpoint callbacks to the tests. */
enum test_ipc_events {
	TEST_IPC_EVENT_BOUNDED,
	TEST_IPC_EVENT_UNBOUNDED,
	TEST_IPC_EVENT_ERROR
};
/* One queued IPC event together with the endpoint it refers to. */
struct test_ipc_event_state {
	enum test_ipc_events ev; /* Which event occurred. */
	/* Fixed type: the callbacks store a struct ipc_ept * (the cb priv
	 * pointer), and tests compare it against &ep - "struct ipc_ep" was a
	 * typo that only compiled because it stayed an incomplete type.
	 */
	struct ipc_ept *ep;
};
/* IPC instance under test, taken from the devicetree ipc0 node. */
static const struct device *ipc0_instance = DEVICE_DT_GET(DT_NODELABEL(ipc0));
/* True while the endpoint is bound; updated from IPC callbacks. */
static volatile bool ipc0_bounded;
/* Queue of bound/unbound/error events produced by the callbacks. */
K_MSGQ_DEFINE(ipc_events, sizeof(struct test_ipc_event_state), 16, 4);

/* Backing storage for the received-data queue. */
static uint32_t data_queue_memory[ROUND_UP(CONFIG_IPC_TEST_MSG_HEAP_SIZE, sizeof(uint32_t))];
static struct data_queue ipc_data_queue;

/* XDATA command with a full test payload block. */
struct test_cmd_xdata {
	struct ipc_test_cmd base;
	uint8_t data[CONFIG_IPC_TEST_BLOCK_SIZE];
};

/* When set, replaces the default ep_recv() handling (used by transfer tests). */
static void (*ep_received_override_cb)(const void *data, size_t len, void *priv);
/* "Bound" callback: mark the connection up and queue a BOUNDED event. */
static void ep_bound(void *priv)
{
	int ret;
	struct test_ipc_event_state ev = {
		.ev = TEST_IPC_EVENT_BOUNDED,
		.ep = priv
	};

	ipc0_bounded = true;
	ret = k_msgq_put(&ipc_events, &ev, K_NO_WAIT);
	if (ret) {
		LOG_ERR("Cannot put event in queue: %d", ret);
	}
}

/* "Unbound" callback: mark the connection down and queue an UNBOUNDED event. */
static void ep_unbound(void *priv)
{
	int ret;
	struct test_ipc_event_state ev = {
		.ev = TEST_IPC_EVENT_UNBOUNDED,
		.ep = priv
	};

	ipc0_bounded = false;
	ret = k_msgq_put(&ipc_events, &ev, K_NO_WAIT);
	if (ret) {
		LOG_ERR("Cannot put event in queue: %d", ret);
	}
}

/* Receive callback: pass data to the override handler if installed, otherwise
 * copy it into the data queue for the test body to consume.
 */
static void ep_recv(const void *data, size_t len, void *priv)
{
	int ret;

	if (ep_received_override_cb) {
		ep_received_override_cb(data, len, priv);
	} else {
		ret = data_queue_put(&ipc_data_queue, data, len, K_NO_WAIT);
		__ASSERT(ret >= 0, "Cannot put data into queue: %d", ret);
		(void)ret;
	}
}

/* Error callback: queue an ERROR event for the test to detect. */
static void ep_error(const char *message, void *priv)
{
	int ret;
	struct test_ipc_event_state ev = {
		.ev = TEST_IPC_EVENT_ERROR,
		.ep = priv
	};

	ret = k_msgq_put(&ipc_events, &ev, K_NO_WAIT);
	if (ret) {
		LOG_ERR("Cannot put event in queue: %d", ret);
	}
}
/* Endpoint configuration shared by all tests; priv is set to &ep at setup. */
static struct ipc_ept_cfg ep_cfg = {
	.cb = {
		.bound = ep_bound,
		.unbound = ep_unbound,
		.received = ep_recv,
		.error = ep_error
	},
};

/* The single endpoint used by the whole suite. */
static struct ipc_ept ep;
/**
 * @brief Establish the IPC connection before any test runs.
 *
 * Opens the IPC instance (tolerating -EALREADY), registers the endpoint and
 * waits for the bound event before letting the suite start.
 */
void *test_suite_setup(void)
{
	int ret;
	struct test_ipc_event_state ev;

	data_queue_init(&ipc_data_queue, data_queue_memory, sizeof(data_queue_memory));

	ret = ipc_service_open_instance(ipc0_instance);
	zassert_true((ret >= 0) || ret == -EALREADY, "ipc_service_open_instance() failure: %d",
		     ret);

	/* Store the pointer to the endpoint */
	ep_cfg.priv = &ep;

	ret = ipc_service_register_endpoint(ipc0_instance, &ep, &ep_cfg);
	zassert_true((ret >= 0), "ipc_service_register_endpoint() failure: %d", ret);

	/* Drain events until the bound flag set by ep_bound() is observed. */
	do {
		ret = k_msgq_get(&ipc_events, &ev, K_MSEC(1000));
		zassert_ok(ret, "Cannot bound to the remote interface");
	} while (!ipc0_bounded);

	return NULL;
}
/**
 * @brief Prepare the test structures
 *
 * Resets the receive override and discards stale IPC events so each test
 * starts from a clean state.
 */
void test_suite_before(void *fixture)
{
	ep_received_override_cb = NULL;
	k_msgq_purge(&ipc_events);
}
/* Send a PING command and assert a single well-formed PONG response arrives.
 * Shared by several tests to verify the link works after reconnection.
 */
static void execute_test_ping_pong(void)
{
	int ret;
	static const struct ipc_test_cmd cmd_ping = { IPC_TEST_CMD_PING };
	struct ipc_test_cmd *cmd_rsp;
	size_t cmd_rsp_size;

	/* data_queue_is_empty() returns non-zero when empty - require empty. */
	zassert_not_ok(data_queue_is_empty(&ipc_data_queue),
		       "IPC data queue contains unexpected data");

	/* Sending data */
	ret = ipc_service_send(&ep, &cmd_ping, sizeof(cmd_ping));
	zassert_equal(ret, sizeof(cmd_ping), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_ping));

	/* Waiting for response */
	cmd_rsp = data_queue_get(&ipc_data_queue, &cmd_rsp_size, K_MSEC(1000));
	zassert_not_null(cmd_rsp, "No command response on time");
	zassert_equal(cmd_rsp_size, sizeof(struct ipc_test_cmd),
		      "Unexpected response size: %u, expected: %u", cmd_rsp_size,
		      sizeof(struct ipc_test_cmd));
	zassert_equal(cmd_rsp->cmd, IPC_TEST_CMD_PONG,
		      "Unexpected response cmd value: %u, expected: %u", cmd_rsp->cmd,
		      IPC_TEST_CMD_PONG);
	data_queue_release(&ipc_data_queue, cmd_rsp);
}
/* Basic connectivity: a PING must be answered with a PONG. */
ZTEST(ipc_sessions, test_ping_pong)
{
	execute_test_ping_pong();
}
/* Send an ECHO command with a payload and verify the remote mirrors it back. */
ZTEST(ipc_sessions, test_echo)
{
	int ret;
	static const struct ipc_test_cmd cmd_echo = {
		IPC_TEST_CMD_ECHO, {'H', 'e', 'l', 'l', 'o', '!'}
	};
	struct ipc_test_cmd *cmd_rsp;
	size_t cmd_rsp_size;

	zassert_not_ok(data_queue_is_empty(&ipc_data_queue),
		       "IPC data queue contains unexpected data");

	/* Sending data */
	ret = ipc_service_send(&ep, &cmd_echo, sizeof(cmd_echo));
	zassert_equal(ret, sizeof(cmd_echo), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_echo));

	/* Waiting for response */
	cmd_rsp = data_queue_get(&ipc_data_queue, &cmd_rsp_size, K_MSEC(1000));
	zassert_not_null(cmd_rsp, "No command response on time");

	/* Checking response */
	zassert_equal(cmd_rsp_size, sizeof(cmd_echo), "Unexpected response size: %u, expected: %u",
		      cmd_rsp_size, sizeof(cmd_echo));
	zassert_equal(cmd_rsp->cmd, IPC_TEST_CMD_ECHO_RSP,
		      "Unexpected response cmd value: %u, expected: %u", cmd_rsp->cmd,
		      IPC_TEST_CMD_ECHO_RSP);
	zassert_mem_equal(cmd_rsp->data, cmd_echo.data,
			  sizeof(cmd_echo) - sizeof(struct ipc_test_cmd),
			  "Unexpected response content");
	data_queue_release(&ipc_data_queue, cmd_rsp);
}
ZTEST(ipc_sessions, test_reboot)
{
Z_TEST_SKIP_IFDEF(CONFIG_IPC_TEST_SKIP_UNBOUND);
Z_TEST_SKIP_IFDEF(CONFIG_IPC_TEST_SKIP_CORE_RESET);
int ret;
struct test_ipc_event_state ev;
static const struct ipc_test_cmd_reboot cmd_rebond = { { IPC_TEST_CMD_REBOOT }, 10 };
zassert_not_ok(data_queue_is_empty(&ipc_data_queue),
"IPC data queue contains unexpected data");
/* Sending data */
ret = ipc_service_send(&ep, &cmd_rebond, sizeof(cmd_rebond));
zassert_equal(ret, sizeof(cmd_rebond), "ipc_service_send failed: %d, expected: %u", ret,
sizeof(cmd_rebond));
/* Waiting for IPC to unbound */
ret = k_msgq_get(&ipc_events, &ev, K_MSEC(1000));
zassert_ok(ret, "No IPC unbound event on time");
zassert_equal(ev.ev, TEST_IPC_EVENT_UNBOUNDED, "Unexpected IPC event: %u, expected: %u",
ev.ev, TEST_IPC_EVENT_UNBOUNDED);
zassert_equal_ptr(ev.ep, &ep, "Unexpected endpoint (unbound)");
/* Reconnecting */
ret = ipc_service_register_endpoint(ipc0_instance, &ep, &ep_cfg);
zassert_true((ret >= 0), "ipc_service_register_endpoint() failure: %d", ret);
/* Waiting for bound */
ret = k_msgq_get(&ipc_events, &ev, K_MSEC(1000));
zassert_ok(ret, "No IPC bound event on time");
zassert_equal(ev.ev, TEST_IPC_EVENT_BOUNDED, "Unexpected IPC event: %u, expected: %u",
ev.ev, TEST_IPC_EVENT_UNBOUNDED);
zassert_equal_ptr(ev.ep, &ep, "Unexpected endpoint (bound)");
/* After reconnection - test communication */
execute_test_ping_pong();
}
/* Ask the remote core to rebond (deregister/re-register its endpoint), verify
 * the local side sees unbound, then re-register and confirm communication.
 */
ZTEST(ipc_sessions, test_rebond)
{
	Z_TEST_SKIP_IFDEF(CONFIG_IPC_TEST_SKIP_UNBOUND);
	int ret;
	struct test_ipc_event_state ev;
	static const struct ipc_test_cmd_reboot cmd_rebond = { { IPC_TEST_CMD_REBOND }, 10 };

	zassert_not_ok(data_queue_is_empty(&ipc_data_queue),
		       "IPC data queue contains unexpected data");

	/* Sending data */
	ret = ipc_service_send(&ep, &cmd_rebond, sizeof(cmd_rebond));
	zassert_equal(ret, sizeof(cmd_rebond), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_rebond));

	/* Waiting for IPC to unbound */
	ret = k_msgq_get(&ipc_events, &ev, K_MSEC(1000));
	zassert_ok(ret, "No IPC unbound event on time");
	zassert_equal(ev.ev, TEST_IPC_EVENT_UNBOUNDED, "Unexpected IPC event: %u, expected: %u",
		      ev.ev, TEST_IPC_EVENT_UNBOUNDED);
	zassert_equal_ptr(ev.ep, &ep, "Unexpected endpoint (unbound)");

	/* Reconnecting */
	ret = ipc_service_register_endpoint(ipc0_instance, &ep, &ep_cfg);
	zassert_true((ret >= 0), "ipc_service_register_endpoint() failure: %d", ret);

	/* Waiting for bound */
	ret = k_msgq_get(&ipc_events, &ev, K_MSEC(1000));
	zassert_ok(ret, "No IPC bound event on time");
	/* Fixed message argument: the expected event here is BOUNDED. */
	zassert_equal(ev.ev, TEST_IPC_EVENT_BOUNDED, "Unexpected IPC event: %u, expected: %u",
		      ev.ev, TEST_IPC_EVENT_BOUNDED);
	zassert_equal_ptr(ev.ep, &ep, "Unexpected endpoint (bound)");

	/* After reconnection - test communication */
	execute_test_ping_pong();
}
ZTEST(ipc_sessions, test_local_rebond)
{
Z_TEST_SKIP_IFDEF(CONFIG_IPC_TEST_SKIP_UNBOUND);
int ret;
struct test_ipc_event_state ev;
zassert_not_ok(data_queue_is_empty(&ipc_data_queue),
"IPC data queue contains unexpected data");
/* Rebond locally */
ret = ipc_service_deregister_endpoint(ep_cfg.priv);
zassert_ok(ret, "ipc_service_deregister_endpoint() failure: %d", ret);
ipc0_bounded = false;
ret = ipc_service_register_endpoint(ipc0_instance, &ep, &ep_cfg);
zassert_true((ret >= 0), "ipc_service_register_endpoint() failure: %d", ret);
do {
ret = k_msgq_get(&ipc_events, &ev, K_MSEC(1000));
zassert_ok(ret, "Cannot bound to the remote interface");
} while (!ipc0_bounded);
/* After reconnection - test communication */
execute_test_ping_pong();
}
/**
 * Test a long TX transfer: stream CONFIG_IPC_TEST_BLOCK_CNT pseudo-random
 * blocks to the remote and verify, via the remote's RX status, that all of
 * them were received correctly.
 */
ZTEST(ipc_sessions, test_tx_long)
{
#define SEED_TXSTART_VALUE 1
	int ret;
	static const struct ipc_test_cmd_xstart cmd_rxstart = {
		.base = { .cmd = IPC_TEST_CMD_RXSTART },
		.blk_size = CONFIG_IPC_TEST_BLOCK_SIZE,
		.blk_cnt = CONFIG_IPC_TEST_BLOCK_CNT,
		.seed = SEED_TXSTART_VALUE };
	static const struct ipc_test_cmd cmd_rxget = { IPC_TEST_CMD_RXGET };
	struct test_cmd_xdata cmd_txdata = { .base = { .cmd = IPC_TEST_CMD_XDATA } };
	unsigned int seed = SEED_TXSTART_VALUE;
	struct ipc_test_cmd_xstat *cmd_rxstat;
	size_t cmd_rsp_size;

	zassert_not_ok(data_queue_is_empty(&ipc_data_queue),
		       "IPC data queue contains unexpected data");
	/* Sending command for the remote to start receiving the data */
	ret = ipc_service_send(&ep, &cmd_rxstart, sizeof(cmd_rxstart));
	zassert_equal(ret, sizeof(cmd_rxstart), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_rxstart));
	/* Check current status */
	ret = ipc_service_send(&ep, &cmd_rxget, sizeof(cmd_rxget));
	zassert_equal(ret, sizeof(cmd_rxget), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_rxget));
	cmd_rxstat = data_queue_get(&ipc_data_queue, &cmd_rsp_size, K_MSEC(1000));
	zassert_not_null(cmd_rxstat, "No command response on time");
	/* Fix: the failure message must print sizeof(*cmd_rxstat) (the
	 * structure size), not sizeof(cmd_rxstat) (a pointer size).
	 */
	zassert_equal(cmd_rsp_size, sizeof(*cmd_rxstat),
		      "Unexpected response size: %u, expected: %u", cmd_rsp_size,
		      sizeof(*cmd_rxstat));
	zassert_equal(cmd_rxstat->base.cmd, IPC_TEST_CMD_XSTAT,
		      "Unexpected command in response: %u", cmd_rxstat->base.cmd);
	zassert_ok(cmd_rxstat->result, "RX result not ok: %d", cmd_rxstat->result);
	zassert_equal(cmd_rxstat->blk_cnt, cmd_rxstart.blk_cnt,
		      "RX blk_cnt in status does not match start command: %u vs %u",
		      cmd_rxstat->blk_cnt, cmd_rxstart.blk_cnt);
	data_queue_release(&ipc_data_queue, cmd_rxstat);
	/* Sending data */
	for (size_t blk = 0; blk < cmd_rxstart.blk_cnt; ++blk) {
		/* Fill the block with a reproducible pseudo-random pattern;
		 * the remote regenerates the same stream from the seed.
		 */
		for (size_t n = 0; n < cmd_rxstart.blk_size; ++n) {
			cmd_txdata.data[n] = (uint8_t)rand_r(&seed);
		}
		/* Busy-retry while the backend's TX buffers are full. */
		do {
			ret = ipc_service_send(&ep, &cmd_txdata, sizeof(cmd_txdata));
		} while (ret == -ENOMEM);
		if ((blk % 1000) == 0) {
			LOG_INF("Transfer number: %u of %u", blk, cmd_rxstart.blk_cnt);
		}
		zassert_equal(ret, sizeof(cmd_txdata), "ipc_service_send failed: %d, expected: %u",
			      ret, sizeof(cmd_txdata));
	}
	/* Check current status */
	ret = ipc_service_send(&ep, &cmd_rxget, sizeof(cmd_rxget));
	zassert_equal(ret, sizeof(cmd_rxget), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_rxget));
	cmd_rxstat = data_queue_get(&ipc_data_queue, &cmd_rsp_size, K_MSEC(1000));
	zassert_not_null(cmd_rxstat, "No command response on time");
	/* Fix: same pointer-size vs structure-size message bug as above. */
	zassert_equal(cmd_rsp_size, sizeof(*cmd_rxstat),
		      "Unexpected response size: %u, expected: %u", cmd_rsp_size,
		      sizeof(*cmd_rxstat));
	zassert_equal(cmd_rxstat->base.cmd, IPC_TEST_CMD_XSTAT,
		      "Unexpected command in response: %u", cmd_rxstat->base.cmd);
	zassert_ok(cmd_rxstat->result, "RX result not ok: %d", cmd_rxstat->result);
	/* All blocks consumed: the remote's outstanding count must be 0. */
	zassert_equal(cmd_rxstat->blk_cnt, 0,
		      "RX blk_cnt in status does not match start command: %u vs %u",
		      cmd_rxstat->blk_cnt, 0);
	data_queue_release(&ipc_data_queue, cmd_rxstat);
}
/* State shared between test_rx_long() and its receive callback:
 * - seed: PRNG state used to regenerate the expected data stream;
 * - blk_left: number of data blocks still expected from the remote.
 */
static struct {
unsigned int seed;
size_t blk_left;
} test_rx_long_data;
/* Given by the receive callback once the last expected block arrived. */
K_SEM_DEFINE(test_rx_long_sem, 0, 1);
/**
 * Receive-path override callback used by test_rx_long().
 *
 * Validates one incoming IPC_TEST_CMD_XDATA block against a locally
 * regenerated pseudo-random stream and gives test_rx_long_sem once the
 * last expected block has been consumed.
 *
 * @param data Received message, expected to be a struct test_cmd_xdata.
 * @param len  Received message size in bytes.
 * @param priv Endpoint private data (unused).
 */
static void test_rx_long_rec_cb(const void *data, size_t len, void *priv)
{
	const struct test_cmd_xdata *cmd_rxdata = data;

	zassert_true(test_rx_long_data.blk_left > 0, "No data left to interpret");
	zassert_equal(len, sizeof(*cmd_rxdata),
		      "Unexpected response size: %u, expected: %u", len, sizeof(*cmd_rxdata));
	zassert_equal(cmd_rxdata->base.cmd, IPC_TEST_CMD_XDATA,
		      "Unexpected command in response: %u", cmd_rxdata->base.cmd);
	/* Regenerate the expected pseudo-random payload and compare byte by byte. */
	for (size_t n = 0; n < CONFIG_IPC_TEST_BLOCK_SIZE; ++n) {
		uint8_t expected = (uint8_t)rand_r(&test_rx_long_data.seed);

		zassert_equal(cmd_rxdata->data[n], expected,
			      "Data mismatch at %u while %u blocks left", n,
			      test_rx_long_data.blk_left);
	}
	if (test_rx_long_data.blk_left % 1000 == 0) {
		/* Fix: typo in log message ("Receivng"). */
		LOG_INF("Receiving left: %u", test_rx_long_data.blk_left);
	}
	test_rx_long_data.blk_left -= 1;
	/* Fix: blk_left is size_t (unsigned), so "<= 0" was misleading;
	 * compare against 0 exactly.
	 */
	if (test_rx_long_data.blk_left == 0) {
		LOG_INF("Interpretation marked finished");
		ep_received_override_cb = NULL;
		k_sem_give(&test_rx_long_sem);
	}
}
/**
 * Test a long RX transfer: request CONFIG_IPC_TEST_BLOCK_CNT pseudo-random
 * blocks from the remote, validate each in test_rx_long_rec_cb() and then
 * confirm via the remote's TX status that all blocks were sent.
 */
ZTEST(ipc_sessions, test_rx_long)
{
#define SEED_RXSTART_VALUE 1
	int ret;
	static const struct ipc_test_cmd_xstart cmd_txstart = {
		.base = { .cmd = IPC_TEST_CMD_TXSTART },
		.blk_size = CONFIG_IPC_TEST_BLOCK_SIZE,
		.blk_cnt = CONFIG_IPC_TEST_BLOCK_CNT,
		.seed = SEED_RXSTART_VALUE };
	static const struct ipc_test_cmd cmd_txget = { IPC_TEST_CMD_TXGET };
	struct ipc_test_cmd_xstat *cmd_txstat;
	size_t cmd_rsp_size;

	zassert_not_ok(data_queue_is_empty(&ipc_data_queue),
		       "IPC data queue contains unexpected data");
	/* Configuring the callback to interpret the incoming data */
	test_rx_long_data.seed = SEED_RXSTART_VALUE;
	test_rx_long_data.blk_left = cmd_txstart.blk_cnt;
	ep_received_override_cb = test_rx_long_rec_cb;
	/* Sending command for the remote to start sending the data */
	ret = ipc_service_send(&ep, &cmd_txstart, sizeof(cmd_txstart));
	zassert_equal(ret, sizeof(cmd_txstart), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_txstart));
	/* Waiting for all the data */
	ret = k_sem_take(&test_rx_long_sem, K_SECONDS(30));
	LOG_INF("Interpretation finished");
	zassert_ok(ret, "Incoming packet interpretation timeout");
	/* The callback clears itself once the last block is validated. */
	zassert_is_null(ep_received_override_cb, "Seems like interpretation callback failed");
	/* Check current status */
	ret = ipc_service_send(&ep, &cmd_txget, sizeof(cmd_txget));
	zassert_equal(ret, sizeof(cmd_txget), "ipc_service_send failed: %d, expected: %u", ret,
		      sizeof(cmd_txget));
	cmd_txstat = data_queue_get(&ipc_data_queue, &cmd_rsp_size, K_MSEC(1000));
	zassert_not_null(cmd_txstat, "No command response on time");
	/* Fix: the failure message must print sizeof(*cmd_txstat) (the
	 * structure size), not sizeof(cmd_txstat) (a pointer size).
	 */
	zassert_equal(cmd_rsp_size, sizeof(*cmd_txstat),
		      "Unexpected response size: %u, expected: %u", cmd_rsp_size,
		      sizeof(*cmd_txstat));
	zassert_equal(cmd_txstat->base.cmd, IPC_TEST_CMD_XSTAT,
		      "Unexpected command in response: %u", cmd_txstat->base.cmd);
	/* Fix: this is the remote's TX status, not RX (copy-paste in messages). */
	zassert_ok(cmd_txstat->result, "TX result not ok: %d", cmd_txstat->result);
	zassert_equal(cmd_txstat->blk_cnt, 0,
		      "TX blk_cnt in status does not match start command: %u vs %u",
		      cmd_txstat->blk_cnt, 0);
	data_queue_release(&ipc_data_queue, cmd_txstat);
}
/* Registration of the ipc_sessions test suite: no predicate (always runs),
 * one-time setup via test_suite_setup(), per-test preparation via
 * test_suite_before(), no after/teardown hooks.
 */
ZTEST_SUITE(
/* suite_name */ ipc_sessions,
/* ztest_suite_predicate_t */ NULL,
/* ztest_suite_setup_t */ test_suite_setup,
/* ztest_suite_before_t */ test_suite_before,
/* ztest_suite_after_t */ NULL,
/* ztest_suite_teardown_t */ NULL
);

26
tests/subsys/ipc/ipc_sessions/sysbuild.cmake

@ -0,0 +1,26 @@ @@ -0,0 +1,26 @@
#
# Copyright (c) 2024 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
if("${SB_CONFIG_REMOTE_BOARD}" STREQUAL "")
message(FATAL_ERROR "REMOTE_BOARD must be set to a valid board name")
endif()
# Add remote project
ExternalZephyrProject_Add(
APPLICATION remote
SOURCE_DIR ${APP_DIR}/remote
BOARD ${SB_CONFIG_REMOTE_BOARD}
BOARD_REVISION ${BOARD_REVISION}
)
set_property(GLOBAL APPEND PROPERTY PM_DOMAINS CPUNET)
set_property(GLOBAL APPEND PROPERTY PM_CPUNET_IMAGES remote)
set_property(GLOBAL PROPERTY DOMAIN_APP_CPUNET remote)
set(CPUNET_PM_DOMAIN_DYNAMIC_PARTITION remote CACHE INTERNAL "")
# Add a dependency so that the remote sample will be built and flashed first
sysbuild_add_dependencies(CONFIGURE ${DEFAULT_IMAGE} remote)
# Add dependency so that the remote image is flashed first.
sysbuild_add_dependencies(FLASH ${DEFAULT_IMAGE} remote)

4
tests/subsys/ipc/ipc_sessions/sysbuild_cpuppr.conf

@ -0,0 +1,4 @@ @@ -0,0 +1,4 @@
# Copyright (c) 2024 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
SB_CONFIG_REMOTE_BOARD="nrf54h20dk/nrf54h20/cpuppr"

50
tests/subsys/ipc/ipc_sessions/testcase.yaml

@ -0,0 +1,50 @@ @@ -0,0 +1,50 @@
sample:
name: IPC Service integration test
description: IPC Service integration and efficiency test
common:
sysbuild: true
tags: ipc ipc_sessions
harness: ztest
tests:
sample.ipc.ipc_sessions.nrf5340dk:
platform_allow:
- nrf5340dk/nrf5340/cpuapp
integration_platforms:
- nrf5340dk/nrf5340/cpuapp
sample.ipc.ipc_sessions.nrf54h20dk_cpuapp_cpurad:
platform_allow:
- nrf54h20dk/nrf54h20/cpuapp
integration_platforms:
- nrf54h20dk/nrf54h20/cpuapp
extra_args:
- CONFIG_IPC_TEST_SKIP_CORE_RESET=y
sample.ipc.ipc_sessions.nrf54h20dk_cpuapp_cpuppr:
platform_allow:
- nrf54h20dk/nrf54h20/cpuapp
integration_platforms:
- nrf54h20dk/nrf54h20/cpuapp
extra_args:
- FILE_SUFFIX=cpuppr
- ipc_sessions_SNIPPET=nordic-ppr
sample.ipc.ipc_sessions.nrf54h20dk_cpuapp_no_unbound_cpuppr:
platform_allow:
- nrf54h20dk/nrf54h20/cpuapp
integration_platforms:
- nrf54h20dk/nrf54h20/cpuapp
extra_args:
- FILE_SUFFIX=cpuppr
- ipc_sessions_SNIPPET=nordic-ppr
- CONFIG_IPC_TEST_SKIP_UNBOUND=y
- CONFIG_IPC_SERVICE_BACKEND_ICMSG_V1=y
sample.ipc.ipc_sessions.nrf54h20dk_cpuapp_cpuppr_no_unbound:
platform_allow:
- nrf54h20dk/nrf54h20/cpuapp
integration_platforms:
- nrf54h20dk/nrf54h20/cpuapp
extra_args:
- FILE_SUFFIX=cpuppr
- ipc_sessions_SNIPPET=nordic-ppr
- CONFIG_IPC_TEST_SKIP_UNBOUND=y
- remote_CONFIG_IPC_SERVICE_BACKEND_ICMSG_V1=y

12
tests/subsys/ipc/pbuf/src/main.c

@ -48,7 +48,7 @@ ZTEST(test_pbuf, test_rw) @@ -48,7 +48,7 @@ ZTEST(test_pbuf, test_rw)
* order to avoid clang complains about memory_area not being constant
* expression.
*/
static PBUF_MAYBE_CONST struct pbuf_cfg cfg = PBUF_CFG_INIT(memory_area, MEM_AREA_SZ, 0);
static PBUF_MAYBE_CONST struct pbuf_cfg cfg = PBUF_CFG_INIT(memory_area, MEM_AREA_SZ, 0, 0);
static struct pbuf pb = {
.cfg = &cfg,
@ -115,9 +115,11 @@ ZTEST(test_pbuf, test_retcodes) @@ -115,9 +115,11 @@ ZTEST(test_pbuf, test_retcodes)
* order to avoid clang complains about memory_area not being constant
* expression.
*/
static PBUF_MAYBE_CONST struct pbuf_cfg cfg0 = PBUF_CFG_INIT(memory_area, MEM_AREA_SZ, 32);
static PBUF_MAYBE_CONST struct pbuf_cfg cfg1 = PBUF_CFG_INIT(memory_area, MEM_AREA_SZ, 0);
static PBUF_MAYBE_CONST struct pbuf_cfg cfg2 = PBUF_CFG_INIT(memory_area, 20, 4);
static PBUF_MAYBE_CONST struct pbuf_cfg cfg0 = PBUF_CFG_INIT(memory_area, MEM_AREA_SZ,
32, 0);
static PBUF_MAYBE_CONST struct pbuf_cfg cfg1 = PBUF_CFG_INIT(memory_area, MEM_AREA_SZ,
0, 0);
static PBUF_MAYBE_CONST struct pbuf_cfg cfg2 = PBUF_CFG_INIT(memory_area, 20, 4, 0);
static struct pbuf pb0 = {
.cfg = &cfg0,
@ -268,7 +270,7 @@ ZTEST(test_pbuf, test_stress) @@ -268,7 +270,7 @@ ZTEST(test_pbuf, test_stress)
* order to avoid clang complains about buffer not being constant
* expression.
*/
static PBUF_MAYBE_CONST struct pbuf_cfg cfg = PBUF_CFG_INIT(buffer, MEM_AREA_SZ, 4);
static PBUF_MAYBE_CONST struct pbuf_cfg cfg = PBUF_CFG_INIT(buffer, MEM_AREA_SZ, 4, 0);
static struct pbuf pb = {
.cfg = &cfg,

Loading…
Cancel
Save