demo工程暂存 优化菜单界面UI和功能

This commit is contained in:
2024-04-29 16:32:24 +08:00
commit 330cd25cf1
3310 changed files with 2163318 additions and 0 deletions

View File

@ -0,0 +1,113 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright 2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of Mentor Graphics Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* llist.c
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
* Source file for basic linked list service.
*
**************************************************************************/
#include "llist.h"
#define LIST_NULL ((void *)0)
/*!
 * add_to_list
 *
 * Inserts the given element at the head of the doubly linked list.
 * Passing a NULL node is a silent no-op.
 *
 * @param head - pointer to the list head pointer (updated on success)
 * @param node - new element to insert
 *
 */
void add_to_list(struct llist **head, struct llist *node)
{
    struct llist *first;

    if (node == LIST_NULL)
    {
        return;
    }

    /* The new node always becomes the first element: it has no
       predecessor, and the former head (possibly empty) follows it. */
    first      = *head;
    node->prev = LIST_NULL;
    node->next = first;
    if (first != LIST_NULL)
    {
        first->prev = node;
    }
    *head = node;
}
/*!
 * remove_from_list
 *
 * Removes the given element from the list. The element is assumed to be
 * currently linked in the list referenced by head; passing a NULL node
 * or an empty list is a silent no-op.
 *
 * @param head - pointer to the list head pointer (updated when the first
 *               element is removed)
 * @param node - element to remove from list
 *
 */
void remove_from_list(struct llist **head, struct llist *node)
{
    if ((*head == LIST_NULL) || (node == LIST_NULL))
    {
        return;
    }

    if (node == *head)
    {
        /* First element has to be removed. */
        *head = node->next;
        if (*head != LIST_NULL)
        {
            /* Bug fix: the new head must not keep a dangling 'prev'
               pointer to the element that was just unlinked. The
               original code left (*head)->prev untouched here. */
            (*head)->prev = LIST_NULL;
        }
    }
    else
    {
        /* Intermediate or last element: bypass it in the forward
           direction, and in the backward direction when a successor
           exists. */
        node->prev->next = node->next;
        if (node->next != LIST_NULL)
        {
            node->next->prev = node->prev;
        }
    }
}

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright 2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of Mentor Graphics Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* llist.h
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
* Header file for linked list service.
*
**************************************************************************/
#ifndef LLIST_H_
#define LLIST_H_
#include <stdint.h>
/*!
 * Doubly linked list node.
 *
 * The list service stores no payload itself; each node carries an opaque
 * 'data' pointer and a user-defined 'attr' word.
 */
struct llist
{
void *data; /*!< opaque payload, owned by the caller */
uint32_t attr; /*!< user-defined attribute/flag word */
struct llist *next; /*!< next element, NULL at the tail */
struct llist *prev; /*!< previous element, NULL at the head */
};
/*! Inserts 'node' at the head of the list; no-op when node is NULL. */
void add_to_list(struct llist **head, struct llist *node);
/*! Unlinks 'node' from the list; no-op when the list or node is NULL. */
void remove_from_list(struct llist **head, struct llist *node);
#endif /* LLIST_H_ */

View File

@ -0,0 +1,59 @@
/*
* Copyright 2019-2020 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef RPMSG_PLATFORM_H_
#define RPMSG_PLATFORM_H_
#include <stdint.h>
/*
* No need to align the VRING as defined in Linux because RT500 is not intended
* to run the Linux
*/
#ifndef VRING_ALIGN
#define VRING_ALIGN (0x10U)
#endif
/* contains pool of descriptors and two circular buffers */
#ifndef VRING_SIZE
#define VRING_SIZE (0x80UL)
#endif
/* size of shared memory + 2*VRING size */
#define RL_VRING_OVERHEAD (2UL * VRING_SIZE)
#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU))
#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U)
#define RL_GET_Q_ID(id) ((id)&0x1U)
#define RL_PLATFORM_IMXRT500_LINK_ID (0U)
#define RL_PLATFORM_HIGHEST_LINK_ID (0U)
/* platform interrupt related functions */
int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data);
int32_t platform_deinit_interrupt(uint32_t vector_id);
int32_t platform_interrupt_enable(uint32_t vector_id);
int32_t platform_interrupt_disable(uint32_t vector_id);
int32_t platform_in_isr(void);
void platform_notify(uint32_t vector_id);
/* platform low-level time-delay (busy loop) */
void platform_time_delay(uint32_t num_msec);
/* platform memory functions */
void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags);
void platform_cache_all_flush_invalidate(void);
void platform_cache_disable(void);
uint32_t platform_vatopa(void *addr);
void *platform_patova(uint32_t addr);
/* platform init/deinit */
int32_t platform_init(void);
int32_t platform_deinit(void);
int32_t platform_reinit(void);
#endif /* RPMSG_PLATFORM_H_ */

View File

@ -0,0 +1,58 @@
/*
* Copyright 2019-2020 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef RPMSG_PLATFORM_H_
#define RPMSG_PLATFORM_H_
#include <stdint.h>
/*
* No need to align the VRING as defined in Linux because RT500 is not intended
* to run the Linux
*/
#ifndef VRING_ALIGN
#define VRING_ALIGN (0x10U)
#endif
/* contains pool of descriptors and two circular buffers */
#ifndef VRING_SIZE
#define VRING_SIZE (0x400UL)
#endif
/* size of shared memory + 2*VRING size */
#define RL_VRING_OVERHEAD (2UL * VRING_SIZE)
#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU))
#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U)
#define RL_GET_Q_ID(id) ((id)&0x1U)
#define RL_PLATFORM_IMXRT500_LINK_ID (0U)
#define RL_PLATFORM_HIGHEST_LINK_ID (0U)
/* platform interrupt related functions */
int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data);
int32_t platform_deinit_interrupt(uint32_t vector_id);
int32_t platform_interrupt_enable(uint32_t vector_id);
int32_t platform_interrupt_disable(uint32_t vector_id);
int32_t platform_in_isr(void);
void platform_notify(uint32_t vector_id);
/* platform low-level time-delay (busy loop) */
void platform_time_delay(uint32_t num_msec);
/* platform memory functions */
void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags);
void platform_cache_all_flush_invalidate(void);
void platform_cache_disable(void);
uint32_t platform_vatopa(void *addr);
void *platform_patova(uint32_t addr);
/* platform init/deinit */
int32_t platform_init(void);
int32_t platform_deinit(void);
#endif /* RPMSG_PLATFORM_H_ */

View File

@ -0,0 +1,58 @@
/*
* Copyright 2019-2020 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef RPMSG_PLATFORM_H_
#define RPMSG_PLATFORM_H_
#include <stdint.h>
/*
* No need to align the VRING as defined in Linux because RT600 is not intended
* to run the Linux
*/
#ifndef VRING_ALIGN
#define VRING_ALIGN (0x10U)
#endif
/* contains pool of descriptors and two circular buffers */
#ifndef VRING_SIZE
#define VRING_SIZE (0x400UL)
#endif
/* size of shared memory + 2*VRING size */
#define RL_VRING_OVERHEAD (2UL * VRING_SIZE)
#define RL_GET_VQ_ID(link_id, queue_id) (((queue_id)&0x1U) | (((link_id) << 1U) & 0xFFFFFFFEU))
#define RL_GET_LINK_ID(id) (((id)&0xFFFFFFFEU) >> 1U)
#define RL_GET_Q_ID(id) ((id)&0x1U)
#define RL_PLATFORM_IMXRT600_LINK_ID (0U)
#define RL_PLATFORM_HIGHEST_LINK_ID (0U)
/* platform interrupt related functions */
int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data);
int32_t platform_deinit_interrupt(uint32_t vector_id);
int32_t platform_interrupt_enable(uint32_t vector_id);
int32_t platform_interrupt_disable(uint32_t vector_id);
int32_t platform_in_isr(void);
void platform_notify(uint32_t vector_id);
/* platform low-level time-delay (busy loop) */
void platform_time_delay(uint32_t num_msec);
/* platform memory functions */
void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags);
void platform_cache_all_flush_invalidate(void);
void platform_cache_disable(void);
uint32_t platform_vatopa(void *addr);
void *platform_patova(uint32_t addr);
/* platform init/deinit */
int32_t platform_init(void);
int32_t platform_deinit(void);
#endif /* RPMSG_PLATFORM_H_ */

View File

@ -0,0 +1,92 @@
#ifndef _RPMSG_H
#define _RPMSG_H
#include <stdint.h>
#include "rpmsg_lite.h"
/* Exported macro ------------------------------------------------------------*/
#define RPMSG_MSG_TYPE_MASTER_READY 0x00000001
#define RPMSG_MSG_TYPE_REMOTE_READY 0x00000002
#define RPMSG_MSG_TYPE_SYNC_INVOKE 0x00000003
#define RPMSG_MSG_TYPE_SYNC_RETURN 0x00000004
#define RPMSG_MSG_TYPE_ASYNC_MSG 0x00000005
#define RPMSG_SYNC_FUNC_MSG(type, sub_type) (((type)<<16) | (sub_type))
#define RPSMG_SYNC_FUNC_TYPE(func_id) ((func_id)>>16)
#define RPSMG_SYNC_FUNC_SUB_TYPE(func_id) ((func_id) & 0xffff)
#define RPMSG_SYNC_FUNC_TYPE_TEST 0x0001
#define RPMSG_SYNC_FUNC_TYPE_DSP 0x0002
#define RPMSG_SYNC_FUNC_TYPE_AUDIO 0x0003
#define RPMSG_SYNC_FUNC_TYPE_LVGL 0x0004
#define RPMSG_SYNC_FUNC_TYPE_FREETYPE 0x0005
#define RPMSG_SYNC_FUNC_SUM RPMSG_SYNC_FUNC_MSG(RPMSG_SYNC_FUNC_TYPE_TEST, 0x0001)
/** @addtogroup rpmsg synchronous invoke message definitions
 * @{
 */
/*! Parameter block for the RPMSG_SYNC_FUNC_SUM demo call. */
struct rpmsg_sync_msg_sum_t {
uint32_t x; /*!< first addend */
uint32_t y; /*!< second addend */
};
/**
 * @}
 */
/** @addtogroup rpmsg asynchronous message definitions
 * @{
 */
/*! Payload of an RPMSG_MSG_TYPE_ASYNC_MSG message. */
struct rpmsg_async_msg_t {
uint32_t msg_id; /*!< application-defined message identifier */
union {
void *param; /*!< generic pointer argument */
uint32_t dsp_req_frq; /*!< requested DSP frequency — units defined by the users of this field; TODO confirm with callers */
} p; /*!< payload, interpreted according to msg_id */
};
/**
 * @}
 */
/*! Message envelope exchanged over the rpmsg-lite channel.
 * msg_type selects which member of 'p' is valid (see RPMSG_MSG_TYPE_*). */
struct rpmsg_msg_t {
uint32_t msg_type; /*!< one of the RPMSG_MSG_TYPE_* values */
union {
struct {
uint32_t func_id; /*!< built with RPMSG_SYNC_FUNC_MSG(type, sub_type) */
void *param; /*!< pointer to the function's argument block */
} sync_func; /*!< valid for RPMSG_MSG_TYPE_SYNC_INVOKE */
struct {
uint32_t status; /*!< execution status reported by the callee */
uint32_t result; /*!< return value of the invoked function */
} sync_ret; /*!< valid for RPMSG_MSG_TYPE_SYNC_RETURN */
struct rpmsg_async_msg_t async_msg; /*!< valid for RPMSG_MSG_TYPE_ASYNC_MSG */
} p; /*!< payload selected by msg_type */
};
/*-----------------------------------------------------------------------------------*/
/* Exported functions ---------------------------------------------------------------*/
/*-----------------------------------------------------------------------------------*/
uint32_t rpmsg_sync_invoke(struct rpmsg_lite_instance *rpmsg, uint32_t func_id, void *param, uint32_t *ret);
uint32_t rpmsg_send_async(struct rpmsg_lite_instance *rpmsg, struct rpmsg_async_msg_t *async_msg);
uint32_t rpmsg_send_sync_ret(struct rpmsg_lite_instance *rpmsg, uint32_t status, uint32_t ret);
struct rpmsg_lite_instance *rpmsg_master_init(void (*recv)(struct rpmsg_lite_instance *rpmsg, struct rpmsg_msg_t *msg));
struct rpmsg_lite_instance *rpmsg_remote_init(void (*recv)(struct rpmsg_lite_instance *rpmsg, struct rpmsg_msg_t *msg));
void rpmsg_wait_master_ready(struct rpmsg_lite_instance *rpmsg);
void rpmsg_destroy(struct rpmsg_lite_instance *rpmsg);
uint32_t rpmsg_recv_msg(struct rpmsg_lite_instance *rpmsg, struct rpmsg_msg_t **msg, uint32_t *msg_len);
struct rpmsg_lite_instance *rpmsg_get_remote_instance(void);
struct rpmsg_lite_instance *rpmsg_get_master_instance(void);
void rpmsg_remote_recover(void);
#endif // _RPMSG_H

View File

@ -0,0 +1,114 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_compiler.h
*
* DESCRIPTION
*
* This file defines compiler-specific macros.
*
***************************************************************************/
#ifndef RPMSG_COMPILER_H_
#define RPMSG_COMPILER_H_
/* IAR ARM build tools */
#if defined(__ICCARM__)
#include <intrinsics.h>
#define MEM_BARRIER() __DSB()
#ifndef RL_PACKED_BEGIN
#define RL_PACKED_BEGIN __packed
#endif
#ifndef RL_PACKED_END
#define RL_PACKED_END
#endif
/* Arm Compiler (armcc __CC_ARM, or armclang >= 6.10) */
#elif defined(__CC_ARM) || (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))
#if (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))
#include <arm_compat.h>
#endif
#define MEM_BARRIER() __schedule_barrier()
#ifndef RL_PACKED_BEGIN
#define RL_PACKED_BEGIN _Pragma("pack(1U)")
#endif
#ifndef RL_PACKED_END
#define RL_PACKED_END _Pragma("pack()")
#endif
/* XCC HiFi4 */
#elif defined(__XCC__)
/*
* The XCC HiFi4 compiler is compatible with GNU compiler, with restrictions.
* For ARM __schedule_barrier, there's no identical intrinsic in HiFi4.
* A complete synchronization barrier would require initialize and wait ops.
* Here use NOP instead, similar to ARM __nop.
*/
#define MEM_BARRIER() __asm__ __volatile__("nop" : : : "memory")
#ifndef RL_PACKED_BEGIN
#define RL_PACKED_BEGIN
#endif
#ifndef RL_PACKED_END
#define RL_PACKED_END __attribute__((__packed__))
#endif
/* GNUC */
#elif defined(__GNUC__)
#define MEM_BARRIER() __asm__ volatile("dsb" : : : "memory")
#ifndef RL_PACKED_BEGIN
#define RL_PACKED_BEGIN
#endif
#ifndef RL_PACKED_END
#define RL_PACKED_END __attribute__((__packed__))
#endif
#else
/* There is no default definition here to avoid wrong structures packing in case of not supported compiler */
#error Please implement the structure packing macros for your compiler here!
#endif
#endif /* RPMSG_COMPILER_H_ */

View File

@ -0,0 +1,169 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2021 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RPMSG_DEFAULT_CONFIG_H_
#define RPMSG_DEFAULT_CONFIG_H_
#define RL_USE_CUSTOM_CONFIG (1)
#if RL_USE_CUSTOM_CONFIG
#include "rpmsg_config.h"
#endif
/*!
* @addtogroup config
* @{
* @file
*/
//! @name Configuration options
//@{
//! @def RL_MS_PER_INTERVAL
//!
//! Delay in milliseconds used in non-blocking API functions for polling.
//! The default value is 1.
#ifndef RL_MS_PER_INTERVAL
#define RL_MS_PER_INTERVAL (1)
#endif
//! @def RL_BUFFER_PAYLOAD_SIZE
//!
//! Size of the buffer payload, it must be equal to (240, 496, 1008, ...)
//! [2^n - 16]. Ensure the same value is defined on both sides of rpmsg
//! communication. The default value is 496U.
#ifndef RL_BUFFER_PAYLOAD_SIZE
#define RL_BUFFER_PAYLOAD_SIZE (496U)
#endif
//! @def RL_BUFFER_COUNT
//!
//! Number of the buffers, it must be power of two (2, 4, ...).
//! The default value is 2U.
//! Note this value defines the buffer count for one direction of the rpmsg
//! communication only, i.e. if the default value of 2 is used
//! in rpmsg_config.h files for the master and the remote side, 4 buffers
//! in total are created in the shared memory.
#ifndef RL_BUFFER_COUNT
#define RL_BUFFER_COUNT (2U)
#endif
//! @def RL_API_HAS_ZEROCOPY
//!
//! Zero-copy API functions enabled/disabled.
//! The default value is 1 (enabled).
#ifndef RL_API_HAS_ZEROCOPY
#define RL_API_HAS_ZEROCOPY (1)
#endif
//! @def RL_USE_STATIC_API
//!
//! Static API functions (no dynamic allocation) enabled/disabled.
//! The default value is 0 (static API disabled).
#ifndef RL_USE_STATIC_API
#define RL_USE_STATIC_API (0)
#endif
//! @def RL_CLEAR_USED_BUFFERS
//!
//! Clearing used buffers before returning back to the pool of free buffers
//! enabled/disabled.
//! The default value is 0 (disabled).
#ifndef RL_CLEAR_USED_BUFFERS
#define RL_CLEAR_USED_BUFFERS (0)
#endif
//! @def RL_USE_MCMGR_IPC_ISR_HANDLER
//!
//! When enabled IPC interrupts are managed by the Multicore Manager (IPC
//! interrupts router), when disabled RPMsg-Lite manages IPC interrupts
//! by itself.
//! The default value is 0 (no MCMGR IPC ISR handler used).
#ifndef RL_USE_MCMGR_IPC_ISR_HANDLER
#define RL_USE_MCMGR_IPC_ISR_HANDLER (0)
#endif
//! @def RL_USE_ENVIRONMENT_CONTEXT
//!
//! When enabled the environment layer uses its own context.
//! Added for QNX port mainly, but can be used if required.
//! The default value is 0 (no context, saves some RAM).
#ifndef RL_USE_ENVIRONMENT_CONTEXT
#define RL_USE_ENVIRONMENT_CONTEXT (0)
#endif
//! @def RL_DEBUG_CHECK_BUFFERS
//!
//! Do not use in RPMsg-Lite to Linux configuration
#ifndef RL_DEBUG_CHECK_BUFFERS
#define RL_DEBUG_CHECK_BUFFERS (0)
#endif
//! @def RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION
//!
//! When enabled the opposite side is notified each time received buffers
//! are consumed and put into the queue of available buffers.
//! Enable this option in RPMsg-Lite to Linux configuration to allow unblocking
//! of the Linux blocking send.
//! The default value is 0 (RPMsg-Lite to RPMsg-Lite communication).
#ifndef RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION
#define RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION (0)
#endif
//! @def RL_HANG
//!
//! Default implementation of hang assert function
static inline void RL_HANG(void)
{
    /* Spin forever: a failed assertion traps the core here so the fault
       state can be inspected with a debugger. Never returns. */
    for (;;)
    {
    }
}
//! @def RL_ASSERT
//!
//! Assert implementation.
#ifndef RL_ASSERT
#define RL_ASSERT_BOOL(b) \
do \
{ \
if (!(b)) \
{ \
RL_HANG(); \
} \
} while (0 == 1);
#define RL_ASSERT(x) RL_ASSERT_BOOL((int32_t)(x) != 0)
#endif
//@}
#endif /* RPMSG_DEFAULT_CONFIG_H_ */

View File

@ -0,0 +1,583 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env.h
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
* This file defines abstraction layer for OpenAMP stack. The implementor
* must provide definition of all the functions.
*
* DATA STRUCTURES
*
* none
*
* FUNCTIONS
*
* env_allocate_memory
* env_free_memory
* env_memset
* env_memcpy
* env_strncpy
* env_print
* env_map_vatopa
* env_map_patova
* env_mb
* env_rmb
* env_wmb
* env_create_mutex
* env_delete_mutex
* env_lock_mutex
* env_unlock_mutex
* env_sleep_msec
* env_disable_interrupt
* env_enable_interrupt
* env_create_queue
* env_delete_queue
* env_put_queue
* env_get_queue
*
**************************************************************************/
#ifndef RPMSG_ENV_H_
#define RPMSG_ENV_H_
#include <stdint.h>
#include "rpmsg_default_config.h"
#include "rpmsg_platform.h"
/*!
* env_init
*
* Initializes OS/BM environment.
*
* @param env_context Pointer to preallocated environment context data
* @param env_init_data Initialization data for the environment layer
*
* @returns - execution status
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
int32_t env_init(void **env_context, void *env_init_data);
#else
int32_t env_init(void);
#endif
/*!
* env_deinit
*
* Uninitializes OS/BM environment.
*
* @param env_context Pointer to environment context data
*
* @returns - execution status
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
int32_t env_deinit(void *env_context);
#else
int32_t env_deinit(void);
#endif
/*!
* -------------------------------------------------------------------------
*
* Dynamic memory management functions. The parameters
* are similar to standard c functions.
*
*-------------------------------------------------------------------------
**/
/*!
* env_allocate_memory
*
* Allocates memory with the given size.
*
* @param size - size of memory to allocate
*
* @return - pointer to allocated memory
*/
void *env_allocate_memory(uint32_t size);
/*!
* env_free_memory
*
* Frees memory pointed by the given parameter.
*
* @param ptr - pointer to memory to free
*/
void env_free_memory(void *ptr);
/*!
* -------------------------------------------------------------------------
*
* RTL Functions
*
*-------------------------------------------------------------------------
*/
void env_memset(void *ptr, int32_t value, uint32_t size);
void env_memcpy(void *dst, void const *src, uint32_t len);
int32_t env_strcmp(const char *dst, const char *src);
void env_strncpy(char *dest, const char *src, uint32_t len);
int32_t env_strncmp(char *dest, const char *src, uint32_t len);
#ifdef MCUXPRESSO_SDK
/* MCUXpresso_SDK's PRINTF used in SDK examples */
#include "fsl_debug_console.h"
#if defined SDK_DEBUGCONSOLE && (SDK_DEBUGCONSOLE != DEBUGCONSOLE_DISABLE)
#define env_print(...) (void)PRINTF(__VA_ARGS__)
#else
#define env_print(...)
#endif
#else
/* When RPMsg_Lite is used outside of MCUXpresso_SDK, provide your own env_print
implementation to avoid conflict with MISRA rule 21.6 */
#include <stdio.h>
#define env_print(...) printf(__VA_ARGS__)
#endif /* MCUXPRESSO_SDK */
/*!
*-----------------------------------------------------------------------------
*
* Functions to convert physical address to virtual address and vice versa.
*
*-----------------------------------------------------------------------------
*/
/*!
* env_map_vatopa
*
* Converts logical address to physical address
*
* @param env Pointer to environment context data
* @param address Pointer to logical address
*
* @return - physical address
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
uint32_t env_map_vatopa(void *env, void *address);
#else
uint32_t env_map_vatopa(void *address);
#endif
/*!
* env_map_patova
*
* Converts physical address to logical address
*
* @param env_context Pointer to environment context data
* @param address Pointer to physical address
*
* @return - logical address
*
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
void *env_map_patova(void *env, uint32_t address);
#else
void *env_map_patova(uint32_t address);
#endif
/*!
*-----------------------------------------------------------------------------
*
* Abstractions for memory barrier instructions.
*
*-----------------------------------------------------------------------------
*/
/*!
* env_mb
*
* Inserts memory barrier.
*/
void env_mb(void);
/*!
* env_rmb
*
* Inserts read memory barrier
*/
void env_rmb(void);
/*!
* env_wmb
*
* Inserts write memory barrier
*/
void env_wmb(void);
/*!
*-----------------------------------------------------------------------------
*
* Abstractions for OS lock primitives.
*
*-----------------------------------------------------------------------------
*/
/*!
* env_create_mutex
*
* Creates a mutex with given initial count.
*
* @param lock - pointer to created mutex
* @param count - initial count 0 or 1
*
* @return - status of function execution
*/
int32_t env_create_mutex(void **lock, int32_t count);
/*!
* env_delete_mutex
*
* Deletes the given lock.
*
* @param lock - mutex to delete
*/
void env_delete_mutex(void *lock);
/*!
* env_lock_mutex
*
* Tries to acquire the lock, if lock is not available then call to
* this function will suspend.
*
* @param lock - mutex to lock
*
*/
void env_lock_mutex(void *lock);
/*!
* env_unlock_mutex
*
* Releases the given lock.
*
* @param lock - mutex to unlock
*/
void env_unlock_mutex(void *lock);
/*!
* env_create_sync_lock
*
* Creates a synchronization lock primitive. It is used
* when signal has to be sent from the interrupt context to main
* thread context.
*
* @param lock - pointer to created sync lock object
* @param state - initial state , lock or unlocked
*
* @returns - status of function execution
*/
//#define LOCKED 0
//#define UNLOCKED 1
int32_t env_create_sync_lock(void **lock, int32_t state);
/*!
* env_create_sync_lock
*
* Deletes given sync lock object.
*
* @param lock - sync lock to delete.
*
*/
void env_delete_sync_lock(void *lock);
/*!
* env_acquire_sync_lock
*
* Tries to acquire the sync lock.
*
* @param lock - sync lock to acquire.
*/
void env_acquire_sync_lock(void *lock);
/*!
* env_release_sync_lock
*
* Releases synchronization lock.
*
* @param lock - sync lock to release.
*/
void env_release_sync_lock(void *lock);
/*!
* env_sleep_msec
*
* Suspends the calling thread for given time in msecs.
*
* @param num_msec - delay in msecs
*/
void env_sleep_msec(uint32_t num_msec);
/*!
* env_register_isr
*
* Registers interrupt handler data for the given interrupt vector.
*
* @param env Pointer to environment context data
* @param vector_id Virtual interrupt vector number
* @param data Interrupt handler data (virtqueue)
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
void env_register_isr(void *env, uint32_t vector_id, void *data);
#else
void env_register_isr(uint32_t vector_id, void *data);
#endif
/*!
* env_unregister_isr
*
* Unregisters interrupt handler data for the given interrupt vector.
*
* @param env Pointer to environment context data
* @param vector_id Virtual interrupt vector number
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
void env_unregister_isr(void *env, uint32_t vector_id);
#else
void env_unregister_isr(uint32_t vector_id);
#endif
/*!
* env_enable_interrupt
*
* Enables the given interrupt
*
* @param env Pointer to environment context data
* @param vector_id Virtual interrupt vector number
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
void env_enable_interrupt(void *env, uint32_t vector_id);
#else
void env_enable_interrupt(uint32_t vector_id);
#endif
/*!
* env_disable_interrupt
*
* Disables the given interrupt.
*
* @param env Pointer to environment context data
* @param vector_id Virtual interrupt vector number
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
void env_disable_interrupt(void *env, uint32_t vector_id);
#else
void env_disable_interrupt(uint32_t vector_id);
#endif
/*!
* env_map_memory
*
* Enables memory mapping for given memory region.
*
* @param pa - physical address of memory
* @param va - logical address of memory
* @param size - memory size
* @param flags - flags for cached/uncached and access type
*
* Currently only first byte of flag parameter is used and bits mapping is defined as follow;
*
* Cache bits
* 0x0000_0001 = No cache
* 0x0000_0010 = Write back
* 0x0000_0100 = Write through
* 0x0000_x000 = Not used
*
* Memory types
*
* 0x0001_xxxx = Memory Mapped
* 0x0010_xxxx = IO Mapped
* 0x0100_xxxx = Shared
* 0x1000_xxxx = TLB
*/
/* Macros for caching scheme used by the shared memory */
#define UNCACHED (1 << 0)
#define WB_CACHE (1 << 1)
#define WT_CACHE (1 << 2)
/* Memory Types */
#define MEM_MAPPED (1 << 4)
#define IO_MAPPED (1 << 5)
#define SHARED_MEM (1 << 6)
#define TLB_MEM (1 << 7)
void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags);
/*!
* env_get_timestamp
*
* Returns a 64 bit time stamp.
*
*
*/
uint64_t env_get_timestamp(void);
/*!
* env_disable_cache
*
* Disables system caches.
*
*/
void env_disable_cache(void);
typedef void LOCK;
/*!
* env_create_queue
*
* Creates a message queue.
*
* @param queue Pointer to created queue
* @param length Maximum number of elements in the queue
* @param item_size Queue element size in bytes
*
* @return - status of function execution
*/
int32_t env_create_queue(void **queue, int32_t length, int32_t element_size);
/*!
* env_delete_queue
*
* Deletes the message queue.
*
* @param queue Queue to delete
*/
void env_delete_queue(void *queue);
/*!
* env_put_queue
*
* Put an element in a queue.
*
* @param queue Queue to put element in
* @param msg Pointer to the message to be put into the queue
* @param timeout_ms Timeout in ms
*
* @return - status of function execution
*/
int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms);
/*!
* env_get_queue
*
* Get an element out of a queue.
*
* @param queue Queue to get element from
* @param msg Pointer to a memory to save the message
* @param timeout_ms Timeout in ms
*
* @return - status of function execution
*/
int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms);
/*!
* env_get_current_queue_size
*
* Get current queue size.
*
* @param queue Queue pointer
*
* @return - Number of queued items in the queue
*/
int32_t env_get_current_queue_size(void *queue);
/*!
* env_isr
*
* Invoke RPMSG/IRQ callback
*
* @param env Pointer to environment context data
* @param vector RPMSG IRQ vector ID.
*/
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
void env_isr(void *env, uint32_t vector);
#else
void env_isr(uint32_t vector);
#endif
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
/*!
* env_get_platform_context
*
* Get the platform layer context from the environment platform context
*
* @param env Pointer to environment context data
*
* @return Pointer to platform context data
*/
void *env_get_platform_context(void *env_context);
/*!
* env_init_interrupt
*
* Initialize the ISR data for given virtqueue interrupt
*
* @param env Pointer to environment context data
* @param vq_id Virtqueue ID
* @param isr_data Pointer to initial ISR data
*
* @return Execution status, 0 on success
*/
int32_t env_init_interrupt(void *env, int32_t vq_id, void *isr_data);
/*!
* env_deinit_interrupt
*
* Deinitialize the ISR data for given virtqueue interrupt
*
* @param env Pointer to environment context data
* @param vq_id Virtqueue ID
*
* @return Execution status, 0 on success
*/
int32_t env_deinit_interrupt(void *env, int32_t vq_id);
#endif
#endif /* RPMSG_ENV_H_ */

View File

@ -0,0 +1,372 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2020 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RPMSG_LITE_H_
#define RPMSG_LITE_H_
#if defined(__cplusplus)
extern "C" {
#endif
#include <stddef.h>
#include "virtqueue.h"
#include "rpmsg_env.h"
#include "llist.h"
#include "rpmsg_compiler.h"
#include "rpmsg_default_config.h"
//! @addtogroup rpmsg_lite
//! @{
/*******************************************************************************
* Definitions
******************************************************************************/
#define RL_VERSION "3.1.2" /*!< Current RPMsg Lite version */
/* Shared memory "allocator" parameters */
#define RL_WORD_SIZE (sizeof(uint32_t))
#define RL_WORD_ALIGN_UP(a) \
(((((uint32_t)a) & (RL_WORD_SIZE - 1U)) != 0U) ? ((((uint32_t)a) & (~(RL_WORD_SIZE - 1U))) + 4U) : ((uint32_t)a))
#define RL_WORD_ALIGN_DOWN(a) \
(((((uint32_t)a) & (RL_WORD_SIZE - 1U)) != 0U) ? (((uint32_t)a) & (~(RL_WORD_SIZE - 1U))) : ((uint32_t)a))
/* Definitions for device types , null pointer, etc.*/
#define RL_SUCCESS (0)
#define RL_NULL ((void *)0)
#define RL_REMOTE (0)
#define RL_MASTER (1)
#define RL_TRUE (1U)
#define RL_FALSE (0U)
#define RL_ADDR_ANY (0xFFFFFFFFU)
#define RL_RELEASE (0)
#define RL_HOLD (1)
#define RL_DONT_BLOCK (0)
#define RL_BLOCK (0xFFFFFFFFU)
/* Error macros. */
#define RL_ERRORS_BASE (-5000)
#define RL_ERR_NO_MEM (RL_ERRORS_BASE - 1)
#define RL_ERR_BUFF_SIZE (RL_ERRORS_BASE - 2)
#define RL_ERR_PARAM (RL_ERRORS_BASE - 3)
#define RL_ERR_DEV_ID (RL_ERRORS_BASE - 4)
#define RL_ERR_MAX_VQ (RL_ERRORS_BASE - 5)
#define RL_ERR_NO_BUFF (RL_ERRORS_BASE - 6)
#define RL_NOT_READY (RL_ERRORS_BASE - 7)
#define RL_ALREADY_DONE (RL_ERRORS_BASE - 8)
/* Init flags */
#define RL_NO_FLAGS (0)
/*! \typedef rl_ept_rx_cb_t
\brief Receive callback function type.
*/
typedef int32_t (*rl_ept_rx_cb_t)(void *payload, uint32_t payload_len, uint32_t src, void *priv);
/*!
* RPMsg Lite Endpoint structure
*/
struct rpmsg_lite_endpoint
{
uint32_t addr; /*!< endpoint address */
rl_ept_rx_cb_t rx_cb; /*!< ISR callback function */
void *rx_cb_data; /*!< ISR callback data */
void *rfu; /*!< reserved for future usage */
/* 16 bytes aligned on 32bit architecture */
};
/*!
* RPMsg Lite Endpoint static context
*/
struct rpmsg_lite_ept_static_context
{
struct rpmsg_lite_endpoint ept; /*!< memory for endpoint structure */
struct llist node; /*!< memory for linked list node structure */
};
/*!
* Structure describing the local instance
* of RPMSG lite communication stack and
* holds all runtime variables needed internally
* by the stack.
*/
struct rpmsg_lite_instance
{
struct virtqueue *rvq; /*!< receive virtqueue */
struct virtqueue *tvq; /*!< transmit virtqueue */
struct llist *rl_endpoints; /*!< linked list of endpoints */
LOCK *lock; /*!< local RPMsg Lite mutex lock */
uint32_t link_state; /*!< state of the link, up/down*/
char *sh_mem_base; /*!< base address of the shared memory */
uint32_t sh_mem_remaining; /*!< amount of remaining unused buffers in shared memory */
uint32_t sh_mem_total; /*!< total amount of buffers in shared memory */
struct virtqueue_ops const *vq_ops; /*!< ops functions table pointer */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
void *env; /*!< pointer to the environment layer context */
#endif
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct vq_static_context vq_ctxt[2];
#endif
};
/*******************************************************************************
* API
******************************************************************************/
/* Exported API functions */
/*!
* @brief Initializes the RPMsg-Lite communication stack.
* Must be called prior to any other RPMSG lite API.
* To be called by the master side.
*
* @param shmem_addr Shared memory base used for this instance of RPMsg-Lite
* @param shmem_length Length of memory area given by previous parameter
* @param link_id Link ID used to define the rpmsg-lite instance, see rpmsg_platform.h
* @param init_flags Initialization flags
 * @param env_cfg Initialization data for the environment RPMsg-Lite layer, used when
 * the environment layer uses its own context (RL_USE_ENVIRONMENT_CONTEXT)
* @param static_context RPMsg-Lite preallocated context pointer, used in case of static api (RL_USE_STATIC_API)
*
* @return New RPMsg-Lite instance pointer or RL_NULL.
*
*/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
size_t shmem_length,
uint32_t link_id,
uint32_t init_flags,
struct rpmsg_lite_instance *static_context);
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(
void *shmem_addr, size_t shmem_length, uint32_t link_id, uint32_t init_flags, void *env_cfg);
#else
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
size_t shmem_length,
uint32_t link_id,
uint32_t init_flags);
#endif
/**
* @brief Initializes the RPMsg-Lite communication stack.
* Must be called prior to any other RPMsg-Lite API.
* To be called by the remote side.
*
* @param shmem_addr Shared memory base used for this instance of RPMsg-Lite
* @param link_id Link ID used to define the rpmsg-lite instance, see rpmsg_platform.h
* @param init_flags Initialization flags
 * @param env_cfg Initialization data for the environment RPMsg-Lite layer, used when
 * the environment layer uses its own context (RL_USE_ENVIRONMENT_CONTEXT)
* @param static_context RPMsg-Lite preallocated context pointer, used in case of static api (RL_USE_STATIC_API)
*
* @return New RPMsg-Lite instance pointer or RL_NULL.
*
*/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
uint32_t link_id,
uint32_t init_flags,
struct rpmsg_lite_instance *static_context);
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
uint32_t link_id,
uint32_t init_flags,
void *env_cfg);
#else
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr, uint32_t link_id, uint32_t init_flags);
#endif
/*!
*
* @brief Reset RPMsg-Lite communication stack. To be called by the remote side.
 * When the system wakes up from sleep state, the master side (DSP) will be reinitialized. The remote side
 * should call this function to recover the environment to its reset state.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
*/
void rpmsg_lite_remote_env_reset(struct rpmsg_lite_instance *rpmsg_lite_dev);
/*!
*
* @brief Deinitialized the RPMsg-Lite communication stack
* This function always succeeds.
* rpmsg_lite_init() can be called again after this
* function has been called.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
*
* @return Status of function execution, RL_SUCCESS on success.
*/
int32_t rpmsg_lite_deinit(struct rpmsg_lite_instance *rpmsg_lite_dev);
/*!
* @brief Create a new rpmsg endpoint, which can be used
* for communication.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param addr Desired address, RL_ADDR_ANY for automatic selection
* @param rx_cb Callback function called on receive
* @param rx_cb_data Callback data pointer, passed to rx_cb
* @param ept_context Endpoint preallocated context pointer, used in case of static api (RL_USE_STATIC_API)
*
* @return RL_NULL on error, new endpoint pointer on success.
*
*/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
uint32_t addr,
rl_ept_rx_cb_t rx_cb,
void *rx_cb_data,
struct rpmsg_lite_ept_static_context *ept_context);
#else
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
uint32_t addr,
rl_ept_rx_cb_t rx_cb,
void *rx_cb_data);
#endif
/*!
* @brief This function deletes rpmsg endpoint and performs cleanup.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param rl_ept Pointer to endpoint to destroy
*
*/
int32_t rpmsg_lite_destroy_ept(struct rpmsg_lite_instance *rpmsg_lite_dev, struct rpmsg_lite_endpoint *rl_ept);
/*!
*
* @brief Sends a message contained in data field of length size
* to the remote endpoint with address dst.
* ept->addr is used as source address in the rpmsg header
* of the message being sent.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param ept Sender endpoint
* @param dst Remote endpoint address
* @param data Payload buffer
* @param size Size of payload, in bytes
* @param timeout Timeout in ms, 0 if nonblocking
*
* @return Status of function execution, RL_SUCCESS on success.
*
*/
int32_t rpmsg_lite_send(struct rpmsg_lite_instance *rpmsg_lite_dev,
struct rpmsg_lite_endpoint *ept,
uint32_t dst,
char *data,
uint32_t size,
uint32_t timeout);
/*!
* @brief Function to get the link state
*
* @param rpmsg_lite_dev RPMsg-Lite instance
*
* @return True when link up, false when down.
*
*/
int32_t rpmsg_lite_is_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev);
#if defined(RL_API_HAS_ZEROCOPY) && (RL_API_HAS_ZEROCOPY == 1)
/*!
* @brief Releases the rx buffer for future reuse in vring.
* This API can be called at process context when the
* message in rx buffer is processed.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param rxbuf Rx buffer with message payload
*
* @return Status of function execution, RL_SUCCESS on success.
*/
int32_t rpmsg_lite_release_rx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, void *rxbuf);
int32_t rpmsg_lite_release_rx_buffer_dur_recover(struct rpmsg_lite_instance *rpmsg_lite_dev, void *rxbuf);
/*!
* @brief Allocates the tx buffer for message payload.
*
* This API can only be called at process context to get the tx buffer in vring. By this way, the
* application can directly put its message into the vring tx buffer without copy from an application buffer.
* It is the application responsibility to correctly fill the allocated tx buffer by data and passing correct
* parameters to the rpmsg_lite_send_nocopy() function to perform data no-copy-send mechanism.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param[in] size Pointer to store maximum payload size available
 * @param[in] timeout Integer, wait up to timeout ms or not for buffer to become available
*
* @return The tx buffer address on success and RL_NULL on failure.
*
* @see rpmsg_lite_send_nocopy
*/
void *rpmsg_lite_alloc_tx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t *size, uint32_t timeout);
/*!
* @brief Sends a message in tx buffer allocated by rpmsg_lite_alloc_tx_buffer()
*
* This function sends txbuf of length len to the remote dst address,
* and uses ept->addr as the source address.
* The application has to take the responsibility for:
* 1. tx buffer allocation (rpmsg_lite_alloc_tx_buffer())
* 2. filling the data to be sent into the pre-allocated tx buffer
* 3. not exceeding the buffer size when filling the data
* 4. data cache coherency
*
* After the rpmsg_lite_send_nocopy() function is issued the tx buffer is no more owned
* by the sending task and must not be touched anymore unless the rpmsg_lite_send_nocopy()
* function fails and returns an error.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param[in] ept Sender endpoint pointer
* @param[in] dst Destination address
* @param[in] data TX buffer with message filled
* @param[in] size Length of payload
*
* @return 0 on success and an appropriate error value on failure.
*
* @see rpmsg_lite_alloc_tx_buffer
*/
int32_t rpmsg_lite_send_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev,
struct rpmsg_lite_endpoint *ept,
uint32_t dst,
void *data,
uint32_t size);
#endif /* RL_API_HAS_ZEROCOPY */
//! @}
#if defined(__cplusplus)
}
#endif
#endif /* RPMSG_LITE_H_ */

View File

@ -0,0 +1,140 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RPMSG_NS_H_
#define RPMSG_NS_H_
#include "rpmsg_lite.h"
//! @addtogroup rpmsg_ns
//! @{
#define RL_NS_EPT_ADDR (0x35u)
/* Up to 32 flags available */
/* Name-service announcement flags, sent in the 'flags' field of an NS
 * message (see rpmsg_ns_announce()). */
enum rpmsg_ns_flags
{
    RL_NS_CREATE = 0, /*!< announces creation of an endpoint/channel */
    RL_NS_DESTROY = 1, /*!< announces destruction of an endpoint/channel */
};
/*! \typedef rpmsg_ns_new_ept_cb
\brief New endpoint NS callback function type.
*/
typedef void (*rpmsg_ns_new_ept_cb)(uint32_t new_ept, const char *new_ept_name, uint32_t flags, void *user_data);
/* Application name-service callback bundled with its user data, as
 * registered through rpmsg_ns_bind(). */
struct rpmsg_ns_callback_data
{
    rpmsg_ns_new_ept_cb cb; /*!< application callback for NS announcements */
    void *user_data;        /*!< opaque pointer handed back to the callback */
};
/* Runtime state of one name-service binding: the NS endpoint plus the
 * registered application callback data. */
struct rpmsg_ns_context
{
    struct rpmsg_lite_endpoint *ept;        /*!< endpoint used by the name service */
    struct rpmsg_ns_callback_data *cb_ctxt; /*!< application callback + user data */
};
typedef struct rpmsg_ns_context *rpmsg_ns_handle;
/* Preallocated storage for one name-service binding, used with the static
 * API (RL_USE_STATIC_API) to avoid dynamic allocation. */
struct rpmsg_ns_static_context_container
{
    struct rpmsg_lite_ept_static_context ept_ctxt; /*!< storage for the NS endpoint */
    struct rpmsg_ns_callback_data cb_ctxt;         /*!< storage for the callback data */
    struct rpmsg_ns_context ns_ctxt;               /*!< storage for the NS context */
};
typedef struct rpmsg_ns_static_context_container rpmsg_ns_static_context;
#if defined(__cplusplus)
extern "C" {
#endif
/*******************************************************************************
* API
******************************************************************************/
/* Exported API functions */
/*!
* @brief Registers application nameservice callback
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param app_cb Application nameservice callback
* @param user_data Application nameservice callback data
*
* @return RL_NULL on error, NameService handle on success.
*
*/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev,
rpmsg_ns_new_ept_cb app_cb,
void *user_data,
rpmsg_ns_static_context *ns_ept_ctxt);
#else
rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_new_ept_cb app_cb, void *user_data);
#endif /* RL_USE_STATIC_API */
/*!
* @brief Unregisters application nameservice callback and cleans up
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param handle NameService handle
*
* @return Status of function execution, RL_SUCCESS on success.
*
*/
int32_t rpmsg_ns_unbind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_handle handle);
/*!
* @brief Sends name service announcement to remote device
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param new_ept New endpoint to announce
* @param ept_name Name for the announced endpoint
* @param flags Channel creation/deletion flags
*
* @return Status of function execution, RL_SUCCESS on success
*
*/
int32_t rpmsg_ns_announce(struct rpmsg_lite_instance *rpmsg_lite_dev,
struct rpmsg_lite_endpoint *new_ept,
const char *ept_name,
uint32_t flags);
//! @}
#if defined(__cplusplus)
}
#endif
#endif /* RPMSG_NS_H_ */

View File

@ -0,0 +1,194 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RPMSG_QUEUE_H_
#define RPMSG_QUEUE_H_
#include "rpmsg_lite.h"
//! @addtogroup rpmsg_queue
//! @{
/*! \typedef rpmsg_queue_handle
\brief Rpmsg queue handle type.
*/
typedef void *rpmsg_queue_handle;
/* RL_API_HAS_ZEROCOPY has to be enabled for RPMsg Queue to work */
#if defined(RL_API_HAS_ZEROCOPY) && (RL_API_HAS_ZEROCOPY == 1)
/*******************************************************************************
* API
******************************************************************************/
/* Exported API functions */
#if defined(__cplusplus)
extern "C" {
#endif
/*!
* @brief
* This callback needs to be registered with an endpoint
*
* @param payload Pointer to the buffer containing received data
* @param payload_len Size of data received, in bytes
* @param src Pointer to address of the endpoint from which data is received
* @param priv Private data provided during endpoint creation
*
* @return RL_HOLD or RL_RELEASE to release or hold the buffer in payload
*/
int32_t rpmsg_queue_rx_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv);
/*!
* @brief
* Create a RPMsg queue which can be used
* for blocking reception.
*
* @param rpmsg_lite_dev RPMsg Lite instance
*
* @return RPMsg queue handle or RL_NULL
*
*/
rpmsg_queue_handle rpmsg_queue_create(struct rpmsg_lite_instance *rpmsg_lite_dev);
/*!
* @brief
* Destroy a queue and clean up.
* Do not destroy a queue which is registered with an active endpoint!
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param[in] q RPMsg queue handle to destroy
*
* @return Status of function execution
*
*/
int32_t rpmsg_queue_destroy(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_queue_handle q);
/*!
* @brief
 * blocking receive function - blocking version of the receive function that can be called from an RTOS task.
* The data is copied from the receive buffer into the user supplied buffer.
*
* This is the "receive with copy" version of the RPMsg receive function. This version is simple
* to use but it requires copying data from shared memory into the user space buffer.
* The user has no obligation or burden to manage the shared memory buffers.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param[in] q RPMsg queue handle to listen on
* @param[in] data Pointer to the user buffer the received data are copied to
* @param[out] len Pointer to an int variable that will contain the number of bytes actually copied into the
* buffer
* @param[in] maxlen Maximum number of bytes to copy (received buffer size)
* @param[out] src Pointer to address of the endpoint from which data is received
* @param[in] timeout Timeout, in milliseconds, to wait for a message. A value of 0 means don't wait (non-blocking
* call).
* A value of 0xffffffff means wait forever (blocking call).
*
* @return Status of function execution
*
* @see rpmsg_queue_recv_nocopy
*/
int32_t rpmsg_queue_recv(struct rpmsg_lite_instance *rpmsg_lite_dev,
rpmsg_queue_handle q,
uint32_t *src,
char *data,
uint32_t maxlen,
uint32_t *len,
uint32_t timeout);
/*!
* @brief
 * blocking receive function - blocking version of the receive function that can be called from an RTOS task.
* The data is NOT copied into the user-app. buffer.
*
* This is the "zero-copy receive" version of the RPMsg receive function. No data is copied.
* Only the pointer to the data is returned. This version is fast, but it requires the user to manage
* buffer allocation. Specifically, the user must decide when a buffer is no longer in use and
* make the appropriate API call to free it, see rpmsg_queue_nocopy_free().
*
* @param rpmsg_lite_dev RPMsg Lite instance
* @param[in] q RPMsg queue handle to listen on
* @param[out] data Pointer to the RPMsg buffer of the shared memory where the received data is stored
 * @param[out] len Pointer to an int variable that will contain the number of valid bytes in the RPMsg
* buffer
* @param[out] src Pointer to address of the endpoint from which data is received
* @param[in] timeout Timeout, in milliseconds, to wait for a message. A value of 0 means don't wait (non-blocking
* call).
* A value of 0xffffffff means wait forever (blocking call).
*
* @return Status of function execution.
*
* @see rpmsg_queue_nocopy_free
* @see rpmsg_queue_recv
*/
int32_t rpmsg_queue_recv_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev,
rpmsg_queue_handle q,
uint32_t *src,
char **data,
uint32_t *len,
uint32_t timeout);
/*!
* @brief This function frees a buffer previously returned by rpmsg_queue_recv_nocopy().
*
* Once the zero-copy mechanism of receiving data is used, this function
* has to be called to free a buffer and to make it available for the next data
* transfer.
*
* @param rpmsg_lite_dev RPMsg-Lite instance
* @param[in] data Pointer to the RPMsg buffer of the shared memory that has to be freed
*
* @return Status of function execution.
*
* @see rpmsg_queue_recv_nocopy
*/
int32_t rpmsg_queue_nocopy_free(struct rpmsg_lite_instance *rpmsg_lite_dev, void *data);
/*!
* @brief This function returns the number of pending messages in the queue.
*
* @param[in] q RPMsg queue handle
*
* @return Number of pending messages in the queue.
*/
int32_t rpmsg_queue_get_current_size(rpmsg_queue_handle q);
//! @}
#if defined(__cplusplus)
}
#endif
#endif /* RL_API_HAS_ZEROCOPY */
#endif /* RPMSG_QUEUE_H_ */

View File

@ -0,0 +1,168 @@
/*-
* Copyright Rusty Russell IBM Corporation 2007.
* Copyright 2019 NXP
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef VIRTIO_RING_H
#define VIRTIO_RING_H
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1U
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2U
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4U
/* The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
* optimization. Guest will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY 1U
/* The Guest uses this in avail->flags to advise the Host: don't
* interrupt me when you consume a buffer. It's unreliable, so it's
* simply an optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1U
/* VirtIO ring descriptors: 16 bytes.
* These can chain together via "next". */
struct vring_desc
{
/* Address (guest-physical). */
uint64_t addr;
/* Length. */
uint32_t len;
/* The flags as indicated above. */
uint16_t flags;
/* We chain unused descriptors via this, too. */
uint16_t next;
};
/* The available ring: indices of descriptor chain heads offered to the
 * other side. ring[] is declared with one element but actually holds
 * vring.num entries (old-style flexible array), followed by the
 * used_event slot accessed via vring_used_event(). */
struct vring_avail
{
    uint16_t flags;   /* VRING_AVAIL_F_NO_INTERRUPT advice bit */
    uint16_t idx;     /* free-running index of the next free ring[] slot */
    uint16_t ring[1]; /* descriptor-table indices of offered chain heads */
};
/* uint32_t is used here for ids for padding reasons. */
struct vring_used_elem
{
/* Index of start of used descriptor chain. */
uint32_t id;
/* Total length of the descriptor chain which was written to. */
uint32_t len;
};
/* The used ring: completed descriptor chains handed back. ring[] really
 * holds vring.num entries, followed by the avail_event slot accessed via
 * vring_avail_event(). */
struct vring_used
{
    uint16_t flags;                 /* VRING_USED_F_NO_NOTIFY advice bit */
    uint16_t idx;                   /* free-running index of the next free ring[] slot */
    struct vring_used_elem ring[1]; /* completed chains: head index + written length */
};
/* Convenience handle grouping the three regions of one virtqueue ring;
 * see the layout description that follows this definition. */
struct vring
{
    uint32_t num;              /* number of descriptors; assumed to be a power of 2 */
    struct vring_desc *desc;   /* descriptor table */
    struct vring_avail *avail; /* available ring */
    struct vring_used *used;   /* used ring */
};
/* The standard layout for the ring is a continuous chunk of memory which
* looks like this. We assume num is a power of 2.
*
* struct vring {
* # The actual descriptors (16 bytes each)
* struct vring_desc desc[num];
*
* # A ring of available descriptor heads with free-running index.
* __u16 avail_flags;
* __u16 avail_idx;
* __u16 available[num];
* __u16 used_event_idx;
*
* # Padding to the next align boundary.
* char pad[];
*
* # A ring of used descriptor heads with free-running index.
* __u16 used_flags;
* __u16 used_idx;
* struct vring_used_elem used[num];
* __u16 avail_event_idx;
* };
*
* NOTE: for VirtIO PCI, align is 4096.
*/
/*
* We publish the used event index at the end of the available ring, and vice
* versa. They are at the end for backwards compatibility.
*/
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) ((vr)->used->ring[(vr)->num].id)
/*!
 * vring_size
 *
 * Computes the total number of bytes required for a ring of 'num'
 * descriptors whose used ring starts on an 'align'-byte boundary:
 * desc[num], then the available ring (+ used_event slot), padding,
 * then the used ring (+ avail_event slot).
 *
 * @param num   Number of descriptors
 * @param align Alignment boundary for the used ring (e.g. 4096 for PCI)
 *
 * @return Required size in bytes
 */
static inline int32_t vring_size(uint32_t num, uint32_t align)
{
    uint32_t bytes;

    /* Descriptor table plus available ring and its trailing used_event slot. */
    bytes = num * sizeof(struct vring_desc);
    bytes += sizeof(struct vring_avail) + (num * sizeof(uint16_t)) + sizeof(uint16_t);

    /* Round up so the used ring starts on an 'align' boundary. */
    bytes = (bytes + align - 1UL) & ~(align - 1UL);

    /* Used ring plus its trailing avail_event slot. */
    bytes += sizeof(struct vring_used) + (num * sizeof(struct vring_used_elem)) + sizeof(uint16_t);

    return ((int32_t)bytes);
}
/*
 * Initialize 'vr' over the pre-allocated shared-memory chunk at 'p',
 * following the standard layout: descriptor table first, then the
 * available ring, then (after padding to 'align') the used ring.
 *
 * @param vr    vring bookkeeping structure to fill in
 * @param num   number of descriptors (assumed power of two)
 * @param p     base of the region (at least vring_size(num, align) bytes)
 * @param align alignment of the used ring, in bytes (power of two)
 */
static inline void vring_init(struct vring *vr, uint32_t num, uint8_t *p, uint32_t align)
{
    vr->num  = num;
    vr->desc = (struct vring_desc *)(void *)p;
    vr->avail = (struct vring_avail *)(void *)(p + num * sizeof(struct vring_desc));
    /* Round the pointer up with uintptr_t (not uint32_t) so the
     * computation is also correct on 64-bit targets. */
    vr->used = (struct vring_used *)(((uintptr_t)&vr->avail->ring[num] + align - 1UL) & ~(uintptr_t)(align - 1UL));
}
/*
 * Used with VIRTIO_RING_F_EVENT_IDX: decide whether moving the ring
 * index from 'old' to 'new_idx' stepped over the other side's published
 * 'event_idx', i.e. whether a notification must be raised. All
 * arithmetic is modulo 2^16 on purpose (free-running indices).
 */
static inline int32_t vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
    uint16_t passed = (uint16_t)(new_idx - event_idx - 1U);
    uint16_t moved  = (uint16_t)(new_idx - old);

    return (passed < moved) ? 1 : 0;
}
#endif /* VIRTIO_RING_H */

View File

@ -0,0 +1,249 @@
#ifndef VIRTQUEUE_H_
#define VIRTQUEUE_H_
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <stdbool.h>
#include <stdint.h>
#include "rpmsg_default_config.h"
typedef uint8_t boolean;
#include "virtio_ring.h"
#include "llist.h"
/* Error Codes returned by the virtqueue API (negative, offset from VQ_ERROR_BASE). */
#define VQ_ERROR_BASE (-3000)
#define ERROR_VRING_FULL (VQ_ERROR_BASE - 1)
#define ERROR_INVLD_DESC_IDX (VQ_ERROR_BASE - 2)
#define ERROR_EMPTY_RING (VQ_ERROR_BASE - 3)
#define ERROR_NO_MEM (VQ_ERROR_BASE - 4)
#define ERROR_VRING_MAX_DESC (VQ_ERROR_BASE - 5)
#define ERROR_VRING_ALIGN (VQ_ERROR_BASE - 6)
#define ERROR_VRING_NO_BUFF (VQ_ERROR_BASE - 7)
#define ERROR_VQUEUE_INVLD_PARAM (VQ_ERROR_BASE - 8)
#define VQUEUE_SUCCESS (0)
/* Set to true to compile in the VQASSERT/VQUEUE_BUSY debug checks below. */
#define VQUEUE_DEBUG (false)
/* This is temporary macro to replace C NULL support.
 * At the moment all the RTL specific functions are present in env.
 * */
#define VQ_NULL ((void *)0)
/* The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END (32768)
/* Internal vq_flags bits. */
#define VIRTQUEUE_FLAG_INDIRECT (0x0001U)
#define VIRTQUEUE_FLAG_EVENT_IDX (0x0002U)
#define VIRTQUEUE_MAX_NAME_SZ (32) /* mind the alignment */
/* Support for indirect buffer descriptors. */
#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
/* Support to suppress interrupt until specific index is reached. */
#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
/*
 * Hint on how long the next interrupt should be postponed. This is
 * only used when the EVENT_IDX feature is negotiated.
 */
typedef enum
{
    VQ_POSTPONE_SHORT,
    VQ_POSTPONE_LONG,
    VQ_POSTPONE_EMPTIED /* Until all available desc are used. */
} vq_postpone_t;
/* Local virtqueue representation, not in shared memory - only the vring
 * it points to (vq_ring) is shared with the remote side. Field order is
 * chosen to keep natural alignment (see the markers below). */
struct virtqueue
{
    /* 32bit aligned { */
    char vq_name[VIRTQUEUE_MAX_NAME_SZ];
    uint32_t vq_flags;
    int32_t vq_alignment;
    int32_t vq_ring_size;
    void *vq_ring_mem;
    void (*callback_fc)(struct virtqueue *vq); /* callback hook, set at virtqueue_create() */
    void (*notify_fc)(struct virtqueue *vq);   /* notify hook, see virtqueue_kick() */
    int32_t vq_max_indirect_size;
    int32_t vq_indirect_mem_size;
    struct vring vq_ring;
    /* } 32bit aligned */
    /* 16bit aligned { */
    uint16_t vq_queue_index;
    uint16_t vq_nentries;
    uint16_t vq_free_cnt;
    uint16_t vq_queued_cnt;
    /*
     * Head of the free chain in the descriptor table. If
     * there are no free descriptors, this will be set to
     * VQ_RING_DESC_CHAIN_END.
     */
    uint16_t vq_desc_head_idx;
    /*
     * Last consumed descriptor in the used table,
     * trails vq_ring.used->idx.
     */
    uint16_t vq_used_cons_idx;
    /*
     * Last consumed descriptor in the available table -
     * used by the consumer side.
     */
    uint16_t vq_available_idx;
    /* } 16bit aligned */
    boolean avail_read;  /* 8bit wide */
    boolean avail_write; /* 8bit wide */
    boolean used_read;   /* 8bit wide */
    boolean used_write;  /* 8bit wide */
    uint16_t padd; /* aligned to 32bits after this: */
    void *priv; /* private pointer, upper layer instance pointer */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    void *env; /* private pointer to environment layer internal context */
#endif
};
/* Struct to hold vring specific information: a description of the
 * caller-allocated shared memory the ring will be built in. */
struct vring_alloc_info
{
    void *phy_addr;     /* base address of the ring memory */
    uint32_t align;     /* used-ring alignment, bytes */
    uint16_t num_descs; /* number of descriptors */
    uint16_t pad;       /* explicit padding to a 32-bit multiple */
};

/* Caller-provided storage for virtqueue_create_static(). */
struct vq_static_context
{
    struct virtqueue vq;
};

/* Signature types for the two virtqueue hooks. */
typedef void vq_callback(struct virtqueue *vq);
typedef void vq_notify(struct virtqueue *vq);
#if (VQUEUE_DEBUG == true)
/* Debug build: assertion macros print the failing virtqueue's name and spin. */
#define VQASSERT_BOOL(_vq, _exp, _msg)                             \
    do                                                             \
    {                                                              \
        if (!(_exp))                                               \
        {                                                          \
            env_print("%s: %s - " _msg, __func__, (_vq)->vq_name); \
            while (1)                                              \
                ;                                                  \
        }                                                          \
    } while (0)
#define VQASSERT(_vq, _exp, _msg) VQASSERT_BOOL(_vq, (_exp) != 0, _msg)

/* Validate a descriptor index against the queue size. */
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, "invalid ring index")

/* Record 'status_err' in 'status_var' when 'condition' holds (first error wins). */
#define VQ_PARAM_CHK(condition, status_var, status_err) \
    if ((status_var == 0) && (condition))               \
    {                                                   \
        status_var = status_err;                        \
    }

/* Re-entrancy guard: flag the queue busy in direction 'dir'; assert if it already is. */
#define VQUEUE_BUSY(vq, dir)                                        \
    if ((vq)->dir == false)                                         \
        (vq)->dir = true;                                           \
    else                                                            \
        VQASSERT(vq, (vq)->dir == false, "VirtQueue already in use")
#define VQUEUE_IDLE(vq, dir) ((vq)->dir = false)
#else
/* Release build: all checks compile away to nothing. */
#define KASSERT(cond, str)
#define VQASSERT(_vq, _exp, _msg)
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)
#define VQ_PARAM_CHK(condition, status_var, status_err)
#define VQUEUE_BUSY(vq, dir)
#define VQUEUE_IDLE(vq, dir)
#endif
/*! Create virtqueue 'id' backed by 'ring'; allocates the struct virtqueue. */
int32_t virtqueue_create(uint16_t id,
                         const char *name,
                         struct vring_alloc_info *ring,
                         void (*callback_fc)(struct virtqueue *vq),
                         void (*notify_fc)(struct virtqueue *vq),
                         struct virtqueue **v_queue);
/*! As virtqueue_create(), but uses caller-provided static storage 'vq_ctxt'. */
int32_t virtqueue_create_static(uint16_t id,
                                const char *name,
                                struct vring_alloc_info *ring,
                                void (*callback_fc)(struct virtqueue *vq),
                                void (*notify_fc)(struct virtqueue *vq),
                                struct virtqueue **v_queue,
                                struct vq_static_context *vq_ctxt);
/*! Reset a virtqueue to its post-create state. */
void virtqueue_reinit(struct virtqueue *vq);
/*! Publish descriptor chain starting at 'head_idx' on the avail ring. */
int32_t virtqueue_add_buffer(struct virtqueue *vq, uint16_t head_idx);
/*! Place 'buffer' of 'len' bytes on the used ring. */
int32_t virtqueue_fill_used_buffers(struct virtqueue *vq, void *buffer, uint32_t len);
/*! Place 'buffer' of 'len' bytes on the avail ring. */
int32_t virtqueue_fill_avail_buffers(struct virtqueue *vq, void *buffer, uint32_t len);
/*! Pop the next used buffer; fills 'len' and 'idx'. Returns its address. */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx);
/*! Pop the next available buffer; fills 'avail_idx' and 'len'. */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, uint32_t *len);
/*! Return a consumed buffer ('head_idx', 'len') to the used ring. */
int32_t virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, uint32_t len);
/*! Suppress / re-enable the queue's completion callback. */
void virtqueue_disable_cb(struct virtqueue *vq);
int32_t virtqueue_enable_cb(struct virtqueue *vq);
/*! Notify the other side that new buffers were published. */
void virtqueue_kick(struct virtqueue *vq);
/*! Destroy a dynamically / statically created virtqueue. */
void virtqueue_free(struct virtqueue *vq);
void virtqueue_free_static(struct virtqueue *vq);
/*! Diagnostic dump of virtqueue state. */
void virtqueue_dump(struct virtqueue *vq);
/*! Interrupt-side entry point (called from env_isr()). */
void virtqueue_notification(struct virtqueue *vq);
/*! Ring-size / per-descriptor length accessors. */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq);
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx);
/*! (Re)initialize the vring bookkeeping of 'vq'. */
void vq_ring_init(struct virtqueue *vq);
#endif /* VIRTQUEUE_H_ */

View File

@ -0,0 +1,414 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env_bm.c
*
*
* DESCRIPTION
*
* This file is Bare Metal Implementation of env layer for OpenAMP.
*
*
**************************************************************************/
#include "rpmsg_env.h"
#include "rpmsg_platform.h"
#include "virtqueue.h"
#include "rpmsg_compiler.h"
#include <stdlib.h>
#include <string.h>
static int32_t env_init_counter = 0;
/* Max supported ISR counts */
#define ISR_COUNT (12U) /* Change for multiple remote cores */
/*!
 * Structure to keep track of registered ISR's.
 */
struct isr_info
{
    void *data; /* virtqueue bound to this virtual vector (see env_register_isr) */
};

/* One slot per virtual interrupt vector; populated by env_register_isr(). */
static struct isr_info isr_table[ISR_COUNT];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
/*!
 * env_init
 *
 * Initializes the bare-metal environment layer. Reference counted:
 * only the first call clears the ISR table and initializes the platform.
 *
 * @return 0 (or platform_init() status) on success, -1 on failure
 */
int32_t env_init(void)
{
    RL_ASSERT(env_init_counter >= 0);
    if (env_init_counter < 0)
    {
        return -1;
    }

    env_init_counter++;
    if (env_init_counter != 1)
    {
        /* Nested call - environment already set up. */
        return 0;
    }

    /* First call: clear ISR bookkeeping, then bring up the platform. */
    (void)memset(isr_table, 0, sizeof(isr_table));
    return platform_init();
}
/*!
 * env_deinit
 *
 * Uninitializes the bare-metal environment layer. Reference counted:
 * only the last call shuts the platform down.
 *
 * @returns Execution status (0 or platform_deinit() status, -1 on underflow)
 */
int32_t env_deinit(void)
{
    RL_ASSERT(env_init_counter > 0);
    if (env_init_counter <= 0)
    {
        return -1;
    }

    env_init_counter--;
    if (env_init_counter != 0)
    {
        /* Still referenced by another user - nothing to tear down yet. */
        return 0;
    }

    /* Last user gone: shut the platform down. */
    return platform_deinit();
}
/*!
 * env_allocate_memory - allocate 'size' bytes from the C heap.
 *
 * @param size Number of bytes requested
 * @return Pointer to the block, or NULL when the heap is exhausted
 */
void *env_allocate_memory(uint32_t size)
{
    return malloc(size);
}
/*!
 * env_free_memory - return a block obtained from env_allocate_memory().
 *
 * @param ptr Block to free; NULL is accepted and ignored.
 */
void env_free_memory(void *ptr)
{
    /* free(NULL) is a defined no-op per the C standard, so the previous
     * NULL guard was redundant. */
    free(ptr);
}
/*!
 *
 * env_memset - fill 'size' bytes at 'ptr' with byte 'value'.
 *
 * @param ptr   Destination buffer
 * @param value Byte value (passed through to memset)
 * @param size  Number of bytes
 */
void env_memset(void *ptr, int32_t value, uint32_t size)
{
    (void)memset(ptr, (int)value, (size_t)size);
}
/*!
 *
 * env_memcpy - copy 'len' bytes from 'src' to 'dst' (regions must not overlap).
 *
 * @param dst Destination buffer
 * @param src Source buffer
 * @param len Number of bytes
 */
void env_memcpy(void *dst, void const *src, uint32_t len)
{
    (void)memcpy(dst, src, (size_t)len);
}
/*!
 *
 * env_strcmp - lexicographic comparison of two NUL-terminated strings.
 *
 * @param dst First string
 * @param src Second string
 * @return <0, 0, or >0 as with strcmp()
 */
int32_t env_strcmp(const char *dst, const char *src)
{
    return (int32_t)strcmp(dst, src);
}
/*!
 *
 * env_strncpy - bounded string copy with strncpy semantics.
 *
 * NOTE(review): dest is NOT NUL-terminated when strlen(src) >= len;
 * callers are expected to bound 'len' accordingly.
 *
 * @param dest Destination buffer
 * @param src  Source string
 * @param len  Maximum number of characters to copy
 */
void env_strncpy(char *dest, const char *src, uint32_t len)
{
    (void)strncpy(dest, src, (size_t)len);
}
/*!
 *
 * env_strncmp - compare at most 'len' characters of two strings.
 *
 * @param dest First string
 * @param src  Second string
 * @param len  Maximum number of characters compared
 * @return <0, 0, or >0 as with strncmp()
 */
int32_t env_strncmp(char *dest, const char *src, uint32_t len)
{
    return (int32_t)strncmp(dest, src, (size_t)len);
}
/*!
 *
 * env_mb - implementation
 *
 * Full memory barrier. This port maps all three barrier flavors
 * (mb/rmb/wmb) to the same MEM_BARRIER() macro from the compiler
 * abstraction header.
 */
void env_mb(void)
{
    MEM_BARRIER();
}
/*!
 * env_rmb - implementation (full barrier in this port)
 */
void env_rmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_wmb - implementation (full barrier in this port)
 */
void env_wmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_map_vatopa - implementation
 *
 * Virtual-to-physical address translation, delegated to the platform layer.
 *
 * @param address Virtual address
 * @return Corresponding physical address
 */
uint32_t env_map_vatopa(void *address)
{
    return platform_vatopa(address);
}
/*!
 * env_map_patova - implementation
 *
 * Physical-to-virtual address translation, delegated to the platform layer.
 *
 * @param address Physical address
 * @return Corresponding virtual address
 */
void *env_map_patova(uint32_t address)
{
    return platform_patova(address);
}
/*!
 * env_create_mutex
 *
 * Creates a mutex with the given initial count.
 *
 * In the bare-metal port no real locking is required (the API is never
 * shared with ISR context - see env_lock_mutex), so no object is
 * allocated: the handle is simply made non-NULL by pointing it at the
 * caller's own pointer slot.
 *
 * @param lock  Out: receives the (dummy) mutex handle
 * @param count Initial count - ignored in this port
 * @return 0 (always succeeds)
 */
int32_t env_create_mutex(void **lock, int32_t count)
{
    /* make the mutex pointer point to itself
     * this marks the mutex handle as initialized.
     */
    *lock = lock;
    return 0;
}
/*!
 * env_delete_mutex
 *
 * Deletes the given lock; nothing was allocated in this port, so
 * nothing to release.
 *
 */
void env_delete_mutex(void *lock)
{
}
/*!
 * env_lock_mutex
 *
 * Tries to acquire the lock, if lock is not available then call to
 * this function will suspend.
 */
void env_lock_mutex(void *lock)
{
    /* No mutex needed for RPMsg-Lite in BM environment,
     * since the API is not shared with ISR context. */
}
/*!
 * env_unlock_mutex
 *
 * Releases the given lock.
 */
void env_unlock_mutex(void *lock)
{
    /* No mutex needed for RPMsg-Lite in BM environment,
     * since the API is not shared with ISR context. */
}
/*!
 * env_sleep_msec
 *
 * Suspends the calling thread for given time , in msecs; delegated to
 * the platform delay routine.
 */
void env_sleep_msec(uint32_t num_msec)
{
    platform_time_delay(num_msec);
}
/*!
 * env_register_isr
 *
 * Binds interrupt handler data (a virtqueue) to a virtual vector so
 * env_isr() can dispatch to it later.
 *
 * @param vector_id Virtual interrupt vector number (must be < ISR_COUNT)
 * @param data      Handler data (virtqueue) stored for dispatch
 */
void env_register_isr(uint32_t vector_id, void *data)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id >= ISR_COUNT)
    {
        return; /* out of range - ignore in release builds */
    }
    isr_table[vector_id].data = data;
}
/*!
 * env_unregister_isr
 *
 * Clears the interrupt handler data previously registered for the
 * given virtual vector.
 *
 * @param vector_id Virtual interrupt vector number (must be < ISR_COUNT)
 */
void env_unregister_isr(uint32_t vector_id)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id >= ISR_COUNT)
    {
        return; /* out of range - ignore in release builds */
    }
    isr_table[vector_id].data = NULL;
}
/*!
 * env_enable_interrupt
 *
 * Enables the given interrupt; delegated to the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_enable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_enable(vector_id);
}
/*!
 * env_disable_interrupt
 *
 * Disables the given interrupt; delegated to the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_disable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_disable(vector_id);
}
/*!
 * env_map_memory
 *
 * Enables memory mapping for given memory region.
 *
 * @param pa - physical address of memory
 * @param va - logical address of memory
 * @param size - memory size
 * @param flags - flags for cache/uncached and access type
 *
 * NOTE(review): arguments are forwarded as (va, pa, ...) - confirm this
 * matches platform_map_mem_region()'s parameter order.
 */
void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags)
{
    platform_map_mem_region(va, pa, size, flags);
}
/*!
 * env_disable_cache
 *
 * Disables system caches: flushes and invalidates first, then disables.
 *
 */
void env_disable_cache(void)
{
    platform_cache_all_flush_invalidate();
    platform_cache_disable();
}
/*========================================================= */
/* Util data / functions for BM */

/*!
 * env_isr - environment-level interrupt dispatcher: forwards the
 * notification to the virtqueue registered for 'vector'.
 *
 * @param vector Virtual interrupt vector number
 */
void env_isr(uint32_t vector)
{
    RL_ASSERT(vector < ISR_COUNT);
    if (vector < ISR_COUNT)
    {
        virtqueue_notification((struct virtqueue *)isr_table[vector].data);
    }
}

View File

@ -0,0 +1,703 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env_freertos.c
*
*
* DESCRIPTION
*
* This file is FreeRTOS Implementation of env layer for OpenAMP.
*
*
**************************************************************************/
#include "rpmsg_env.h"
#include "FreeRTOS.h"
#include "task.h"
#include "semphr.h"
#include "rpmsg_platform.h"
#include "virtqueue.h"
#include "rpmsg_compiler.h"
#include <stdlib.h>
#include <string.h>
/* Reference count of env_init()/env_deinit() calls. */
static int32_t env_init_counter = 0;
/* Signals completion of the one-time platform initialization. */
static SemaphoreHandle_t env_sema = ((void *)0);
/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count'
   if the initial count is 1, this function behaves as a mutex
   if it is greater than 1, it acts as a "resource allocator" with
   the maximum of 'count' resources available.
   Currently, only the first use-case is applicable/applied in RPMsg-Lite.
 */
#define RL_ENV_MAX_MUTEX_COUNT (10)
/* Max supported ISR counts */
#define ISR_COUNT (32U)
/*!
 * Structure to keep track of registered ISR's.
 */
struct isr_info
{
    void *data; /* virtqueue bound to this virtual vector */
};

/* One slot per virtual interrupt vector; populated by env_register_isr(). */
static struct isr_info isr_table[ISR_COUNT];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
/*!
 * env_in_isr
 *
 * @returns - non-zero if currently executing in interrupt context
 *
 */
static int32_t env_in_isr(void)
{
    return xPortIsInsideInterrupt();
}
/*!
 * env_init
 *
 * Initializes the FreeRTOS environment layer. Reference counted: the
 * first caller creates the init-done semaphore, clears the ISR table
 * and runs platform_init(); later callers block on that semaphore until
 * the first initialization has completed.
 *
 * @return 0 (or platform_init() status) on success, -1 on failure
 */
int32_t env_init(void)
{
    int32_t retval;
    vTaskSuspendAll(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter >= 0);
    if (env_init_counter < 0)
    {
        (void)xTaskResumeAll(); /* re-enable scheduler */
        return -1;
    }
    env_init_counter++;
    if (env_init_counter == 1)
    {
        /* first call - perform the one-time initialization */
        env_sema = xSemaphoreCreateBinary();
        (void)memset(isr_table, 0, sizeof(isr_table));
        (void)xTaskResumeAll();
        retval = platform_init();
        /* signal any waiting callers that init has finished */
        (void)xSemaphoreGive(env_sema);
        return retval;
    }
    else
    {
        /* nested call - wait for the first caller to finish */
        (void)xTaskResumeAll();
        /* Get the semaphore and then return it,
         * this allows for platform_init() to block
         * if needed and other tasks to wait for the
         * blocking to be done.
         * This is in ENV layer as this is ENV specific.*/
        if (pdTRUE == xSemaphoreTake(env_sema, portMAX_DELAY))
        {
            (void)xSemaphoreGive(env_sema);
        }
        return 0;
    }
}
/*!
 * env_deinit
 *
 * Uninitializes the FreeRTOS environment layer. Reference counted: only
 * the last caller tears the platform down and deletes the semaphore.
 *
 * @returns - execution status (0 on success, -1 on underflow)
 */
int32_t env_deinit(void)
{
    int32_t retval;
    vTaskSuspendAll(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter > 0);
    if (env_init_counter <= 0)
    {
        (void)xTaskResumeAll(); /* re-enable scheduler */
        return -1;
    }
    env_init_counter--;
    if (env_init_counter <= 0)
    {
        /* last call - tear everything down */
        (void)memset(isr_table, 0, sizeof(isr_table));
        retval = platform_deinit();
        vSemaphoreDelete(env_sema);
        env_sema = ((void *)0);
        (void)xTaskResumeAll();
        return retval;
    }
    else
    {
        /* still referenced elsewhere - nothing to do */
        (void)xTaskResumeAll();
        return 0;
    }
}
/*!
 * env_allocate_memory - implementation
 *
 * Allocates 'size' bytes from the FreeRTOS heap.
 *
 * @param size Number of bytes
 * @return Pointer to the block, or NULL on exhaustion
 */
void *env_allocate_memory(uint32_t size)
{
    return (pvPortMalloc(size));
}
/*!
 * env_free_memory - implementation
 *
 * Returns a block to the FreeRTOS heap. The NULL guard is kept because
 * vPortFree() is not guaranteed to accept NULL on every port.
 *
 * @param ptr Block to free, or NULL (ignored)
 */
void env_free_memory(void *ptr)
{
    if (ptr != ((void *)0))
    {
        vPortFree(ptr);
    }
}
/*!
 *
 * env_memset - implementation
 *
 * @param ptr   Destination buffer
 * @param value Byte value (passed through to memset)
 * @param size  Number of bytes
 */
void env_memset(void *ptr, int32_t value, uint32_t size)
{
    (void)memset(ptr, value, size);
}
/*!
 *
 * env_memcpy - implementation (memcpy semantics - regions must not overlap)
 *
 * @param dst
 * @param src
 * @param len
 */
void env_memcpy(void *dst, void const *src, uint32_t len)
{
    (void)memcpy(dst, src, len);
}
/*!
 *
 * env_strcmp - implementation
 *
 * @param dst
 * @param src
 */
int32_t env_strcmp(const char *dst, const char *src)
{
    return (strcmp(dst, src));
}
/*!
 *
 * env_strncpy - implementation
 *
 * NOTE(review): plain strncpy - dest is NOT NUL-terminated when
 * strlen(src) >= len; callers must bound 'len' accordingly.
 *
 * @param dest
 * @param src
 * @param len
 */
void env_strncpy(char *dest, const char *src, uint32_t len)
{
    (void)strncpy(dest, src, len);
}
/*!
 *
 * env_strncmp - implementation
 *
 * @param dest
 * @param src
 * @param len
 */
int32_t env_strncmp(char *dest, const char *src, uint32_t len)
{
    return (strncmp(dest, src, len));
}
/*!
 *
 * env_mb - implementation
 *
 * Full memory barrier. This port maps all three barrier flavors
 * (mb/rmb/wmb) to the same MEM_BARRIER() macro from the compiler
 * abstraction header.
 */
void env_mb(void)
{
    MEM_BARRIER();
}
/*!
 * env_rmb - implementation (full barrier in this port)
 */
void env_rmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_wmb - implementation (full barrier in this port)
 */
void env_wmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_map_vatopa - implementation
 *
 * Virtual-to-physical address translation, delegated to the platform layer.
 *
 * @param address
 */
uint32_t env_map_vatopa(void *address)
{
    return platform_vatopa(address);
}
/*!
 * env_map_patova - implementation
 *
 * Physical-to-virtual address translation, delegated to the platform layer.
 *
 * @param address
 */
void *env_map_patova(uint32_t address)
{
    return platform_patova(address);
}
/*!
 * env_create_mutex
 *
 * Creates a counting semaphore used as a mutex with the given initial
 * count (count == 1 behaves as a plain mutex; see RL_ENV_MAX_MUTEX_COUNT).
 *
 * @param lock  Out: receives the semaphore handle
 * @param count Initial count; must be within [0, RL_ENV_MAX_MUTEX_COUNT]
 * @return 0 on success, -1 on invalid count or allocation failure
 */
int32_t env_create_mutex(void **lock, int32_t count)
{
    /* Also reject negative counts: casting them to UBaseType_t would
     * silently turn them into huge initial counts. */
    if ((count < 0) || (count > RL_ENV_MAX_MUTEX_COUNT))
    {
        return -1;
    }
    *lock = xSemaphoreCreateCounting((UBaseType_t)RL_ENV_MAX_MUTEX_COUNT, (UBaseType_t)count);
    return (*lock != ((void *)0)) ? 0 : -1;
}
/*!
 * env_delete_mutex
 *
 * Deletes the given lock (a FreeRTOS semaphore handle).
 *
 */
void env_delete_mutex(void *lock)
{
    vSemaphoreDelete(lock);
}
/*!
 * env_lock_mutex
 *
 * Tries to acquire the lock, if lock is not available then call to
 * this function will suspend. No-op when called from ISR context
 * (blocking there would be illegal).
 */
void env_lock_mutex(void *lock)
{
    SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock;
    if (env_in_isr() == 0)
    {
        (void)xSemaphoreTake(xSemaphore, portMAX_DELAY);
    }
}
/*!
 * env_unlock_mutex
 *
 * Releases the given lock. No-op in ISR context (matches env_lock_mutex).
 */
void env_unlock_mutex(void *lock)
{
    SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock;
    if (env_in_isr() == 0)
    {
        (void)xSemaphoreGive(xSemaphore);
    }
}
/*!
 * env_create_sync_lock
 *
 * Creates a synchronization lock primitive. It is used
 * when signal has to be sent from the interrupt context to main
 * thread context.
 */
int32_t env_create_sync_lock(void **lock, int32_t state)
{
    return env_create_mutex(lock, state); /* state=1 .. initially free */
}
/*!
 * env_delete_sync_lock
 *
 * Deletes the given lock
 *
 */
void env_delete_sync_lock(void *lock)
{
    if (lock != ((void *)0))
    {
        env_delete_mutex(lock);
    }
}
/*!
 * env_acquire_sync_lock
 *
 * Tries to acquire the lock, if lock is not available then call to
 * this function waits for lock to become available. ISR-safe: uses the
 * FromISR variant and requests a context switch if a task was woken.
 */
void env_acquire_sync_lock(void *lock)
{
    BaseType_t xTaskWokenByReceive = pdFALSE;
    SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock;
    if (env_in_isr() != 0)
    {
        (void)xSemaphoreTakeFromISR(xSemaphore, &xTaskWokenByReceive);
        portEND_SWITCHING_ISR(xTaskWokenByReceive);
    }
    else
    {
        (void)xSemaphoreTake(xSemaphore, portMAX_DELAY);
    }
}
/*!
 * env_release_sync_lock
 *
 * Releases the given lock. ISR-safe (see env_acquire_sync_lock).
 */
void env_release_sync_lock(void *lock)
{
    BaseType_t xTaskWokenByReceive = pdFALSE;
    SemaphoreHandle_t xSemaphore = (SemaphoreHandle_t)lock;
    if (env_in_isr() != 0)
    {
        (void)xSemaphoreGiveFromISR(xSemaphore, &xTaskWokenByReceive);
        portEND_SWITCHING_ISR(xTaskWokenByReceive);
    }
    else
    {
        (void)xSemaphoreGive(xSemaphore);
    }
}
/*!
 * env_sleep_msec
 *
 * Suspends the calling thread for given time , in msecs.
 *
 * NOTE(review): integer division - delays shorter than one tick
 * (portTICK_PERIOD_MS) become vTaskDelay(0), i.e. just a yield.
 */
void env_sleep_msec(uint32_t num_msec)
{
    vTaskDelay(num_msec / portTICK_PERIOD_MS);
}
/*!
 * env_register_isr
 *
 * Registers interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number (must be < ISR_COUNT)
 * @param data - interrupt handler data (virtqueue)
 */
void env_register_isr(uint32_t vector_id, void *data)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id < ISR_COUNT)
    {
        isr_table[vector_id].data = data;
    }
}
/*!
 * env_unregister_isr
 *
 * Unregisters interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_unregister_isr(uint32_t vector_id)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id < ISR_COUNT)
    {
        isr_table[vector_id].data = ((void *)0);
    }
}
/*!
 * env_enable_interrupt
 *
 * Enables the given interrupt; delegated to the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_enable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_enable(vector_id);
}
/*!
 * env_disable_interrupt
 *
 * Disables the given interrupt; delegated to the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_disable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_disable(vector_id);
}
/*!
 * env_map_memory
 *
 * Enables memory mapping for given memory region.
 *
 * @param pa - physical address of memory
 * @param va - logical address of memory
 * @param size - memory size
 * @param flags - flags for cache/uncached and access type
 *
 * NOTE(review): arguments are forwarded as (va, pa, ...) - confirm this
 * matches platform_map_mem_region()'s parameter order.
 */
void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags)
{
    platform_map_mem_region(va, pa, size, flags);
}
/*!
 * env_disable_cache
 *
 * Disables system caches: flushes and invalidates first, then disables.
 *
 */
void env_disable_cache(void)
{
    platform_cache_all_flush_invalidate();
    platform_cache_disable();
}
/*!
 *
 * env_get_timestamp
 *
 * Returns a 64 bit time stamp.
 *
 * NOTE(review): the value is the raw FreeRTOS tick count (ISR-safe
 * variant when in interrupt context), not wall-clock time.
 */
uint64_t env_get_timestamp(void)
{
    if (env_in_isr() != 0)
    {
        return (uint64_t)xTaskGetTickCountFromISR();
    }
    else
    {
        return (uint64_t)xTaskGetTickCount();
    }
}
/*========================================================= */
/* Util data / functions */

/*!
 * env_isr - environment-level interrupt dispatcher: forwards the
 * notification to the virtqueue registered for 'vector'.
 */
void env_isr(uint32_t vector)
{
    struct isr_info *info;
    RL_ASSERT(vector < ISR_COUNT);
    if (vector < ISR_COUNT)
    {
        info = &isr_table[vector];
        virtqueue_notification((struct virtqueue *)info->data);
    }
}
/*
 * env_create_queue
 *
 * Creates a message queue (FreeRTOS queue of fixed-size elements).
 *
 * @param queue - pointer to created queue
 * @param length - maximum number of elements in the queue
 * @param element_size - queue element size in bytes
 *
 * @return - status of function execution (0 on success, -1 on failure)
 */
int32_t env_create_queue(void **queue, int32_t length, int32_t element_size)
{
    *queue = xQueueCreate((UBaseType_t)length, (UBaseType_t)element_size);
    if (*queue != ((void *)0))
    {
        return 0;
    }
    else
    {
        return -1;
    }
}
/*!
 * env_delete_queue
 *
 * Deletes the message queue.
 *
 * @param queue - queue to delete
 */
void env_delete_queue(void *queue)
{
    vQueueDelete(queue);
}
/*!
 * env_put_queue
 *
 * Put an element in a queue.
 *
 * @param queue - queue to put element in
 * @param msg - pointer to the message to be put into the queue
 * @param timeout_ms - timeout in ms (ignored in ISR context; the special
 *                     value portMAX_DELAY means "wait forever")
 *
 * @return - 1 on success, 0 on failure/timeout
 */
int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    if (env_in_isr() != 0)
    {
        if (xQueueSendFromISR(queue, msg, &xHigherPriorityTaskWoken) == pdPASS)
        {
            portEND_SWITCHING_ISR(xHigherPriorityTaskWoken);
            return 1;
        }
    }
    else
    {
        if (xQueueSend(queue, msg, ((portMAX_DELAY == timeout_ms) ? portMAX_DELAY : timeout_ms / portTICK_PERIOD_MS)) ==
            pdPASS)
        {
            return 1;
        }
    }
    return 0;
}
/*!
 * env_get_queue
 *
 * Get an element out of a queue.
 *
 * @param queue - queue to get element from
 * @param msg - pointer to a memory to save the message
 * @param timeout_ms - timeout in ms (ignored in ISR context; the special
 *                     value portMAX_DELAY means "wait forever")
 *
 * @return - 1 on success, 0 on failure/timeout
 */
int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    if (env_in_isr() != 0)
    {
        if (xQueueReceiveFromISR(queue, msg, &xHigherPriorityTaskWoken) == pdPASS)
        {
            portEND_SWITCHING_ISR(xHigherPriorityTaskWoken);
            return 1;
        }
    }
    else
    {
        if (xQueueReceive(queue, msg,
                          ((portMAX_DELAY == timeout_ms) ? portMAX_DELAY : timeout_ms / portTICK_PERIOD_MS)) == pdPASS)
        {
            return 1;
        }
    }
    return 0;
}
/*!
 * env_get_current_queue_size
 *
 * Get current queue size (ISR-safe variant used in interrupt context).
 *
 * @param queue - queue pointer
 *
 * @return - Number of queued items in the queue
 */
int32_t env_get_current_queue_size(void *queue)
{
    if (env_in_isr() != 0)
    {
        return ((int32_t)uxQueueMessagesWaitingFromISR(queue));
    }
    else
    {
        return ((int32_t)uxQueueMessagesWaiting(queue));
    }
}

View File

@ -0,0 +1,708 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env_qnx.c
*
*
* DESCRIPTION
*
* This file is QNX Implementation of env layer.
*
*
**************************************************************************/
#include "rpmsg_env.h"
#include "rpmsg_platform.h"
#include "virtqueue.h"
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <mqueue.h>
#include "rpmsg_env_qnx.h"
#if __PTR_BITS__ > 32
#include <fcntl.h>
#include <aarch64/inline.h>
#else
#include <arm/inline.h>
#endif
/* Max supported ISR counts */
#define ISR_COUNT (32U)
#if (!defined(RL_USE_ENVIRONMENT_CONTEXT)) || (RL_USE_ENVIRONMENT_CONTEXT != 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 1"
#endif
/**
 * Structure to keep track of registered ISR's.
 */
struct isr_info
{
    void *data;                /* registered virtqueue data for this vector */
    volatile uint32_t enabled; /* presumably non-zero while the vector is enabled - confirm against callers */
};
/**
 * Structure to hold queue information
 */
typedef struct env_queue
{
    mqd_t mqd;      /* POSIX message-queue descriptor */
    size_t msg_len; /* fixed message size for this queue */
} env_queue_t;
/**
 * Env. context structure
 */
typedef struct env_context
{
    void *platform_context;               /* Pointer to platform context */
    uint32_t pa;                          /* Physical address of memory pool reserved for rpmsg */
    void *va;                             /* Virtual address of the memory pool */
    struct isr_info isr_table[ISR_COUNT]; /* Table with registered Virt. queue data */
} env_context_t;
/**
* Returns pointer to platform context.
*
* @param env_context Pointer to env. context
*
* @return Pointer to platform context
*/
void *env_get_platform_context(void *env_context)
{
env_context_t *env = env_context;
return env->platform_context;
}
/*!
* env_init
*
* Initializes OS/BM environment.
*
*/
int32_t env_init(void **env_context, void *env_init_data)
{
rpmsg_env_init_t *init = env_init_data;
imx_rpmsg_env_cfg_t *user_cfg;
if (init != ((void *)0))
{
user_cfg = init->user_input;
env_context_t *ctx = env_allocate_memory(sizeof(env_context_t));
if (ctx == ((void *)0))
{
return -1;
}
/* Save virtual and phy address of mmaped memory region */
ctx->pa = init->pa;
ctx->va = init->va;
/* Initialize platform, dereference user_input to get platform cfg address */
if (platform_init(&ctx->platform_context, ctx, user_cfg ? user_cfg->platform_cfg : ((void *)0)) != 0)
{
env_free_memory(ctx);
return -1;
}
*env_context = ctx;
return 0;
}
return -1;
}
/*!
* env_deinit
*
* Uninitializes OS/BM environment.
*
* @returns - execution status
*/
int32_t env_deinit(void *env_context)
{
env_context_t *ctx = env_context;
platform_deinit(ctx->platform_context);
env_free_memory(ctx);
return 0;
}
/*!
 * env_allocate_memory - heap allocation via malloc().
 *
 * @param size Number of bytes
 * @return Pointer to the block, or NULL on exhaustion
 */
void *env_allocate_memory(uint32_t size)
{
    return malloc((size_t)size);
}

/*!
 * env_free_memory - release a block from env_allocate_memory().
 *
 * @param ptr Block to free (NULL is a no-op)
 */
void env_free_memory(void *ptr)
{
    free(ptr);
}

/*!
 * env_memset - fill 'size' bytes at 'ptr' with 'value'.
 */
void env_memset(void *ptr, int32_t value, uint32_t size)
{
    (void)memset(ptr, value, (size_t)size);
}

/*!
 * env_memcpy - copy 'len' bytes from 'src' to non-overlapping 'dst'.
 */
void env_memcpy(void *dst, void const *src, uint32_t len)
{
    (void)memcpy(dst, src, (size_t)len);
}

/*!
 * env_strcmp - strcmp() wrapper; <0/0/>0 ordering result.
 */
int32_t env_strcmp(const char *dst, const char *src)
{
    return strcmp(dst, src);
}

/*!
 * env_strncpy - strncpy() wrapper; like strncpy, does NOT guarantee
 * NUL-termination when strlen(src) >= len.
 */
void env_strncpy(char *dest, const char *src, uint32_t len)
{
    (void)strncpy(dest, src, (size_t)len);
}

/*!
 * env_strncmp - strncmp() wrapper over at most 'len' characters.
 */
int32_t env_strncmp(char *dest, const char *src, uint32_t len)
{
    return strncmp(dest, src, (size_t)len);
}
/*!
 * env_mb - full memory barrier.
 *
 * Issues an ARM DSB so all explicit memory accesses before this call
 * complete before any that follow it.
 */
void env_mb(void)
{
    dsb();
}
/*!
 * env_rmb - read memory barrier.
 *
 * NOTE(review): implemented with a full DSB; no lighter read-only
 * barrier is used on this platform.
 */
void env_rmb(void)
{
    dsb();
}
/*!
 * env_wmb - write memory barrier (full DSB, same as env_rmb).
 */
void env_wmb(void)
{
    dsb();
}
/*!
 * env_map_vatopa - translate a virtual address inside the rpmsg pool
 * into its physical address using the pool bases saved in env_init().
 *
 * @param env     Pointer to env. context
 * @param address Virtual address; assumed to lie inside the mapped pool
 *                — TODO confirm callers never pass outside addresses
 *
 * @return Physical address; the 64-bit result is truncated to the
 *         32-bit return type of the env API
 */
uint32_t env_map_vatopa(void *env, void *address)
{
#if IMX_MMAP_VA_ON_PA
    return ((uint32_t)address);
#else
    /* This is faster then mem_offset64() */
    env_context_t *ctx = env;
    uint64_t va        = (uint64_t)address;
    uint64_t va_start  = (uint64_t)ctx->va;
    uint64_t pa        = ctx->pa + (va - va_start);
    return pa;
#endif
}
/*!
 * env_map_patova - inverse of env_map_vatopa: physical to virtual
 * within the rpmsg pool.
 *
 * @param env     Pointer to env. context
 * @param address Physical address inside the pool
 *
 * @return Virtual address corresponding to 'address'
 */
void *env_map_patova(void *env, uint32_t address)
{
#if IMX_MMAP_VA_ON_PA
    return ((void *)address);
#else
    env_context_t *ctx = env;
    uint64_t va_start  = (uint64_t)ctx->va;
    uint64_t va        = (va_start + (address - ctx->pa));
    return (void *)va;
#endif
}
/*!
 * env_create_mutex
 *
 * Creates a mutex. 'count' is accepted for API compatibility but unused:
 * a pthread mutex is binary by nature.
 *
 * @return 0 on success, -1 on allocation or init failure
 */
int32_t env_create_mutex(void **lock, int32_t count)
{
    pthread_mutex_t *mutex = env_allocate_memory(sizeof(pthread_mutex_t));

    if (mutex == ((void *)0))
    {
        return -1;
    }
    if (pthread_mutex_init(mutex, ((void *)0)) != EOK)
    {
        env_free_memory(mutex);
        *lock = ((void *)0);
        return -1;
    }
    *lock = (void *)mutex;
    return 0;
}

/*!
 * env_delete_mutex
 *
 * Destroys the mutex and releases its storage.
 */
void env_delete_mutex(void *lock)
{
    (void)pthread_mutex_destroy((pthread_mutex_t *)lock);
    env_free_memory(lock);
}

/*!
 * env_lock_mutex
 *
 * Acquires the mutex, blocking until it becomes available.
 */
void env_lock_mutex(void *lock)
{
    (void)pthread_mutex_lock((pthread_mutex_t *)lock);
}

/*!
 * env_unlock_mutex
 *
 * Releases the mutex.
 */
void env_unlock_mutex(void *lock)
{
    (void)pthread_mutex_unlock((pthread_mutex_t *)lock);
}

/*!
 * env_create_sync_lock
 *
 * Creates a synchronization lock primitive used to signal from interrupt
 * context to thread context. Implemented on top of env_create_mutex.
 */
int32_t env_create_sync_lock(void **lock, int32_t state)
{
    /* state = 1 .. initially free */
    return env_create_mutex(lock, state);
}

/*!
 * env_delete_sync_lock
 *
 * Deletes the given lock; NULL is ignored.
 */
void env_delete_sync_lock(void *lock)
{
    if (lock == ((void *)0))
    {
        return;
    }
    env_delete_mutex(lock);
}

/*!
 * env_acquire_sync_lock
 *
 * Blocks until the lock becomes available.
 */
void env_acquire_sync_lock(void *lock)
{
    env_lock_mutex(lock);
}

/*!
 * env_release_sync_lock
 *
 * Releases the given lock.
 */
void env_release_sync_lock(void *lock)
{
    env_unlock_mutex(lock);
}
/*!
 * env_sleep_msec
 *
 * Suspends the calling thread for given time , in msecs.
 *
 * NOTE(review): relies on QNX delay() taking milliseconds — confirm
 * against the QNX C library reference for this target.
 */
void env_sleep_msec(uint32_t num_msec)
{
    delay(num_msec);
}
/* Look up the ISR slot for a vector; NULL when the vector is out of range.
 * The RL_ASSERT fires in debug builds, the range check guards release. */
static struct isr_info *env_isr_slot(void *env, uint32_t vector_id)
{
    env_context_t *ctx = env;

    RL_ASSERT(vector_id < ISR_COUNT);
    return (vector_id < ISR_COUNT) ? &ctx->isr_table[vector_id] : ((void *)0);
}

/*!
 * env_register_isr
 *
 * Registers interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number
 * @param data - interrupt handler data (virtqueue)
 */
void env_register_isr(void *env, uint32_t vector_id, void *data)
{
    struct isr_info *slot = env_isr_slot(env, vector_id);

    if (slot != ((void *)0))
    {
        slot->data = data;
    }
}

/*!
 * env_unregister_isr
 *
 * Unregisters handler data and disables the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_unregister_isr(void *env, uint32_t vector_id)
{
    struct isr_info *slot = env_isr_slot(env, vector_id);

    if (slot != ((void *)0))
    {
        slot->data    = ((void *)0);
        slot->enabled = 0;
    }
}

/*!
 * env_enable_interrupt
 *
 * Marks the given vector enabled so env_isr() will deliver it.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_enable_interrupt(void *env, uint32_t vector_id)
{
    struct isr_info *slot = env_isr_slot(env, vector_id);

    if (slot != ((void *)0))
    {
        slot->enabled = 1;
    }
}

/*!
 * env_disable_interrupt
 *
 * Marks the given vector disabled; env_isr() will drop notifications.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_disable_interrupt(void *env, uint32_t vector_id)
{
    struct isr_info *slot = env_isr_slot(env, vector_id);

    if (slot != ((void *)0))
    {
        slot->enabled = 0;
    }
}

/*!
 * env_map_memory
 *
 * Enables memory mapping for given memory region.
 * Note the argument order swap: the platform layer takes (va, pa, ...).
 *
 * @param pa - physical address of memory
 * @param va - logical address of memory
 * @param size - memory size
 * @param flags - flags for cache/uncached and access type
 */
void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags)
{
    platform_map_mem_region(va, pa, size, flags);
}
/*!
 * env_disable_cache
 *
 * Disables system caches. The flush/invalidate must happen before the
 * disable so dirty lines reach memory — keep this call order.
 */
void env_disable_cache(void)
{
    platform_cache_all_flush_invalidate();
    platform_cache_disable();
}
/*!
*
* env_get_timestamp
*
* Returns a 64 bit time stamp.
*
*
*/
uint32_t long env_get_timestamp(void)
{
fprintf(stderr, "%s unsupported\n", __FUNCTION__);
return 0;
}
/*========================================================= */
/* Util data / functions */
/**
 * Called from the receive thread to dispatch one virtual interrupt.
 * Delivery is skipped when the vector is out of range or disabled.
 *
 * @param env    Pointer to env context
 * @param vector Vector ID.
 */
void env_isr(void *env, uint32_t vector)
{
    env_context_t *ctx = env;

    RL_ASSERT(vector < ISR_COUNT);
    if (vector >= ISR_COUNT)
    {
        return;
    }

    struct isr_info *info = &ctx->isr_table[vector];
    if (info->enabled != 0U)
    {
        virtqueue_notification((struct virtqueue *)info->data);
    }
}

/**
 * Called by rpmsg to init an interrupt: binds the virtqueue data to the
 * vector via env_register_isr().
 *
 * @param env      Pointer to env context.
 * @param vq_id    Virt. queue ID.
 * @param isr_data Pointer to interrupt data.
 *
 * @return Execution status (always 0).
 */
int32_t env_init_interrupt(void *env, int32_t vq_id, void *isr_data)
{
    env_register_isr(env, (uint32_t)vq_id, isr_data);
    return 0;
}

/**
 * Called by rpmsg to deinit an interrupt: unbinds the vector.
 *
 * @param env   Pointer to env context.
 * @param vq_id Virt. queue ID.
 *
 * @return Execution status (always 0).
 */
int32_t env_deinit_interrupt(void *env, int32_t vq_id)
{
    env_unregister_isr(env, (uint32_t)vq_id);
    return 0;
}
/**
 * env_create_queue
 *
 * Creates a message queue backed by a POSIX mq.
 *
 * @param queue - pointer to created queue
 * @param length - maximum number of elements in the queue
 * @param element_size - queue element size in bytes
 *
 * @return - 0 on success, -1 on allocation or mq_open failure
 */
int32_t env_create_queue(void **queue, int32_t length, int32_t element_size)
{
    char name[100];
    /* Zero-init the whole attr struct so platform-specific fields
     * (e.g. QNX mq_recvwait/mq_sendwait) start at 0 without naming
     * them individually. */
    struct mq_attr mqstat = {0};
    env_queue_t *q        = env_allocate_memory(sizeof(env_queue_t));

    if (q == ((void *)0))
    {
        return -1;
    }
    /* Unique per-process name "/PID_objaddr_length". Fix: bounded
     * snprintf with matching conversion specifiers replaces the original
     * sprintf, whose "%u"/"%lx" did not match pid_t/uint64_t/int32_t. */
    (void)snprintf(name, sizeof(name), "/%d_%p_%d", (int)getpid(), (void *)q, (int)length);
    mqstat.mq_maxmsg  = length;
    mqstat.mq_msgsize = element_size;
    q->msg_len        = element_size;
    q->mqd            = mq_open(name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR, &mqstat);
    if (q->mqd == -1)
    {
        env_free_memory(q);
        fprintf(stderr, "mq_open failed: %s\n", strerror(errno));
        return -1;
    }
    /* Return queue */
    *queue = q;
    return 0;
}
/*!
 * env_delete_queue
 *
 * Deletes the message queue.
 *
 * @param queue - queue to delete
 *
 * NOTE(review): only mq_close() is called; the queue was created with
 * O_CREAT under a unique per-object name that is never removed with
 * mq_unlink(), so the name persists in the mq namespace — confirm
 * whether QNX reclaims it at process exit.
 */
void env_delete_queue(void *queue)
{
    env_queue_t *q = queue;
    mq_close(q->mqd);
    env_free_memory(queue);
}
/*!
 * env_put_queue
 *
 * Puts one element into the queue. 'timeout_ms' is accepted for API
 * compatibility but unused: mq_send() blocks until space is available.
 *
 * @param queue - queue to put element in
 * @param msg - pointer to the message to be put into the queue
 * @param timeout_ms - timeout in ms (ignored by this port)
 *
 * @return - 1 on success, 0 on failure
 */
int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    env_queue_t *q = queue;

    if (mq_send(q->mqd, (const char *)msg, q->msg_len, 0) != 0)
    {
        fprintf(stderr, "mq_send failed: %s\n", strerror(errno));
        return 0;
    }
    return 1;
}

/*!
 * env_get_queue
 *
 * Gets one element out of the queue; blocks until a message arrives
 * ('timeout_ms' is unused by this port).
 *
 * @param queue - queue to get element from
 * @param msg - pointer to a memory to save the message
 * @param timeout_ms - timeout in ms (ignored by this port)
 *
 * @return - 1 on success, 0 on failure
 */
int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    env_queue_t *q = queue;

    if (mq_receive(q->mqd, msg, q->msg_len, ((void *)0)) == -1)
    {
        fprintf(stderr, "mq_receive failed: %s\n", strerror(errno));
        return 0;
    }
    return 1;
}

/*!
 * env_get_current_queue_size
 *
 * Get current queue size.
 *
 * @param queue - queue pointer
 *
 * @return - Number of queued items, or 0 when the attributes cannot
 *           be read
 */
int32_t env_get_current_queue_size(void *queue)
{
    env_queue_t *q = queue;
    struct mq_attr attr;

    if (mq_getattr(q->mqd, &attr) == -1)
    {
        return 0;
    }
    return attr.mq_curmsgs;
}

View File

@ -0,0 +1,56 @@
/*
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env_qnx.h
*
*
* DESCRIPTION
*
* This file is QNX header file of env layer.
*
*
**************************************************************************/
#ifndef RPMSG_ENV_QNX_H_
#define RPMSG_ENV_QNX_H_
#include <sys/imx_rpmsg_lite.h>
/* Init data handed to env_init() by the QNX port: identifies the shared
 * rpmsg memory pool and carries the optional user/platform config. */
typedef struct rpmsg_env_init
{
    void *user_input; /* Pointer to user init cfg (imx_rpmsg_env_cfg_t), may be NULL */
    uint32_t pa;      /* Physical address of memory pool reserved for rpmsg */
    void *va;         /* Virtual address of the memory pool */
} rpmsg_env_init_t;
#endif /* RPMSG_ENV_QNX_H_ */

View File

@ -0,0 +1,637 @@
/*
* Copyright 2020 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env_threadx.c
*
*
* DESCRIPTION
*
* This file is ThreadX Implementation of env layer for OpenAMP.
*
*
**************************************************************************/
#include "rpmsg_env.h"
#include "tx_api.h"
#include "tx_event_flags.h"
#include "rpmsg_platform.h"
#include "fsl_common.h"
#include "rpmsg_compiler.h"
#include "fsl_component_mem_manager.h"
#include <stdlib.h>
#include <string.h>
#include "virtqueue.h"
/* Reference count of env_init()/env_deinit() calls; >0 means initialized. */
static int32_t env_init_counter = 0;
/* Gate semaphore: created empty on first env_init() and released once
 * platform_init() has completed, so later callers can wait for it. */
static TX_SEMAPHORE env_sema;
/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count'
   if the initial count is 1, this function behaves as a mutex
   if it is greater than 1, it acts as a "resource allocator" with
   the maximum of 'count' resources available.
   Currently, only the first use-case is applicable/applied in RPMsg-Lite.
 */
#define RL_ENV_MAX_MUTEX_COUNT (10)
/* Max supported ISR counts */
#define ISR_COUNT (32U)
/*!
 * Structure to keep track of registered ISR's.
 */
struct isr_info
{
    void *data; /* Registered virtqueue, handed to virtqueue_notification() */
};
/* One slot per virtual interrupt vector (global: this port is built with
 * RL_USE_ENVIRONMENT_CONTEXT = 0). */
static struct isr_info isr_table[ISR_COUNT];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
/*!
 * env_in_isr
 *
 * @returns - non-zero when currently executing in interrupt context
 */
static int32_t env_in_isr(void)
{
    int32_t in_isr_flag = platform_in_isr();
    return in_isr_flag;
}
/*!
 * env_init
 *
 * Initializes OS/ThreadX environment. Reference-counted: only the first
 * call creates the gate semaphore and runs platform_init(); later calls
 * wait on the gate so they return only after initialization completed.
 *
 * @returns - 0 on success, -1 on failure
 */
int32_t env_init(void)
{
    int32_t retval;
    uint32_t regPrimask = DisableGlobalIRQ(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter >= 0);
    if (env_init_counter < 0)
    {
        EnableGlobalIRQ(regPrimask); /* re-enable scheduler */
        return -1;
    }
    env_init_counter++;
    /* multiple call of 'env_init' - return ok */
    if (env_init_counter == 1)
    {
        /* first call: gate created empty (count 0) and released below
         * once platform_init() finished.
         * NOTE(review): on semaphore-create failure the counter stays
         * incremented — confirm callers pair this with env_deinit(). */
        if (TX_SUCCESS != _tx_semaphore_create((TX_SEMAPHORE *)&env_sema, NULL, 0))
        {
            EnableGlobalIRQ(regPrimask);
            return -1;
        }
        (void)memset(isr_table, 0, sizeof(isr_table));
        EnableGlobalIRQ(regPrimask);
        retval = platform_init();
        tx_semaphore_put((TX_SEMAPHORE *)&env_sema);
        return retval;
    }
    else
    {
        EnableGlobalIRQ(regPrimask);
        /* Get the semaphore and then return it,
         * this allows for platform_init() to block
         * if needed and other tasks to wait for the
         * blocking to be done.
         * This is in ENV layer as this is ENV specific.*/
        if (TX_SUCCESS == tx_semaphore_get((TX_SEMAPHORE *)&env_sema, TX_WAIT_FOREVER))
        {
            tx_semaphore_put((TX_SEMAPHORE *)&env_sema);
        }
        return 0;
    }
}
/*!
 * env_deinit
 *
 * Uninitializes OS/BM environment. Reference-counted: only the last
 * call tears down the platform layer and deletes the gate semaphore.
 *
 * @returns - execution status (platform_deinit() result, 0, or -1)
 */
int32_t env_deinit(void)
{
    int32_t retval;
    uint32_t regPrimask = DisableGlobalIRQ(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter > 0);
    if (env_init_counter <= 0)
    {
        EnableGlobalIRQ(regPrimask); /* re-enable scheduler */
        return -1;
    }
    /* counter on zero - call platform deinit */
    env_init_counter--;
    /* multiple call of 'env_deinit' - return ok */
    if (env_init_counter <= 0)
    {
        /* last call: clear ISR table, deinit platform, destroy gate */
        (void)memset(isr_table, 0, sizeof(isr_table));
        retval = platform_deinit();
        (void)_tx_semaphore_delete((TX_SEMAPHORE *)&env_sema);
        (void)memset(&env_sema, 0, sizeof(env_sema));
        EnableGlobalIRQ(regPrimask);
        return retval;
    }
    else
    {
        EnableGlobalIRQ(regPrimask);
        return 0;
    }
}
/*!
 * env_allocate_memory - allocate 'size' bytes from the MemManager pool.
 *
 * @param size Number of bytes
 * @return Buffer pointer, or NULL on exhaustion
 */
void *env_allocate_memory(uint32_t size)
{
    void *buf = MEM_BufferAlloc(size);
    return buf;
}

/*!
 * env_free_memory - return a buffer to the MemManager pool.
 *
 * @param ptr Buffer to release (NULL is ignored)
 */
void env_free_memory(void *ptr)
{
    if (ptr == ((void *)0))
    {
        return;
    }
    MEM_BufferFree(ptr);
}
/*!
 * env_memset - fill 'size' bytes at 'ptr' with 'value'.
 */
void env_memset(void *ptr, int32_t value, uint32_t size)
{
    (void)memset(ptr, value, (size_t)size);
}

/*!
 * env_memcpy - copy 'len' bytes from 'src' to non-overlapping 'dst'.
 */
void env_memcpy(void *dst, void const *src, uint32_t len)
{
    (void)memcpy(dst, src, (size_t)len);
}

/*!
 * env_strcmp - strcmp() wrapper; <0/0/>0 ordering result.
 */
int32_t env_strcmp(const char *dst, const char *src)
{
    return strcmp(dst, src);
}

/*!
 * env_strncpy - strncpy() wrapper; like strncpy, does NOT guarantee
 * NUL-termination when strlen(src) >= len.
 */
void env_strncpy(char *dest, const char *src, uint32_t len)
{
    (void)strncpy(dest, src, (size_t)len);
}

/*!
 * env_strncmp - strncmp() wrapper over at most 'len' characters.
 */
int32_t env_strncmp(char *dest, const char *src, uint32_t len)
{
    return strncmp(dest, src, (size_t)len);
}
/*!
 * env_mb - full memory barrier via the platform MEM_BARRIER() macro.
 */
void env_mb(void)
{
    MEM_BARRIER();
}
/*!
 * env_rmb - read memory barrier (implemented as a full barrier here).
 */
void env_rmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_wmb - write memory barrier (implemented as a full barrier here).
 */
void env_wmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_map_vatopa - virtual-to-physical translation, delegated to the
 * platform layer.
 *
 * @param address Virtual address
 * @return Physical address
 */
uint32_t env_map_vatopa(void *address)
{
    return platform_vatopa(address);
}
/*!
 * env_map_patova - physical-to-virtual translation, delegated to the
 * platform layer.
 *
 * @param address Physical address
 * @return Virtual address
 */
void *env_map_patova(uint32_t address)
{
    return platform_patova(address);
}
/*!
 * env_create_mutex
 *
 * Creates a counting semaphore used as a mutex with the given initial
 * count (count = 1 behaves as a plain mutex).
 *
 * @param lock  Out: receives the created semaphore
 * @param count Initial count; must not exceed RL_ENV_MAX_MUTEX_COUNT
 *
 * @return 0 on success, -1 on invalid count / allocation / create failure
 */
int32_t env_create_mutex(void **lock, int32_t count)
{
    TX_SEMAPHORE *semaphore_ptr;

    /* Fix: validate 'count' BEFORE allocating — the original allocated
     * first and returned -1 without freeing, leaking the block. */
    if (count > RL_ENV_MAX_MUTEX_COUNT)
    {
        return -1;
    }
    semaphore_ptr = (TX_SEMAPHORE *)env_allocate_memory(sizeof(TX_SEMAPHORE));
    if (semaphore_ptr == ((void *)0))
    {
        return -1;
    }
    if (TX_SUCCESS != _tx_semaphore_create((TX_SEMAPHORE *)semaphore_ptr, NULL, count))
    {
        /* Fix: release the allocation on create failure (was leaked). */
        env_free_memory(semaphore_ptr);
        return -1;
    }
    *lock = (void *)semaphore_ptr;
    return 0;
}
/*!
* env_delete_mutex
*
* Deletes the given lock
*
*/
void env_delete_mutex(void *lock)
{
(void)_tx_semaphore_delete((TX_SEMAPHORE *)lock);
env_free_memory(lock);
}
/*!
* env_lock_mutex
*
* Tries to acquire the lock, if lock is not available then call to
* this function will suspend.
*/
void env_lock_mutex(void *lock)
{
if (env_in_isr() == 0)
{
(void)tx_semaphore_get((TX_SEMAPHORE *)lock, TX_WAIT_FOREVER);
}
}
/*!
* env_unlock_mutex
*
* Releases the given lock.
*/
void env_unlock_mutex(void *lock)
{
if (env_in_isr() == 0)
{
tx_semaphore_put((TX_SEMAPHORE *)lock);
}
}
/*!
* env_create_sync_lock
*
* Creates a synchronization lock primitive. It is used
* when signal has to be sent from the interrupt context to main
* thread context.
*/
int32_t env_create_sync_lock(void **lock, int32_t state)
{
return env_create_mutex(lock, state); /* state=1 .. initially free */
}
/*!
* env_delete_sync_lock
*
* Deletes the given lock
*
*/
void env_delete_sync_lock(void *lock)
{
if (lock != ((void *)0))
{
env_delete_mutex(lock);
}
}
/*!
* env_acquire_sync_lock
*
* Tries to acquire the lock, if lock is not available then call to
* this function waits for lock to become available.
*/
void env_acquire_sync_lock(void *lock)
{
if (lock != ((void *)0))
{
env_lock_mutex(lock);
}
}
/*!
* env_release_sync_lock
*
* Releases the given lock.
*/
void env_release_sync_lock(void *lock)
{
if (lock != ((void *)0))
{
env_unlock_mutex(lock);
}
}
/*!
 * env_sleep_msec
 *
 * Suspends the calling thread for given time , in msecs.
 *
 * NOTE(review): tx_thread_sleep() takes a count of timer TICKS, not
 * milliseconds — this is only a 1:1 mapping when the ThreadX tick rate
 * is configured at 1 kHz. Confirm TX_TIMER_TICKS_PER_SECOND for this BSP.
 */
void env_sleep_msec(uint32_t num_msec)
{
    tx_thread_sleep(num_msec);
}
/*!
 * env_register_isr
 *
 * Registers interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number
 * @param data - interrupt handler data (virtqueue)
 */
void env_register_isr(uint32_t vector_id, void *data)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id >= ISR_COUNT)
    {
        return;
    }
    isr_table[vector_id].data = data;
}

/*!
 * env_unregister_isr
 *
 * Unregisters interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_unregister_isr(uint32_t vector_id)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id >= ISR_COUNT)
    {
        return;
    }
    isr_table[vector_id].data = ((void *)0);
}

/*!
 * env_enable_interrupt
 *
 * Enables the given interrupt via the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_enable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_enable(vector_id);
}

/*!
 * env_disable_interrupt
 *
 * Disables the given interrupt via the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_disable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_disable(vector_id);
}

/*!
 * env_map_memory
 *
 * Enables memory mapping for given memory region.
 * Note the argument order swap: the platform layer takes (va, pa, ...).
 *
 * @param pa - physical address of memory
 * @param va - logical address of memory
 * @param size - memory size
 * @param flags - flags for cache/uncached and access type
 */
void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags)
{
    platform_map_mem_region(va, pa, size, flags);
}
/*!
 * env_disable_cache
 *
 * Disables system caches. The flush/invalidate must happen before the
 * disable so dirty lines reach memory — keep this call order.
 */
void env_disable_cache(void)
{
    platform_cache_all_flush_invalidate();
    platform_cache_disable();
}
/*========================================================= */
/* Util data / functions */
/* Dispatch one virtual interrupt: forwards the registered virtqueue to
 * virtqueue_notification(). Out-of-range vectors are dropped. */
void env_isr(uint32_t vector)
{
    RL_ASSERT(vector < ISR_COUNT);
    if (vector >= ISR_COUNT)
    {
        return;
    }
    virtqueue_notification((struct virtqueue *)isr_table[vector].data);
}
/*
 * env_create_queue
 *
 * Creates a message queue.
 *
 * @param queue - pointer to created queue
 * @param length - maximum number of elements in the queue
 * @param element_size - queue element size in bytes
 *
 * @return - status of function execution (0 success, -1 failure)
 */
int32_t env_create_queue(void **queue, int32_t length, int32_t element_size)
{
    /* Fix: the original passed the uninitialized '*queue' as the TX_QUEUE
     * control block and NULL as the storage area — undefined behavior.
     * Allocate one chunk holding the control block followed by storage. */
    TX_QUEUE *q;
    /* ThreadX expresses message size in 32-bit words; round bytes up. */
    ULONG msg_words     = ((ULONG)element_size + 3UL) / 4UL;
    ULONG storage_bytes = (ULONG)length * msg_words * 4UL;

    q = (TX_QUEUE *)env_allocate_memory((uint32_t)(sizeof(TX_QUEUE) + storage_bytes));
    if (q == ((void *)0))
    {
        return -1;
    }
    if (TX_SUCCESS == _tx_queue_create(q, NULL, (UINT)msg_words, (void *)(q + 1), storage_bytes))
    {
        *queue = (void *)q;
        return 0;
    }
    env_free_memory(q);
    return -1;
}
/*!
* env_delete_queue
*
* Deletes the message queue.
*
* @param queue - queue to delete
*/
void env_delete_queue(void *queue)
{
tx_queue_delete(queue);
}
/*!
 * env_put_queue
 *
 * Put an element in a queue.
 *
 * @param queue - queue to put element in
 * @param msg - pointer to the message to be put into the queue
 * @param timeout_ms - timeout in ms (passed to ThreadX as ticks)
 *
 * @return - 1 on success, 0 on failure
 */
int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    /* Fix: RPMsg-Lite callers (and the other env ports in this tree)
     * treat the return value as 1 = success, 0 = failure; the original
     * 0/-1 convention made every successful send look like a failure. */
    if (TX_SUCCESS == tx_queue_send((TX_QUEUE *)queue, msg, timeout_ms))
    {
        return 1;
    }
    return 0;
}
/*!
 * env_get_queue
 *
 * Get an element out of a queue.
 *
 * @param queue - queue to get element from
 * @param msg - pointer to a memory to save the message
 * @param timeout_ms - timeout in ms (passed to ThreadX as ticks)
 *
 * @return - 1 on success, 0 on failure
 */
int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    /* Fix: return 1 on success / 0 on failure to match the RPMsg-Lite
     * convention used by callers and the other env ports. */
    if (TX_SUCCESS == tx_queue_receive((TX_QUEUE *)queue, msg, timeout_ms))
    {
        return 1;
    }
    return 0;
}
/*!
* env_get_current_queue_size
*
* Get current queue size.
*
* @param queue - queue pointer
*
* @return - Number of queued items in the queue
*/
int32_t env_get_current_queue_size(void *queue)
{
int32_t enqueued;
ULONG available_storage;
TX_THREAD *first_suspended;
ULONG suspended_count;
TX_QUEUE *next_queue;
if (TX_SUCCESS == tx_queue_info_get((TX_QUEUE *)queue, NULL, (ULONG *)&enqueued, &available_storage,
&first_suspended, &suspended_count, &next_queue))
{
return 0;
}
else
{
return -1;
}
}

View File

@ -0,0 +1,681 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2021 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env_xos.c
*
*
* DESCRIPTION
*
* This file is XOS Implementation of env layer for RPMsg_Lite.
*
*
**************************************************************************/
#include "rpmsg_env.h"
#include "rpmsg_lite.h"
#include <xtensa/xos.h>
#include "rpmsg_platform.h"
#include "virtqueue.h"
#include "rpmsg_compiler.h"
#include <stdlib.h>
#include <string.h>
/* Reference count of env_init()/env_deinit() calls; >0 means initialized. */
static int32_t env_init_counter = 0;
/* Gate semaphore released once platform_init() has completed, so later
 * env_init() callers can wait for initialization to finish. */
static struct XosSem env_sema = {0};
/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count'
   if the initial count is 1, this function behaves as a mutex
   if it is greater than 1, it acts as a "resource allocator" with
   the maximum of 'count' resources available.
   Currently, only the first use-case is applicable/applied in RPMsg-Lite.
 */
#define RL_ENV_MAX_MUTEX_COUNT (10)
/* Max supported ISR counts */
#define ISR_COUNT (32U)
/*!
 * Structure to keep track of registered ISR's.
 */
struct isr_info
{
    void *data; /* Registered virtqueue, handed to virtqueue_notification() */
};
/* One slot per virtual interrupt vector (global: this port is built with
 * RL_USE_ENVIRONMENT_CONTEXT = 0). */
static struct isr_info isr_table[ISR_COUNT];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
/*!
 * env_in_isr
 *
 * @returns - non-zero when currently executing in interrupt context
 */
static int32_t env_in_isr(void)
{
    int32_t in_isr_flag = platform_in_isr();
    return in_isr_flag;
}
/*!
 * env_init
 *
 * Initializes XOS environment. Reference-counted: only the first call
 * creates the gate semaphore and runs platform_init(); later calls wait
 * on the gate so they return only after initialization completed.
 *
 * @returns - 0 on success, -1 on failure
 */
int32_t env_init(void)
{
    int32_t retval;
    uint32_t regPrimask = xos_disable_interrupts(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter >= 0);
    if (env_init_counter < 0)
    {
        xos_restore_interrupts(regPrimask); /* re-enable scheduler */
        return -1;
    }
    env_init_counter++;
    /* multiple call of 'env_init' - return ok */
    if (env_init_counter == 1)
    {
        /* first call.
         * NOTE(review): the gate is created with initial count 1 and put
         * again after platform_init(); the ThreadX port creates the same
         * gate with count 0, so a concurrent second caller here could
         * pass the gate before platform_init() completes — confirm the
         * initial count of 1 is intended. */
        (void)xos_sem_create(&env_sema, XOS_SEM_WAIT_PRIORITY, 1);
        (void)memset(isr_table, 0, sizeof(isr_table));
        xos_restore_interrupts(regPrimask);
        retval = platform_init();
        (void)xos_sem_put(&env_sema);
        return retval;
    }
    else
    {
        xos_restore_interrupts(regPrimask);
        /* Get the semaphore and then return it,
         * this allows for platform_init() to block
         * if needed and other tasks to wait for the
         * blocking to be done.
         * This is in ENV layer as this is ENV specific.*/
        if (XOS_OK == xos_sem_get(&env_sema))
        {
            (void)xos_sem_put(&env_sema);
        }
        return 0;
    }
}
/*!
 * env_deinit
 *
 * Uninitializes XOS environment. Reference-counted: only the last call
 * tears down the platform layer and deletes the gate semaphore.
 *
 * @returns - execution status (platform_deinit() result, 0, or -1)
 */
int32_t env_deinit(void)
{
    int32_t retval;
    uint32_t regPrimask = xos_disable_interrupts(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter > 0);
    if (env_init_counter <= 0)
    {
        xos_restore_interrupts(regPrimask); /* re-enable scheduler */
        return -1;
    }
    /* counter on zero - call platform deinit */
    env_init_counter--;
    /* multiple call of 'env_deinit' - return ok */
    if (env_init_counter <= 0)
    {
        /* last call: clear ISR table, deinit platform, destroy gate */
        (void)memset(isr_table, 0, sizeof(isr_table));
        retval = platform_deinit();
        (void)xos_sem_delete(&env_sema);
        xos_restore_interrupts(regPrimask);
        return retval;
    }
    else
    {
        xos_restore_interrupts(regPrimask);
        return 0;
    }
}
/*!
 * env_allocate_memory - heap allocation via malloc().
 *
 * @param size Number of bytes
 * @return Pointer to the block, or NULL on exhaustion
 */
void *env_allocate_memory(uint32_t size)
{
    return malloc(size);
}

/*!
 * env_free_memory - release a block from env_allocate_memory().
 *
 * @param ptr Block to free; NULL is accepted
 */
void env_free_memory(void *ptr)
{
    /* free(NULL) is a defined no-op, so the original NULL guard was
     * redundant and has been removed. */
    free(ptr);
}
/*!
 * env_memset - fill 'size' bytes at 'ptr' with 'value'.
 */
void env_memset(void *ptr, int32_t value, uint32_t size)
{
    (void)memset(ptr, value, (size_t)size);
}

/*!
 * env_memcpy - copy 'len' bytes from 'src' to non-overlapping 'dst'.
 */
void env_memcpy(void *dst, void const *src, uint32_t len)
{
    (void)memcpy(dst, src, (size_t)len);
}

/*!
 * env_strcmp - strcmp() wrapper; <0/0/>0 ordering result.
 */
int32_t env_strcmp(const char *dst, const char *src)
{
    return strcmp(dst, src);
}

/*!
 * env_strncpy - strncpy() wrapper; like strncpy, does NOT guarantee
 * NUL-termination when strlen(src) >= len.
 */
void env_strncpy(char *dest, const char *src, uint32_t len)
{
    (void)strncpy(dest, src, (size_t)len);
}

/*!
 * env_strncmp - strncmp() wrapper over at most 'len' characters.
 */
int32_t env_strncmp(char *dest, const char *src, uint32_t len)
{
    return strncmp(dest, src, (size_t)len);
}
/*!
 * env_mb - full memory barrier via the platform MEM_BARRIER() macro.
 */
void env_mb(void)
{
    MEM_BARRIER();
}
/*!
 * env_rmb - read memory barrier (implemented as a full barrier here).
 */
void env_rmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_wmb - write memory barrier (implemented as a full barrier here).
 */
void env_wmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_map_vatopa - virtual-to-physical translation, delegated to the
 * platform layer.
 *
 * @param address Virtual address
 * @return Physical address
 */
uint32_t env_map_vatopa(void *address)
{
    return platform_vatopa(address);
}
/*!
 * env_map_patova - physical-to-virtual translation, delegated to the
 * platform layer.
 *
 * @param address Physical address
 * @return Virtual address
 */
void *env_map_patova(uint32_t address)
{
    return platform_patova(address);
}
/*!
 * env_create_mutex
 *
 * Creates an XOS semaphore used as a mutex with the given initial count
 * (count = 1 behaves as a plain mutex).
 *
 * @return 0 on success, -1 on invalid count / allocation / create failure
 */
int32_t env_create_mutex(void **lock, int32_t count)
{
    struct XosSem *sem;

    if (count > RL_ENV_MAX_MUTEX_COUNT)
    {
        return -1;
    }
    sem = (struct XosSem *)env_allocate_memory(sizeof(struct XosSem));
    if (sem == ((void *)0))
    {
        return -1;
    }
    if (xos_sem_create(sem, XOS_SEM_WAIT_PRIORITY, count) != XOS_OK)
    {
        env_free_memory(sem);
        return -1;
    }
    *lock = (void *)sem;
    return 0;
}

/*!
 * env_delete_mutex
 *
 * Destroys the semaphore and releases its storage.
 */
void env_delete_mutex(void *lock)
{
    struct XosSem *sem = (struct XosSem *)lock;

    xos_sem_delete(sem);
    env_free_memory(sem);
}

/*!
 * env_lock_mutex
 *
 * Acquires the lock, blocking until available. Never blocks in an ISR.
 */
void env_lock_mutex(void *lock)
{
    if (env_in_isr() != 0)
    {
        return; /* must not suspend in interrupt context */
    }
    (void)xos_sem_get((struct XosSem *)lock);
}

/*!
 * env_unlock_mutex
 *
 * Releases the given lock (no-op in interrupt context).
 */
void env_unlock_mutex(void *lock)
{
    if (env_in_isr() != 0)
    {
        return;
    }
    (void)xos_sem_put((struct XosSem *)lock);
}

/*!
 * env_create_sync_lock
 *
 * Creates a synchronization lock primitive used to signal from interrupt
 * context to thread context. Implemented on top of env_create_mutex.
 */
int32_t env_create_sync_lock(void **lock, int32_t state)
{
    /* state = 1 .. initially free */
    return env_create_mutex(lock, state);
}

/*!
 * env_delete_sync_lock
 *
 * Deletes the given lock; NULL is ignored.
 */
void env_delete_sync_lock(void *lock)
{
    if (lock == ((void *)0))
    {
        return;
    }
    env_delete_mutex(lock);
}

/*!
 * env_acquire_sync_lock
 *
 * Blocks until the lock becomes available; NULL is ignored.
 */
void env_acquire_sync_lock(void *lock)
{
    if (lock == ((void *)0))
    {
        return;
    }
    env_lock_mutex(lock);
}

/*!
 * env_release_sync_lock
 *
 * Releases the given lock; NULL is ignored.
 */
void env_release_sync_lock(void *lock)
{
    if (lock == ((void *)0))
    {
        return;
    }
    env_unlock_mutex(lock);
}

/*!
 * env_sleep_msec
 *
 * Suspends the calling thread for the given number of milliseconds.
 */
void env_sleep_msec(uint32_t num_msec)
{
    (void)xos_thread_sleep_msec(num_msec);
}
/*!
 * env_register_isr
 *
 * Registers interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number (must be < ISR_COUNT)
 * @param data - interrupt handler data (virtqueue), delivered later by env_isr()
 */
void env_register_isr(uint32_t vector_id, void *data)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id < ISR_COUNT)
    {
        isr_table[vector_id].data = data;
    }
}
/*!
 * env_unregister_isr
 *
 * Unregisters interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number (must be < ISR_COUNT)
 */
void env_unregister_isr(uint32_t vector_id)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id < ISR_COUNT)
    {
        isr_table[vector_id].data = ((void *)0);
    }
}
/*!
 * env_enable_interrupt
 *
 * Enables the given interrupt; thin wrapper over the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_enable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_enable(vector_id);
}
/*!
 * env_disable_interrupt
 *
 * Disables the given interrupt; thin wrapper over the platform layer.
 *
 * @param vector_id - virtual interrupt vector number
 */
void env_disable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_disable(vector_id);
}
/*!
 * env_map_memory
 *
 * Enables memory mapping for given memory region.
 *
 * @param pa - physical address of memory
 * @param va - logical address of memory
 * @param size - memory size
 * param flags - flags for cache/uncached and access type
 */
void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags)
{
    /* Note the platform API takes (va, pa, ...) - arguments are swapped here. */
    platform_map_mem_region(va, pa, size, flags);
}
/*!
 * env_disable_cache
 *
 * Disables system caches (flush + invalidate first so no dirty data is lost).
 *
 */
void env_disable_cache(void)
{
    platform_cache_all_flush_invalidate();
    platform_cache_disable();
}
/*========================================================= */
/* Util data / functions */
/*!
 * env_isr
 *
 * Dispatches a virtual interrupt to the virtqueue registered for it
 * via env_register_isr(). Called from the platform interrupt handler.
 */
void env_isr(uint32_t vector)
{
    struct isr_info *info;
    RL_ASSERT(vector < ISR_COUNT);
    if (vector < ISR_COUNT)
    {
        info = &isr_table[vector];
        virtqueue_notification((struct virtqueue *)info->data);
    }
}
/*
 * env_create_queue
 *
 * Creates a message queue.
 *
 * @param queue - pointer to created queue
 * @param length - maximum number of elements in the queue
 * @param element_size - queue element size in bytes
 *
 * @return - status of function execution (0 on success, -1 on failure)
 */
int32_t env_create_queue(void **queue, int32_t length, int32_t element_size)
{
    char *queue_ptr = ((void *)0);
    /* XOS_MSGQ_SIZE covers the queue control block plus message storage. */
    queue_ptr = (char *)env_allocate_memory(XOS_MSGQ_SIZE(length, element_size));
    if (queue_ptr != ((void *)0))
    {
        if (XOS_OK ==
            xos_msgq_create((XosMsgQueue *)queue_ptr, (uint16_t)length, (uint32_t)element_size, XOS_MSGQ_WAIT_PRIORITY))
        {
            *queue = (void *)queue_ptr;
            return 0;
        }
        else
        {
            /* Creation failed - release the storage again. */
            env_free_memory(queue_ptr);
            return -1;
        }
    }
    return -1;
}
/*!
 * env_delete_queue
 *
 * Deletes the message queue and frees the single allocation made by
 * env_create_queue.
 *
 * @param queue - queue to delete
 */
void env_delete_queue(void *queue)
{
    xos_msgq_delete(queue);
    env_free_memory(queue);
}
/*!
 * env_put_queue
 *
 * Put an element in a queue.
 *
 * @param queue - queue to put element in
 * @param msg - pointer to the message to be put into the queue
 * @param timeout_ms - timeout in ms (RL_BLOCK = wait forever)
 *
 * @return - 1 on success, 0 on failure/timeout (note: NOT 0/-1)
 */
int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    if (RL_BLOCK == timeout_ms)
    {
        /* If no space is available, this function will block if called from a thread, but will
        return immediately if called from an interrupt handler. */
        if (XOS_OK == xos_msgq_put(queue, msg))
        {
            return 1;
        }
    }
    else
    {
        /* If no space is available, this function will block if called from a thread, but will
        return immediately if called from an interrupt handler. */
        if (XOS_OK == xos_msgq_put_timeout(queue, msg, xos_msecs_to_cycles(timeout_ms)))
        {
            return 1;
        }
    }
    return 0;
}
/*!
 * env_get_queue
 *
 * Get an element out of a queue.
 *
 * @param queue - queue to get element from
 * @param msg - pointer to a memory to save the message
 * @param timeout_ms - timeout in ms (RL_BLOCK = wait forever)
 *
 * @return - 1 on success, 0 on failure/timeout (note: NOT 0/-1)
 */
int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    if (RL_BLOCK == timeout_ms)
    {
        /* If no message is available, this function will block if called from a thread, but will return
        immediately if called from an interrupt handler. */
        if (XOS_OK == xos_msgq_get(queue, msg))
        {
            return 1;
        }
    }
    else
    {
        /* If no message is available, this function will block if called from a thread, but will return
        immediately if called from an interrupt handler. The thread will be unblocked when a message
        arrives in the queue or the timeout expires. */
        if (XOS_OK == xos_msgq_get_timeout(queue, msg, xos_msecs_to_cycles(timeout_ms)))
        {
            return 1;
        }
    }
    return 0;
}
/*!
 * env_get_current_queue_size
 *
 * Get current queue size.
 *
 * @param queue - queue pointer
 *
 * @return - Number of queued items in the queue
 *
 * NOTE(review): xos_msgq_empty() looks like an "is empty" boolean, not an
 * item count - if so this returns 1 for an EMPTY queue, the opposite of the
 * documented contract. Verify against the XOS API and the callers in
 * rpmsg_queue before relying on this value.
 */
int32_t env_get_current_queue_size(void *queue)
{
    return ((int32_t)xos_msgq_empty(queue));
}

View File

@ -0,0 +1,648 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_env_zephyr.c
*
*
* DESCRIPTION
*
* This file is Zephyr RTOS Implementation of env layer for OpenAMP.
*
*
**************************************************************************/
#include "rpmsg_env.h"
#include <zephyr.h>
#include "rpmsg_platform.h"
#include "virtqueue.h"
#include "rpmsg_compiler.h"
#include <stdlib.h>
#include <string.h>
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
/* RL_ENV_MAX_MUTEX_COUNT is an arbitrary count greater than 'count'
if the inital count is 1, this function behaves as a mutex
if it is greater than 1, it acts as a "resource allocator" with
the maximum of 'count' resources available.
Currently, only the first use-case is applicable/applied in RPMsg-Lite.
*/
#define RL_ENV_MAX_MUTEX_COUNT (10)
static int32_t env_init_counter = 0;
static struct k_sem env_sema = {0};
/* Max supported ISR counts */
#define ISR_COUNT (32U)
/*!
* Structure to keep track of registered ISR's.
*/
struct isr_info
{
void *data;
};
static struct isr_info isr_table[ISR_COUNT];
/*!
 * env_in_isr
 *
 * @returns - true (non-zero), if currently in ISR; delegates to the
 *            platform layer's interrupt-context detection.
 *
 */
static int32_t env_in_isr(void)
{
    return platform_in_isr();
}
/*!
 * env_init
 *
 * Initializes OS/BM environment.
 *
 * Reference-counted: only the first caller runs platform_init(); any
 * concurrent/later callers block on env_sema until that first
 * initialization has finished, then return 0.
 *
 * @returns - execution status (platform_init result, or -1 on bad state)
 */
int32_t env_init(void)
{
    int32_t retval;
    k_sched_lock(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter >= 0);
    if (env_init_counter < 0)
    {
        k_sched_unlock(); /* re-enable scheduler */
        return -1;
    }
    env_init_counter++;
    /* multiple call of 'env_init' - return ok */
    if (env_init_counter == 1)
    {
        /* first call */
        k_sem_init(&env_sema, 0, 1);
        (void)memset(isr_table, 0, sizeof(isr_table));
        k_sched_unlock();
        retval = platform_init();
        /* Wake any tasks that entered env_init() while platform_init() ran. */
        k_sem_give(&env_sema);
        return retval;
    }
    else
    {
        k_sched_unlock();
        /* Get the semaphore and then return it,
         * this allows for platform_init() to block
         * if needed and other tasks to wait for the
         * blocking to be done.
         * This is in ENV layer as this is ENV specific.*/
        k_sem_take(&env_sema, K_FOREVER);
        k_sem_give(&env_sema);
        return 0;
    }
}
/*!
 * env_deinit
 *
 * Uninitializes OS/BM environment.
 *
 * Reference-counted counterpart of env_init(): only the call that drops
 * the counter to zero performs the real platform deinit.
 *
 * @returns - execution status (platform_deinit result, or -1 on bad state)
 */
int32_t env_deinit(void)
{
    int32_t retval;
    k_sched_lock(); /* stop scheduler */
    /* verify 'env_init_counter' */
    RL_ASSERT(env_init_counter > 0);
    if (env_init_counter <= 0)
    {
        k_sched_unlock(); /* re-enable scheduler */
        return -1;
    }
    /* counter on zero - call platform deinit */
    env_init_counter--;
    /* multiple call of 'env_deinit' - return ok */
    if (env_init_counter <= 0)
    {
        /* last call */
        (void)memset(isr_table, 0, sizeof(isr_table));
        retval = platform_deinit();
        k_sem_reset(&env_sema);
        k_sched_unlock();
        return retval;
    }
    else
    {
        k_sched_unlock();
        return 0;
    }
}
/*!
 * env_allocate_memory - implementation
 *
 * Allocates from the Zephyr kernel heap.
 *
 * @param size number of bytes
 * @return pointer to the allocation, or NULL on failure
 */
void *env_allocate_memory(uint32_t size)
{
    return (k_malloc(size));
}
/*!
 * env_free_memory - implementation
 *
 * Frees memory obtained from env_allocate_memory; NULL is a no-op.
 *
 * @param ptr
 */
void env_free_memory(void *ptr)
{
    if (ptr != ((void *)0))
    {
        k_free(ptr);
    }
}
/*!
 * env_memset - fill a memory region with a constant byte.
 *
 * @param ptr   start of the region to fill
 * @param value byte value to store (forwarded to memset)
 * @param size  number of bytes to fill
 */
void env_memset(void *ptr, int32_t value, uint32_t size)
{
    (void)memset(ptr, (int)value, (size_t)size);
}
/*!
 * env_memcpy - copy a block of bytes between non-overlapping regions.
 *
 * @param dst destination buffer (must hold at least len bytes)
 * @param src source buffer
 * @param len number of bytes to copy
 */
void env_memcpy(void *dst, void const *src, uint32_t len)
{
    (void)memcpy(dst, src, (size_t)len);
}
/*!
*
* env_strcmp - implementation
*
* @param dst
* @param src
*/
int32_t env_strcmp(const char *dst, const char *src)
{
return (strcmp(dst, src));
}
/*!
 * env_strncpy - bounded copy with strncpy semantics.
 *
 * NOTE: as with strncpy, when src holds len or more characters the
 * destination is NOT NUL-terminated; shorter sources are NUL-padded
 * out to len bytes.
 *
 * @param dst destination buffer of at least len bytes
 * @param src NUL-terminated source string
 * @param len maximum number of bytes written to dst
 */
void env_strncpy(char *dst, const char *src, uint32_t len)
{
    (void)strncpy(dst, src, (size_t)len);
}
/*!
 * env_strncmp - compare at most len characters of two strings.
 *
 * @param dst first string
 * @param src second string
 * @param len maximum number of characters compared
 * @return 0 when the prefixes match, negative/positive per strncmp ordering
 */
int32_t env_strncmp(char *dst, const char *src, uint32_t len)
{
    return (int32_t)strncmp(dst, src, (size_t)len);
}
/*!
 *
 * env_mb - full memory barrier
 *
 */
void env_mb(void)
{
    MEM_BARRIER();
}
/*!
 * env_rmb - read memory barrier (this port maps it to a full barrier)
 */
void env_rmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_wmb - write memory barrier (this port maps it to a full barrier)
 */
void env_wmb(void)
{
    MEM_BARRIER();
}
/*!
 * env_map_vatopa - virtual-to-physical address translation,
 * delegated to the platform layer.
 *
 * @param address virtual address
 * @return corresponding physical address
 */
uint32_t env_map_vatopa(void *address)
{
    return platform_vatopa(address);
}
/*!
 * env_map_patova - physical-to-virtual address translation,
 * delegated to the platform layer.
 *
 * @param address physical address
 * @return corresponding virtual address
 */
void *env_map_patova(uint32_t address)
{
    return platform_patova(address);
}
/*!
 * env_create_mutex
 *
 * Creates a mutex with the given initial count.
 *
 * @param lock  Output: receives the created k_sem (as void *)
 * @param count Initial semaphore count; 1 gives plain mutex behavior
 *
 * @return 0 on success, -1 on invalid count or allocation failure
 */
int32_t env_create_mutex(void **lock, int32_t count)
{
    struct k_sem *semaphore_ptr;

    /* Validate the requested count BEFORE allocating. The original code
       checked after k_malloc and returned -1 without freeing, leaking
       sizeof(struct k_sem) on every invalid request. */
    if (count > RL_ENV_MAX_MUTEX_COUNT)
    {
        return -1;
    }
    semaphore_ptr = (struct k_sem *)env_allocate_memory(sizeof(struct k_sem));
    if (semaphore_ptr == ((void *)0))
    {
        return -1;
    }
    k_sem_init(semaphore_ptr, count, RL_ENV_MAX_MUTEX_COUNT);
    *lock = (void *)semaphore_ptr;
    return 0;
}
/*!
 * env_delete_mutex
 *
 * Deletes the given lock: resets the semaphore and frees the memory
 * allocated by env_create_mutex.
 */
void env_delete_mutex(void *lock)
{
    k_sem_reset(lock);
    env_free_memory(lock);
}
/*!
 * env_lock_mutex
 *
 * Tries to acquire the lock, if lock is not available then call to
 * this function will suspend. Deliberately a no-op in ISR context
 * (blocking there would be illegal).
 */
void env_lock_mutex(void *lock)
{
    if (env_in_isr() == 0)
    {
        k_sem_take((struct k_sem *)lock, K_FOREVER);
    }
}
/*!
 * env_unlock_mutex
 *
 * Releases the given lock. No-op in ISR context, mirroring env_lock_mutex.
 */
void env_unlock_mutex(void *lock)
{
    if (env_in_isr() == 0)
    {
        k_sem_give((struct k_sem *)lock);
    }
}
/*!
 * env_create_sync_lock
 *
 * Creates a synchronization lock primitive. It is used
 * when signal has to be sent from the interrupt context to main
 * thread context. Implemented on top of env_create_mutex.
 */
int32_t env_create_sync_lock(void **lock, int32_t state)
{
    return env_create_mutex(lock, state); /* state=1 .. initially free */
}
/*!
 * env_delete_sync_lock
 *
 * Deletes the given lock. Safe to call with NULL (no-op).
 */
void env_delete_sync_lock(void *lock)
{
    if (lock != ((void *)0))
    {
        env_delete_mutex(lock);
    }
}
/*!
 * env_acquire_sync_lock
 *
 * Tries to acquire the lock, if lock is not available then call to
 * this function waits for lock to become available. NULL lock is a no-op.
 */
void env_acquire_sync_lock(void *lock)
{
    if (lock != ((void *)0))
    {
        env_lock_mutex(lock);
    }
}
/*!
 * env_release_sync_lock
 *
 * Releases the given lock. NULL lock is a no-op.
 */
void env_release_sync_lock(void *lock)
{
    if (lock != ((void *)0))
    {
        env_unlock_mutex(lock);
    }
}
/*!
 * env_sleep_msec
 *
 * Suspends the calling thread for given time , in msecs.
 *
 * NOTE(review): passes raw milliseconds to k_sleep(), which matches the
 * legacy Zephyr API only; since Zephyr 2.x k_sleep() takes a k_timeout_t
 * (use k_msleep()/K_MSEC) - confirm the targeted Zephyr version.
 */
void env_sleep_msec(uint32_t num_msec)
{
    k_sleep(num_msec);
}
/*!
 * env_register_isr
 *
 * Registers interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number (must be < ISR_COUNT)
 * @param data - interrupt handler data (virtqueue), delivered later by env_isr()
 */
void env_register_isr(uint32_t vector_id, void *data)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id < ISR_COUNT)
    {
        isr_table[vector_id].data = data;
    }
}
/*!
 * env_unregister_isr
 *
 * Unregisters interrupt handler data for the given interrupt vector.
 *
 * @param vector_id - virtual interrupt vector number (must be < ISR_COUNT)
 */
void env_unregister_isr(uint32_t vector_id)
{
    RL_ASSERT(vector_id < ISR_COUNT);
    if (vector_id < ISR_COUNT)
    {
        isr_table[vector_id].data = ((void *)0);
    }
}
/*!
 * env_enable_interrupt
 *
 * Enables the given interrupt; thin wrapper over the platform layer.
 *
 * @param vector_id - interrupt vector number
 */
void env_enable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_enable(vector_id);
}
/*!
 * env_disable_interrupt
 *
 * Disables the given interrupt; thin wrapper over the platform layer.
 *
 * @param vector_id - interrupt vector number
 */
void env_disable_interrupt(uint32_t vector_id)
{
    (void)platform_interrupt_disable(vector_id);
}
/*!
 * env_map_memory
 *
 * Enables memory mapping for given memory region.
 *
 * @param pa - physical address of memory
 * @param va - logical address of memory
 * @param size - memory size
 * param flags - flags for cache/uncached and access type
 */
void env_map_memory(uint32_t pa, uint32_t va, uint32_t size, uint32_t flags)
{
    /* Note the platform API takes (va, pa, ...) - arguments are swapped here. */
    platform_map_mem_region(va, pa, size, flags);
}
/*!
 * env_disable_cache
 *
 * Disables system caches (flush + invalidate first so no dirty data is lost).
 *
 */
void env_disable_cache(void)
{
    platform_cache_all_flush_invalidate();
    platform_cache_disable();
}
/*========================================================= */
/* Util data / functions */
/*!
 * env_isr
 *
 * Dispatches a virtual interrupt to the virtqueue registered for it
 * via env_register_isr(). Called from the platform interrupt handler.
 */
void env_isr(uint32_t vector)
{
    struct isr_info *info;
    RL_ASSERT(vector < ISR_COUNT);
    if (vector < ISR_COUNT)
    {
        info = &isr_table[vector];
        virtqueue_notification((struct virtqueue *)info->data);
    }
}
/*
 * env_create_queue
 *
 * Creates a message queue.
 *
 * @param queue - pointer to created queue
 * @param length - maximum number of elements in the queue
 * @param element_size - queue element size in bytes
 *
 * @return - status of function execution (0 on success, -1 on failure)
 */
int32_t env_create_queue(void **queue, int32_t length, int32_t element_size)
{
    struct k_msgq *queue_ptr = ((void *)0);
    char *msgq_buffer_ptr = ((void *)0);
    queue_ptr = (struct k_msgq *)env_allocate_memory(sizeof(struct k_msgq));
    msgq_buffer_ptr = (char *)env_allocate_memory(length * element_size);
    if ((queue_ptr == ((void *)0)) || (msgq_buffer_ptr == ((void *)0)))
    {
        /* Fix: release whichever allocation DID succeed - the original
           returned here and leaked it. env_free_memory(NULL) is a no-op. */
        env_free_memory(queue_ptr);
        env_free_memory(msgq_buffer_ptr);
        return -1;
    }
    k_msgq_init(queue_ptr, msgq_buffer_ptr, element_size, length);
    *queue = (void *)queue_ptr;
    return 0;
}
/*!
 * env_delete_queue
 *
 * Deletes the message queue: purges pending messages, then frees the
 * ring buffer and the k_msgq object (both allocated in env_create_queue).
 *
 * @param queue - queue to delete
 */
void env_delete_queue(void *queue)
{
    k_msgq_purge((struct k_msgq *)queue);
    env_free_memory(((struct k_msgq *)queue)->buffer_start);
    env_free_memory(queue);
}
/*!
 * env_put_queue
 *
 * Put an element in a queue.
 *
 * @param queue - queue to put element in
 * @param msg - pointer to the message to be put into the queue
 * @param timeout_ms - timeout in ms (forced to 0 in ISR context)
 *
 * @return - 1 on success, 0 on failure/timeout (note: NOT 0/-1)
 */
int32_t env_put_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    if (env_in_isr() != 0)
    {
        timeout_ms = 0; /* force timeout == 0 when in ISR */
    }
    if (0 == k_msgq_put((struct k_msgq *)queue, msg, timeout_ms))
    {
        return 1;
    }
    return 0;
}
/*!
 * env_get_queue
 *
 * Get an element out of a queue.
 *
 * @param queue - queue to get element from
 * @param msg - pointer to a memory to save the message
 * @param timeout_ms - timeout in ms (forced to 0 in ISR context)
 *
 * @return - 1 on success, 0 on failure/timeout (note: NOT 0/-1)
 */
int32_t env_get_queue(void *queue, void *msg, uint32_t timeout_ms)
{
    if (env_in_isr() != 0)
    {
        timeout_ms = 0; /* force timeout == 0 when in ISR */
    }
    if (0 == k_msgq_get((struct k_msgq *)queue, msg, timeout_ms))
    {
        return 1;
    }
    return 0;
}
/*!
 * env_get_current_queue_size
 *
 * Get current queue size.
 *
 * @param queue - queue pointer
 *
 * @return - Number of queued items in the queue
 */
int32_t env_get_current_queue_size(void *queue)
{
    return k_msgq_num_used_get((struct k_msgq *)queue);
}

View File

@ -0,0 +1,292 @@
/*
* Copyright 2019-2021 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdio.h>
#include <string.h>
#include "rpmsg_platform.h"
#include "rpmsg_env.h"
#include "co_util.h"
#include "fr30xx.h"
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
/* The IPC instance used for CM33 and DSP core communication */
static IPC_HandleTypeDef ipc_mcu;
static int32_t isr_counter = 0;
static int32_t disable_counter = 0;
static void *platform_lock;
/* Mask all maskable interrupts on this Cortex-M core (PRIMASK set). */
static void platform_global_isr_disable(void)
{
    __asm volatile("cpsid i");
}
/* Unmask interrupts on this core (PRIMASK clear). */
static void platform_global_isr_enable(void)
{
    __asm volatile("cpsie i");
}
/* Register the virtqueue data for a virtual vector and track how many
   vectors are active. The commented-out MU calls are placeholders kept
   from the reference NXP port; the fr30xx IPC block needs no per-vector
   interrupt enable here. Always returns 0. */
int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data)
{
    /* Register ISR to environment layer */
    env_register_isr(vector_id, isr_data);
    env_lock_mutex(platform_lock);
    RL_ASSERT(0 <= isr_counter);
    //	if (isr_counter < 2)
    //	{
    //		printf("MU_EnableInterrupts(APP_MU, 1UL << (31UL - vector_id));");
    //	}
    isr_counter++;
    env_unlock_mutex(platform_lock);
    return 0;
}
/* Reverse of platform_init_interrupt: drop the active-vector count and
   unregister the virtqueue data. Always returns 0. */
int32_t platform_deinit_interrupt(uint32_t vector_id)
{
    /* Prepare the MU Hardware */
    env_lock_mutex(platform_lock);
    RL_ASSERT(0 < isr_counter);
    isr_counter--;
    //	if (isr_counter < 2)
    //	{
    //		printf("MU_DisableInterrupts(APP_MU, 1UL << (31UL - vector_id));");
    //	}
    /* Unregister ISR from environment layer */
    env_unregister_isr(vector_id);
    env_unlock_mutex(platform_lock);
    return 0;
}
/*!
 * platform_notify
 *
 * Signal the remote core for the given virtqueue via the IPC block.
 *
 * NOTE(review): an unsupported vector_id spins forever in while(1)
 * WITH platform_lock still held, deadlocking every other RPMsg caller.
 * Confirm this trap-on-bad-vector behavior is intended.
 */
void platform_notify(uint32_t vector_id)
{
    env_lock_mutex(platform_lock);
    if (vector_id == 0) {
        ipc_msg_send(&ipc_mcu, IPC_CH_0, 0);
    }
    else if (vector_id == 1) {
        ipc_msg_send(&ipc_mcu, IPC_CH_1, 0);
    }
    else {
        while(1); /* trap: unexpected vector id */
    }
    env_unlock_mutex(platform_lock);
}
/**
 * platform_time_delay
 *
 * @param num_msec Delay time in ms.
 *
 * This is not an accurate delay, it ensures at least num_msec passed when return.
 * Busy-waits via the SoC microsecond delay helper.
 */
void platform_time_delay(uint32_t num_msec)
{
    system_delay_us(1000 * num_msec);
}
/**
 * platform_in_isr
 *
 * Return whether CPU is processing IRQ
 *
 * @return True for IRQ, false otherwise.
 *
 */
int32_t platform_in_isr(void)
{
    /* VECTACTIVE != 0 means an exception/IRQ handler is active. */
    return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0);
}
/**
 * platform_interrupt_enable
 *
 * Enable peripheral-related interrupt
 *
 * Uses a nesting counter: the NVIC line is re-enabled only when the
 * last outstanding disable is balanced.
 *
 * @param vector_id Virtual vector ID that needs to be converted to IRQ number
 *
 * @return vector_id Return value is never checked.
 *
 */
int32_t platform_interrupt_enable(uint32_t vector_id)
{
    RL_ASSERT(0 < disable_counter);
    platform_global_isr_disable();
    disable_counter--;
    if (disable_counter == 0)
    {
        NVIC_EnableIRQ(IPC_MCU_IRQn);
    }
    platform_global_isr_enable();
    return ((int32_t)vector_id);
}
/**
 * platform_interrupt_disable
 *
 * Disable peripheral-related interrupt.
 *
 * @param vector_id Virtual vector ID that needs to be converted to IRQ number
 *
 * @return vector_id Return value is never checked.
 *
 */
int32_t platform_interrupt_disable(uint32_t vector_id)
{
    RL_ASSERT(0 <= disable_counter);
    platform_global_isr_disable();
    /* virtqueues use the same NVIC vector
       if counter is set - the interrupts are disabled */
    if (disable_counter == 0)
    {
        NVIC_DisableIRQ(IPC_MCU_IRQn);
    }
    disable_counter++;
    platform_global_isr_enable();
    return ((int32_t)vector_id);
}
/**
 * platform_map_mem_region
 *
 * Dummy implementation - no MMU/MPU mapping is required on this part.
 *
 */
void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags)
{
}
/**
 * platform_cache_all_flush_invalidate
 *
 * Dummy implementation - shared memory is accessed uncached here.
 *
 */
void platform_cache_all_flush_invalidate(void)
{
}
/**
 * platform_cache_disable
 *
 * Dummy implementation - see platform_cache_all_flush_invalidate.
 *
 */
void platform_cache_disable(void)
{
}
/**
 * platform_vatopa
 *
 * Translate CM33 addresses to DSP addresses.
 * Identity mapping on this part - both cores see the same addresses.
 *
 */
uint32_t platform_vatopa(void *addr)
{
    return (uint32_t)addr;
}
/**
 * platform_patova
 *
 * Translate DSP addresses to CM33 addresses.
 * Identity mapping on this part.
 *
 */
void *platform_patova(uint32_t addr)
{
    return (void *)addr;
}
/* IPC receive callback: map IPC channel 0/1 onto virtual vectors 0/1 and
   hand off to the env layer dispatcher. Other channels are ignored. */
static void ipc_mcu_rx(struct __IPC_HandleTypeDef *hipc, enum_IPC_Chl_Sel_t ch, uint32_t msg)
{
    //    printf("ipc_mcu_rx: %d, 0x%08x\r\n", ch, msg);
    if (ch == IPC_CH_0) {
        env_isr(0);
    }
    else if (ch == IPC_CH_1) {
        env_isr(1);
    }
}
/* IPC transmit-complete callback: nothing to do, kept for the driver API. */
static void ipc_mcu_tx(struct __IPC_HandleTypeDef *hipc, enum_IPC_Chl_Sel_t ch)
{
    //    printf("ipc_mcu_tx: %d\r\n", ch);
}
/* IRQ entry point to be installed in the vector table for the IPC block. */
void ipc_mcu_irq(void)
{
    ipc_IRQHandler(&ipc_mcu);
}
/**
 * platform_init
 *
 * platform/environment init: enable the IPC clock, configure both IPC
 * channels for rx/tx with the local callbacks, and create the shared
 * platform lock.
 *
 * @return 0 on success, -1 when the lock cannot be created
 */
int32_t platform_init(void)
{
    __SYSTEM_APP_IPC_CLK_ENABLE();
    ipc_mcu.IPCx = IPC_MCU;
    ipc_mcu.RxEnableChannels = IPC_CH_0 | IPC_CH_1;
    ipc_mcu.TxEnableChannels = IPC_CH_0 | IPC_CH_1;
    ipc_mcu.RxCallback = ipc_mcu_rx;
    ipc_mcu.TxCallback = ipc_mcu_tx;
    ipc_init(&ipc_mcu);
    /* Create lock used in multi-instanced RPMsg */
    if (0 != env_create_mutex(&platform_lock, 1))
    {
        return -1;
    }
    return 0;
}
/**
 * platform_deinit
 *
 * platform/environment deinit process: destroys the platform lock.
 */
int32_t platform_deinit(void)
{
    /* Delete lock used in multi-instanced RPMsg */
    env_delete_mutex(platform_lock);
    platform_lock = ((void *)0);
    return 0;
}
/**
 * platform_reinit
 *
 * platform/environment reinit after e.g. a low-power cycle:
 * re-enables the IPC clock/block and its NVIC line, keeping the
 * existing ipc_mcu configuration and platform lock.
 */
int32_t platform_reinit(void)
{
    __SYSTEM_APP_IPC_CLK_ENABLE();
    ipc_init(&ipc_mcu);
    NVIC_EnableIRQ(IPC_MCU_IRQn);
    return 0;
}

View File

@ -0,0 +1,303 @@
/*
* Copyright 2019-2021 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdio.h>
#include <string.h>
#include "rpmsg_platform.h"
#include "rpmsg_env.h"
#include "fsl_device_registers.h"
#include "fsl_mu.h"
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
#include "mcmgr.h"
#endif
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
#define APP_MU_IRQ_PRIORITY (3U)
/* The MU instance used for CM33 and DSP core communication */
#define APP_MU MUA
#define APP_MU_IRQn MU_A_IRQn
static int32_t isr_counter = 0;
static int32_t disable_counter = 0;
static void *platform_lock;
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
/* MCMGR event callback: vring index maps 1:1 onto the virtual vector. */
static void mcmgr_event_handler(uint16_t vring_idx, void *context)
{
    env_isr((uint32_t)vring_idx);
}
#else
/* Direct MU interrupt handler: general-purpose interrupts 0/1 map onto
   virtual vectors 0/1; each flag is cleared before dispatch. */
void MU_A_IRQHandler(void)
{
    uint32_t flags;
    flags = MU_GetStatusFlags(APP_MU);
    if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL)
    {
        MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt0Flag);
        env_isr(0);
    }
    if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL)
    {
        MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt1Flag);
        env_isr(1);
    }
}
#endif
/* Mask all maskable interrupts on this Cortex-M core (PRIMASK set). */
static void platform_global_isr_disable(void)
{
    __asm volatile("cpsid i");
}
/* Unmask interrupts on this core (PRIMASK clear). */
static void platform_global_isr_enable(void)
{
    __asm volatile("cpsie i");
}
/* Register the virtqueue data for a virtual vector; the first two
   registrations enable the matching MU general-purpose interrupt.
   Always returns 0. */
int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data)
{
    /* Register ISR to environment layer */
    env_register_isr(vector_id, isr_data);
    env_lock_mutex(platform_lock);
    RL_ASSERT(0 <= isr_counter);
    if (isr_counter < 2)
    {
        MU_EnableInterrupts(APP_MU, 1UL << (31UL - vector_id));
    }
    isr_counter++;
    env_unlock_mutex(platform_lock);
    return 0;
}
/* Reverse of platform_init_interrupt: disable the MU interrupt for the
   vector when the count drops below two, then unregister the virtqueue.
   Always returns 0. */
int32_t platform_deinit_interrupt(uint32_t vector_id)
{
    /* Prepare the MU Hardware */
    env_lock_mutex(platform_lock);
    RL_ASSERT(0 < isr_counter);
    isr_counter--;
    if (isr_counter < 2)
    {
        MU_DisableInterrupts(APP_MU, 1UL << (31UL - vector_id));
    }
    /* Unregister ISR from environment layer */
    env_unregister_isr(vector_id);
    env_unlock_mutex(platform_lock);
    return 0;
}
/*!
 * platform_notify
 *
 * Signal the remote core for the given virtqueue, either through MCMGR
 * or by triggering the matching MU general-purpose interrupt.
 *
 * Fix: the MCMGR branch previously called env_unlock_mutex() and then
 * fell through to the common unlock below, giving the semaphore twice
 * and corrupting the lock's count. Only the single common unlock remains.
 */
void platform_notify(uint32_t vector_id)
{
    env_lock_mutex(platform_lock);
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
    (void)MCMGR_TriggerEvent(kMCMGR_RemoteRPMsgEvent, RL_GET_Q_ID(vector_id));
#else
    (void)MU_TriggerInterrupts(APP_MU, 1UL << (19UL - RL_GET_Q_ID(vector_id)));
#endif
    env_unlock_mutex(platform_lock);
}
/**
 * platform_time_delay
 *
 * @param num_msec Delay time in ms.
 *
 * This is not an accurate delay, it ensures at least num_msec passed when return.
 * Busy-wait loop calibrated against the (re-read) core clock.
 */
void platform_time_delay(uint32_t num_msec)
{
    uint32_t loop;
    /* Recalculate the CPU frequency */
    SystemCoreClockUpdate();
    /* Calculate the CPU loops to delay, each loop has 3 cycles */
    loop = SystemCoreClock / 3U / 1000U * num_msec;
    /* There's some difference among toolchains, 3 or 4 cycles each loop */
    while (loop > 0U)
    {
        __NOP();
        loop--;
    }
}
/**
 * platform_in_isr
 *
 * Return whether CPU is processing IRQ
 *
 * @return True for IRQ, false otherwise.
 *
 */
int32_t platform_in_isr(void)
{
    /* VECTACTIVE != 0 means an exception/IRQ handler is active. */
    return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0);
}
/**
 * platform_interrupt_enable
 *
 * Enable peripheral-related interrupt
 *
 * Uses a nesting counter: the shared NVIC line is re-enabled only when
 * every platform_interrupt_disable() has been balanced.
 *
 * @param vector_id Virtual vector ID that needs to be converted to IRQ number
 *
 * @return vector_id Return value is never checked.
 *
 */
int32_t platform_interrupt_enable(uint32_t vector_id)
{
    RL_ASSERT(0 < disable_counter);
    platform_global_isr_disable();
    disable_counter--;
    if (disable_counter == 0)
    {
        NVIC_EnableIRQ(APP_MU_IRQn);
    }
    platform_global_isr_enable();
    return ((int32_t)vector_id);
}
/**
 * platform_interrupt_disable
 *
 * Disable peripheral-related interrupt.
 *
 * @param vector_id Virtual vector ID that needs to be converted to IRQ number
 *
 * @return vector_id Return value is never checked.
 *
 */
int32_t platform_interrupt_disable(uint32_t vector_id)
{
    RL_ASSERT(0 <= disable_counter);
    platform_global_isr_disable();
    /* virtqueues use the same NVIC vector
       if counter is set - the interrupts are disabled */
    if (disable_counter == 0)
    {
        NVIC_DisableIRQ(APP_MU_IRQn);
    }
    disable_counter++;
    platform_global_isr_enable();
    return ((int32_t)vector_id);
}
/**
 * platform_map_mem_region
 *
 * Dummy implementation - no MMU/MPU mapping is required on this part.
 *
 */
void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags)
{
}
/**
 * platform_cache_all_flush_invalidate
 *
 * Dummy implementation - shared memory is accessed uncached here.
 *
 */
void platform_cache_all_flush_invalidate(void)
{
}
/**
 * platform_cache_disable
 *
 * Dummy implementation - see platform_cache_all_flush_invalidate.
 *
 */
void platform_cache_disable(void)
{
}
/**
 * platform_vatopa
 *
 * Translate CM33 addresses to DSP addresses.
 * NOTE(review): applies a fixed mask-and-offset (low 28 bits + 0x800000);
 * the constants encode this SoC's shared-memory window layout - confirm
 * against the memory map before reusing on a different part.
 *
 */
uint32_t platform_vatopa(void *addr)
{
    return (((uint32_t)(char *)addr & 0x0FFFFFFFu) + 0x800000u);
}
/**
 * platform_patova
 *
 * Translate DSP addresses to CM33 addresses (inverse of platform_vatopa).
 *
 */
void *platform_patova(uint32_t addr)
{
    return (void *)(char *)((addr - 0x00800000u) | 0x20000000u);
}
/**
 * platform_init
 *
 * platform/environment init: either registers the MCMGR RPMsg event
 * handler or brings up the MU and its NVIC line directly, then creates
 * the shared platform lock.
 *
 * @return 0 on success, -1 on MCMGR registration or lock creation failure
 */
int32_t platform_init(void)
{
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
    mcmgr_status_t retval = kStatus_MCMGR_Error;
    retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0));
    if (kStatus_MCMGR_Success != retval)
    {
        return -1;
    }
#else
    MU_Init(APP_MU);
    NVIC_SetPriority(APP_MU_IRQn, APP_MU_IRQ_PRIORITY);
    NVIC_EnableIRQ(APP_MU_IRQn);
#endif
    /* Create lock used in multi-instanced RPMsg */
    if (0 != env_create_mutex(&platform_lock, 1))
    {
        return -1;
    }
    return 0;
}
/**
 * platform_deinit
 *
 * platform/environment deinit process: shuts the MU down and destroys
 * the platform lock.
 */
int32_t platform_deinit(void)
{
    MU_Deinit(APP_MU);
    /* Delete lock used in multi-instanced RPMsg */
    env_delete_mutex(platform_lock);
    platform_lock = ((void *)0);
    return 0;
}

View File

@ -0,0 +1,303 @@
/*
* Copyright 2019-2020 NXP
* All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdio.h>
#include <string.h>
#include "rpmsg_platform.h"
#include "rpmsg_env.h"
#include "fsl_device_registers.h"
#include "fsl_mu.h"
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
#include "mcmgr.h"
#endif
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
#error "This RPMsg-Lite port requires RL_USE_ENVIRONMENT_CONTEXT set to 0"
#endif
#define APP_MU_IRQ_PRIORITY (3U)
/* The MU instance used for CM33 and DSP core communication */
#define APP_MU MUA
#define APP_MU_IRQn MU_A_IRQn
static int32_t isr_counter = 0;
static int32_t disable_counter = 0;
static void *platform_lock;
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
/* MCMGR event callback: vring index maps 1:1 onto the virtual vector. */
static void mcmgr_event_handler(uint16_t vring_idx, void *context)
{
    env_isr((uint32_t)vring_idx);
}
#else
/* Direct MU interrupt handler: general-purpose interrupts 0/1 map onto
   virtual vectors 0/1; each flag is cleared before dispatch. */
void MU_A_IRQHandler(void)
{
    uint32_t flags;
    flags = MU_GetStatusFlags(APP_MU);
    if (((uint32_t)kMU_GenInt0Flag & flags) != 0UL)
    {
        MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt0Flag);
        env_isr(0);
    }
    if (((uint32_t)kMU_GenInt1Flag & flags) != 0UL)
    {
        MU_ClearStatusFlags(APP_MU, (uint32_t)kMU_GenInt1Flag);
        env_isr(1);
    }
}
#endif
/* Mask all maskable interrupts on this Cortex-M core (PRIMASK set). */
static void platform_global_isr_disable(void)
{
    __asm volatile("cpsid i");
}
/* Unmask interrupts on this core (PRIMASK clear). */
static void platform_global_isr_enable(void)
{
    __asm volatile("cpsie i");
}
/* Register the virtqueue data for a virtual vector; the first two
   registrations enable the matching MU general-purpose interrupt.
   Always returns 0. */
int32_t platform_init_interrupt(uint32_t vector_id, void *isr_data)
{
    /* Register ISR to environment layer */
    env_register_isr(vector_id, isr_data);
    env_lock_mutex(platform_lock);
    RL_ASSERT(0 <= isr_counter);
    if (isr_counter < 2)
    {
        MU_EnableInterrupts(APP_MU, 1UL << (31UL - vector_id));
    }
    isr_counter++;
    env_unlock_mutex(platform_lock);
    return 0;
}
/* Reverse of platform_init_interrupt: disable the MU interrupt for the
   vector when the count drops below two, then unregister the virtqueue.
   Always returns 0. */
int32_t platform_deinit_interrupt(uint32_t vector_id)
{
    /* Prepare the MU Hardware */
    env_lock_mutex(platform_lock);
    RL_ASSERT(0 < isr_counter);
    isr_counter--;
    if (isr_counter < 2)
    {
        MU_DisableInterrupts(APP_MU, 1UL << (31UL - vector_id));
    }
    /* Unregister ISR from environment layer */
    env_unregister_isr(vector_id);
    env_unlock_mutex(platform_lock);
    return 0;
}
/*!
 * platform_notify
 *
 * Signal the remote core for the given virtqueue, either through MCMGR
 * or by triggering the matching MU general-purpose interrupt.
 *
 * Fix: the MCMGR branch previously called env_unlock_mutex() and then
 * fell through to the common unlock below, giving the semaphore twice
 * and corrupting the lock's count. Only the single common unlock remains.
 */
void platform_notify(uint32_t vector_id)
{
    env_lock_mutex(platform_lock);
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
    (void)MCMGR_TriggerEvent(kMCMGR_RemoteRPMsgEvent, RL_GET_Q_ID(vector_id));
#else
    (void)MU_TriggerInterrupts(APP_MU, 1UL << (19UL - RL_GET_Q_ID(vector_id)));
#endif
    env_unlock_mutex(platform_lock);
}
/**
 * platform_time_delay
 *
 * @param num_msec Delay time in ms.
 *
 * This is not an accurate delay, it ensures at least num_msec passed when return.
 * Busy-wait loop calibrated against the (re-read) core clock.
 */
void platform_time_delay(uint32_t num_msec)
{
    uint32_t loop;
    /* Recalculate the CPU frequency */
    SystemCoreClockUpdate();
    /* Calculate the CPU loops to delay, each loop has 3 cycles */
    loop = SystemCoreClock / 3U / 1000U * num_msec;
    /* There's some difference among toolchains, 3 or 4 cycles each loop */
    while (loop > 0U)
    {
        __NOP();
        loop--;
    }
}
/**
 * platform_in_isr
 *
 * Return whether CPU is processing IRQ
 *
 * @return True for IRQ, false otherwise.
 *
 */
int32_t platform_in_isr(void)
{
    /* VECTACTIVE != 0 means an exception/IRQ handler is active. */
    return (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0UL) ? 1 : 0);
}
/**
 * platform_interrupt_enable
 *
 * Enable peripheral-related interrupt
 *
 * Uses a nesting counter: the shared NVIC line is re-enabled only when
 * every platform_interrupt_disable() has been balanced.
 *
 * @param vector_id Virtual vector ID that needs to be converted to IRQ number
 *
 * @return vector_id Return value is never checked.
 *
 */
int32_t platform_interrupt_enable(uint32_t vector_id)
{
    RL_ASSERT(0 < disable_counter);
    platform_global_isr_disable();
    disable_counter--;
    if (disable_counter == 0)
    {
        NVIC_EnableIRQ(APP_MU_IRQn);
    }
    platform_global_isr_enable();
    return ((int32_t)vector_id);
}
/**
 * platform_interrupt_disable
 *
 * Disable peripheral-related interrupt.
 *
 * @param vector_id Virtual vector ID that needs to be converted to IRQ number
 *
 * @return vector_id Return value is never checked.
 *
 */
int32_t platform_interrupt_disable(uint32_t vector_id)
{
    RL_ASSERT(0 <= disable_counter);
    platform_global_isr_disable();
    /* virtqueues use the same NVIC vector
       if counter is set - the interrupts are disabled */
    if (disable_counter == 0)
    {
        NVIC_DisableIRQ(APP_MU_IRQn);
    }
    disable_counter++;
    platform_global_isr_enable();
    return ((int32_t)vector_id);
}
/**
* platform_map_mem_region
*
* Dummy implementation
*
*/
/* Dummy implementation: no MMU on this platform, nothing to map. */
void platform_map_mem_region(uint32_t vrt_addr, uint32_t phy_addr, uint32_t size, uint32_t flags)
{
}
/**
* platform_cache_all_flush_invalidate
*
* Dummy implementation
*
*/
/* Dummy implementation: no data cache maintenance required on this platform. */
void platform_cache_all_flush_invalidate(void)
{
}
/**
* platform_cache_disable
*
* Dummy implementation
*
*/
/* Dummy implementation: no cache to disable on this platform. */
void platform_cache_disable(void)
{
}
/**
* platform_vatopa
*
* Dummy implementation
*
*/
uint32_t platform_vatopa(void *addr)
{
return ((uint32_t)(char *)addr);
}
/**
* platform_patova
*
* Dummy implementation
*
*/
/*
 * Physical-to-virtual address translation. Identity mapping on this
 * platform: the physical address is directly usable as a pointer.
 */
void *platform_patova(uint32_t addr)
{
    char *virt = (char *)addr;

    return (void *)virt;
}
/**
* platform_init
*
* platform/environment init
*/
/*
 * Platform/environment init. In the MCMGR configuration the RPMsg event
 * handler is registered with MCMGR; otherwise the MU peripheral and its NVIC
 * interrupt are set up directly. Finally the platform mutex shared by
 * multi-instance RPMsg is created.
 *
 * @return 0 on success, -1 on failure.
 */
int32_t platform_init(void)
{
#if defined(RL_USE_MCMGR_IPC_ISR_HANDLER) && (RL_USE_MCMGR_IPC_ISR_HANDLER == 1)
    mcmgr_status_t retval = kStatus_MCMGR_Error;
    retval = MCMGR_RegisterEvent(kMCMGR_RemoteRPMsgEvent, mcmgr_event_handler, ((void *)0));
    if (kStatus_MCMGR_Success != retval)
    {
        return -1;
    }
#else
    MU_Init(APP_MU);
    NVIC_SetPriority(APP_MU_IRQn, APP_MU_IRQ_PRIORITY);
    NVIC_EnableIRQ(APP_MU_IRQn);
#endif
    /* Create lock used in multi-instanced RPMsg */
    if (0 != env_create_mutex(&platform_lock, 1))
    {
        return -1;
    }
    return 0;
}
/**
* platform_deinit
*
* platform/environment deinit process
*/
/*
 * Platform/environment deinit: release the MU peripheral and the platform
 * mutex.
 *
 * NOTE(review): MU_Deinit() is called unconditionally here, although
 * platform_init() only calls MU_Init() in the non-MCMGR configuration —
 * confirm this is intended for MCMGR builds.
 *
 * @return 0 always.
 */
int32_t platform_deinit(void)
{
    MU_Deinit(APP_MU);
    /* Delete lock used in multi-instanced RPMsg */
    env_delete_mutex(platform_lock);
    platform_lock = ((void *)0);
    return 0;
}

View File

@ -0,0 +1,350 @@
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include "dsp.h"
#include "rpmsg.h"
#include "rpmsg_lite.h"
#include "rpmsg_queue.h"
#include "rpmsg_ns.h"
#define RPMSG_LITE_LINK_ID 0
#define SH_MEM_TOTAL_SIZE (1536U)
#define EPT_ADDR_SYNC 1
#define EPT_ADDR_ASYNC 2
#ifndef RAM_PROJECT
static __attribute__((section(".ARM.__at_0x20000000"))) uint8_t rpmsg_share_mem[SH_MEM_TOTAL_SIZE];
#else
static uint8_t *rpmsg_share_mem = (void *)0x20000000;
#endif
static rpmsg_queue_handle queue_sync = NULL;
static rpmsg_queue_handle queue_async = NULL;
static struct rpmsg_lite_endpoint *ept_sync = NULL;
static struct rpmsg_lite_endpoint *ept_async = NULL;
static struct rpmsg_lite_instance *remote_rpmsg = NULL;
static struct rpmsg_lite_instance *master_rpmsg = NULL;
static LOCK *ept_sync_lock;
static void (*msg_callback)(struct rpmsg_lite_instance *rpmsg, struct rpmsg_msg_t *msg) = NULL;
/************************************************************************************
* @fn rpmsg_sync_invoke
*
* @brief Start synchronous invocation to the other side.
*
* @param rpmsg: rpmsg instance.
* @param func_id: request function ID.
* @param param: all parameters.
* @param ret: return value.
*
* @return the function is handled by the other side normally or note
*/
/************************************************************************************
 * @fn      rpmsg_sync_invoke
 *
 * @brief   Start synchronous invocation to the other side: send a SYNC_INVOKE
 *          request on the async endpoint and block (up to 1000 ms) for the
 *          SYNC_RETURN response on the sync queue. Serialized by ept_sync_lock;
 *          sleep is prevented for the duration of the call.
 *
 * @param   rpmsg: rpmsg instance.
 * @param   func_id: request function ID.
 * @param   param: all parameters.
 * @param   ret: return value (may be NULL if the caller does not need it).
 *
 * @return  status reported by the other side, or (uint32_t)-1 on bad instance
 *          or response timeout.
 */
uint32_t rpmsg_sync_invoke(struct rpmsg_lite_instance *rpmsg, uint32_t func_id, void *param, uint32_t *ret)
{
    struct rpmsg_msg_t *msg;
    uint32_t msg_len;
    uint32_t src_addr;
    uint32_t status;
    int32_t remote_call_ret;

    if (rpmsg == NULL) {
        return -1;
    }

    env_lock_mutex(ept_sync_lock);
    system_prevent_sleep_set(SYSTEM_PREVENT_SLEEP_TYPE_DSP);

    msg = rpmsg_lite_alloc_tx_buffer(rpmsg, &msg_len, RL_BLOCK);
    msg->msg_type = RPMSG_MSG_TYPE_SYNC_INVOKE;
    msg->p.sync_func.func_id = func_id;
    msg->p.sync_func.param = param;
    rpmsg_lite_send_nocopy(rpmsg, ept_async, EPT_ADDR_ASYNC, msg, msg_len);

    remote_call_ret = rpmsg_queue_recv_nocopy(rpmsg, queue_sync, (uint32_t *)&src_addr, (char **)&msg, &msg_len, 1000);
    assert(RL_SUCCESS == remote_call_ret);
    if (RL_SUCCESS != remote_call_ret) {
        /* Timeout/failure: msg was NOT refilled by the recv call. With asserts
         * compiled out (NDEBUG) the original code would dereference a stale
         * pointer here — bail out cleanly instead. */
        system_prevent_sleep_clear(SYSTEM_PREVENT_SLEEP_TYPE_DSP);
        env_unlock_mutex(ept_sync_lock);
        return (uint32_t)-1;
    }

    if (ret) {
        *ret = msg->p.sync_ret.result;
    }
    status = msg->p.sync_ret.status;
    system_prevent_sleep_clear(SYSTEM_PREVENT_SLEEP_TYPE_DSP);
    env_unlock_mutex(ept_sync_lock);
    rpmsg_lite_release_rx_buffer(rpmsg, msg);
    return status;
}
/************************************************************************************
* @fn rpmsg_send_async
*
* @brief Send message or command to the other side.
*
* @param rpmsg: rpmsg instance.
* @param async_msg: asynchronous message, this struct will be copied into share memory.
*
* @return The message is sent to the other side successfully or not
*/
/************************************************************************************
 * @fn      rpmsg_send_async
 *
 * @brief   Send message or command to the other side. The async_msg struct is
 *          copied into a freshly allocated share-memory tx buffer (blocking
 *          allocation) and sent no-copy on the async endpoint.
 *
 * @param   rpmsg: rpmsg instance.
 * @param   async_msg: asynchronous message, this struct will be copied into share memory.
 *
 * @return  0 (always; allocation blocks until a buffer is available).
 */
uint32_t rpmsg_send_async(struct rpmsg_lite_instance *rpmsg, struct rpmsg_async_msg_t *async_msg)
{
    struct rpmsg_msg_t *msg;
    uint32_t msg_len;

    /* Removed unused local src_addr (compiler warning). */
    msg = rpmsg_lite_alloc_tx_buffer(rpmsg, &msg_len, RL_BLOCK);
    msg->msg_type = RPMSG_MSG_TYPE_ASYNC_MSG;
    memcpy((void *)&msg->p.async_msg, (void *)async_msg, sizeof(struct rpmsg_async_msg_t));
    rpmsg_lite_send_nocopy(rpmsg, ept_async, EPT_ADDR_ASYNC, msg, msg_len);
    return 0;
}
/************************************************************************************
* @fn rpmsg_send_sync_ret
*
* @brief Send response to the other side after execute synchronous invocation.
*
* @param rpmsg: rpmsg instance.
* @param status: handle the invocation normally or not.
* @param ret: return value of request function.
*
* @return The message is sent to the other side successfully or not
*/
/************************************************************************************
 * @fn      rpmsg_send_sync_ret
 *
 * @brief   Send response to the other side after executing a synchronous
 *          invocation: a SYNC_RETURN message carrying status and result is
 *          sent on the sync endpoint.
 *
 * @param   rpmsg: rpmsg instance.
 * @param   status: handle the invocation normally or not.
 * @param   ret: return value of request function.
 *
 * @return  0 (always; allocation blocks until a buffer is available).
 */
uint32_t rpmsg_send_sync_ret(struct rpmsg_lite_instance *rpmsg, uint32_t status, uint32_t ret)
{
    struct rpmsg_msg_t *msg;
    uint32_t msg_len;

    /* Removed unused local src_addr (compiler warning). */
    msg = rpmsg_lite_alloc_tx_buffer(rpmsg, &msg_len, RL_BLOCK);
    msg->msg_type = RPMSG_MSG_TYPE_SYNC_RETURN;
    msg->p.sync_ret.status = status;
    msg->p.sync_ret.result = ret;
    rpmsg_lite_send_nocopy(rpmsg, ept_sync, EPT_ADDR_SYNC, msg, msg_len);
    return 0;
}
/************************************************************************************
* @fn rpmsg_master_init
*
* @brief Initialize rpmsg-lite master side, this function is used in DSP side in general usage.
*
* @param callback: callback function to receive message from the other side.
*
* @return initialized rpmsg-lite master instance
*/
/************************************************************************************
 * @fn      rpmsg_master_init
 *
 * @brief   Initialize rpmsg-lite master side (DSP side in general usage):
 *          create the sync/async receive queues and endpoints, store the
 *          application callback, then announce "I'm ready" to the remote side
 *          on the sync endpoint.
 *
 * @param   recv: callback function to receive message from the other side.
 *
 * @return  initialized rpmsg-lite master instance
 */
struct rpmsg_lite_instance *rpmsg_master_init(void (*recv)(struct rpmsg_lite_instance *rpmsg, struct rpmsg_msg_t *msg))
{
    struct rpmsg_lite_instance *my_rpmsg;
    struct rpmsg_msg_t *msg;
    uint32_t msg_len;

    /* Removed unused local src_addr (compiler warning). */
    my_rpmsg = rpmsg_lite_master_init((void *)0x20000000, SH_MEM_TOTAL_SIZE, RPMSG_LITE_LINK_ID, RL_NO_FLAGS);
    queue_sync = rpmsg_queue_create(my_rpmsg);
    queue_async = rpmsg_queue_create(my_rpmsg);
    ept_sync = rpmsg_lite_create_ept(my_rpmsg, EPT_ADDR_SYNC, rpmsg_queue_rx_cb, queue_sync);
    ept_async = rpmsg_lite_create_ept(my_rpmsg, EPT_ADDR_ASYNC, rpmsg_queue_rx_cb, queue_async);
    env_create_mutex((void **)&ept_sync_lock, 1);
    msg_callback = recv;
    master_rpmsg = my_rpmsg;
    /* notice remote side "I'm ready." */
    msg = rpmsg_lite_alloc_tx_buffer(my_rpmsg, &msg_len, RL_BLOCK);
    msg->msg_type = RPMSG_MSG_TYPE_MASTER_READY;
    rpmsg_lite_send_nocopy(my_rpmsg, ept_sync, EPT_ADDR_SYNC, msg, msg_len);
    return my_rpmsg;
}
/************************************************************************************
* @fn rpmsg_remote_init
*
* @brief Initialize rpmsg-lite remote side, this function is used in CM33 side in general usage.
*
* @param callback: callback function to receive message from the other side.
*
* @return initialized rpmsg-lite remote instance
*/
/************************************************************************************
 * @fn      rpmsg_remote_init
 *
 * @brief   Initialize rpmsg-lite remote side (CM33 side in general usage):
 *          create the sync/async receive queues and endpoints over the shared
 *          memory region and store the application callback. Use
 *          rpmsg_wait_master_ready() afterwards to synchronize with the master.
 *
 * @param   recv: callback function to receive message from the other side.
 *
 * @return  initialized rpmsg-lite remote instance
 */
struct rpmsg_lite_instance *rpmsg_remote_init(void (*recv)(struct rpmsg_lite_instance *rpmsg, struct rpmsg_msg_t *msg))
{
    struct rpmsg_lite_instance *my_rpmsg;

    /* Removed unused locals msg, src_addr, msg_len (compiler warnings). */
    my_rpmsg = rpmsg_lite_remote_init((void *)&rpmsg_share_mem[0], RPMSG_LITE_LINK_ID, RL_NO_FLAGS);
    // while (0 == rpmsg_lite_is_link_up(my_rpmsg));
    queue_sync = rpmsg_queue_create(my_rpmsg);
    queue_async = rpmsg_queue_create(my_rpmsg);
    ept_sync = rpmsg_lite_create_ept(my_rpmsg, EPT_ADDR_SYNC, rpmsg_queue_rx_cb, queue_sync);
    ept_async = rpmsg_lite_create_ept(my_rpmsg, EPT_ADDR_ASYNC, rpmsg_queue_rx_cb, queue_async);
    env_create_mutex((void **)&ept_sync_lock, 1);
    msg_callback = recv;
    remote_rpmsg = my_rpmsg;
    return my_rpmsg;
}
/************************************************************************************
* @fn rpmsg_wait_master_ready
*
* @brief used by remote side to wait for master become ready after calling rpmsg_remote_init and
* boot up DSP (master side).
*
* @param rpmsg: rpmsg instance of remote side.
*/
/************************************************************************************
 * @fn      rpmsg_wait_master_ready
 *
 * @brief   Used by the remote side to wait for the master to become ready after
 *          calling rpmsg_remote_init and booting up the DSP (master side).
 *          Blocks until the MASTER_READY message is received on the sync queue.
 *
 *          BUGFIX: the original code received exactly once and then spun on
 *          `while (cond);` — an infinite busy loop if any other message arrived
 *          first. Now every non-matching message is released and the queue is
 *          re-read until MASTER_READY from EPT_ADDR_SYNC is seen.
 *
 * @param   rpmsg: rpmsg instance of remote side.
 */
void rpmsg_wait_master_ready(struct rpmsg_lite_instance *rpmsg)
{
    struct rpmsg_msg_t *msg;
    uint32_t src_addr;
    uint32_t msg_len;

    /* wait for "I'm ready." from master side */
    for (;;) {
        rpmsg_queue_recv_nocopy(rpmsg, queue_sync, (uint32_t *)&src_addr, (char **)&msg, &msg_len, RL_BLOCK);
        if ((src_addr == EPT_ADDR_SYNC) && (msg->msg_type == RPMSG_MSG_TYPE_MASTER_READY)) {
            rpmsg_lite_release_rx_buffer(rpmsg, msg);
            break;
        }
        /* Not the handshake message: drop it and keep waiting. */
        rpmsg_lite_release_rx_buffer(rpmsg, msg);
    }
}
/************************************************************************************
* @fn rpmsg_destroy
*
* @brief destroy an initialized rpmsg-lite instance.
*
* @param rpmsg: rpmsg-lite instance.
*/
/************************************************************************************
 * @fn      rpmsg_destroy
 *
 * @brief   Destroy an initialized rpmsg-lite instance: tear down both endpoints
 *          and both receive queues, deinit the instance, clear whichever global
 *          (master/remote) pointed at it, then delete the sync-invoke mutex.
 *          The mutex is held during teardown so an in-flight rpmsg_sync_invoke()
 *          on the same lock cannot interleave with resource destruction.
 *
 * @param   rpmsg: rpmsg-lite instance.
 */
void rpmsg_destroy(struct rpmsg_lite_instance *rpmsg)
{
    env_lock_mutex(ept_sync_lock);
    (void)rpmsg_lite_destroy_ept(rpmsg, ept_sync);
    ept_sync = ((void *)0);
    (void)rpmsg_lite_destroy_ept(rpmsg, ept_async);
    ept_async = ((void *)0);
    (void)rpmsg_queue_destroy(rpmsg, queue_sync);
    queue_sync = ((void *)0);
    (void)rpmsg_queue_destroy(rpmsg, queue_async);
    queue_async = ((void *)0);
    (void)rpmsg_lite_deinit(rpmsg);
    if (rpmsg == remote_rpmsg) {
        remote_rpmsg = NULL;
    }
    if (rpmsg == master_rpmsg) {
        master_rpmsg = NULL;
    }
    /* Unlock before deleting the mutex, then drop the stale handle. */
    env_unlock_mutex(ept_sync_lock);
    env_delete_mutex(ept_sync_lock);
    ept_sync_lock = ((void *)0);
}
/************************************************************************************
* @fn rpmsg_recv_msg
*
* @brief Called by app layer to receive message from the other side in blocking mode.
*
* @param rpmsg: rpmsg-lite instance.
* @param msg: data storage address
* @param msg_len: message length
*
* @return the endpoint address used by the other side to send this message
*/
/************************************************************************************
 * @fn      rpmsg_recv_msg
 *
 * @brief   Called by the app layer to receive a message from the other side in
 *          blocking mode (no-copy: *msg points into share memory until released).
 *
 * @param   rpmsg: rpmsg-lite instance.
 * @param   msg: data storage address
 * @param   msg_len: message length
 *
 * @return  the endpoint address used by the other side to send this message
 */
uint32_t rpmsg_recv_msg(struct rpmsg_lite_instance *rpmsg, struct rpmsg_msg_t **msg, uint32_t *msg_len)
{
    uint32_t sender_addr;

    rpmsg_queue_recv_nocopy(rpmsg, queue_async, &sender_addr, (char **)msg, msg_len, RL_BLOCK);
    return sender_addr;
}
/************************************************************************************
* @fn rpmsg_get_remote_instance
*
* @brief Called by other module to get created remote_rpmsg instance.
*
* @return remote rpmsg instance
*/
/* Accessor for the instance created by rpmsg_remote_init() (NULL if none). */
struct rpmsg_lite_instance *rpmsg_get_remote_instance(void)
{
    return remote_rpmsg;
}
/************************************************************************************
* @fn rpmsg_get_master_instance
*
* @brief Called by other module to get created master_rpmsg instance.
*
* @return master rpmsg instance
*/
/* Accessor for the instance created by rpmsg_master_init() (NULL if none). */
struct rpmsg_lite_instance *rpmsg_get_master_instance(void)
{
    return master_rpmsg;
}
/*
 * Temporary rx callback installed during wake-up recovery
 * (rpmsg_remote_recover). When the master's MASTER_READY notification arrives
 * on the sync endpoint, the normal queue callback is restored and the buffer
 * is released via the recovery-specific helper; the callback swap is what
 * terminates the recovery polling loop. Other messages are left untouched.
 */
static int32_t rpmsg_queue_rx_tmp(void *payload, uint32_t payload_len, uint32_t src, void *priv)
{
    struct rpmsg_msg_t *msg = payload;
    if ((src == EPT_ADDR_SYNC) && (msg->msg_type == RPMSG_MSG_TYPE_MASTER_READY)) {
        /* Handshake seen: switch back to the regular queue rx callback. */
        ept_sync->rx_cb = rpmsg_queue_rx_cb;
        rpmsg_lite_release_rx_buffer_dur_recover(rpmsg_get_remote_instance(), msg);
    }
    return 0;
}
/************************************************************************************
* @fn rpmsg_remote_recover
*
* @brief In wake up procedure, this function is used to recover rpmsg. This function
* should be called before interrupt is enabled.
*/
/************************************************************************************
 * @fn      rpmsg_remote_recover
 *
 * @brief   In the wake-up procedure, this function is used to recover rpmsg.
 *          With interrupts globally masked it resets the rpmsg environment,
 *          installs the temporary rx callback and pumps the MU IPC IRQ handler
 *          by hand until rpmsg_queue_rx_tmp sees MASTER_READY and restores the
 *          normal callback (which ends the loop). Must be called before
 *          interrupts are enabled.
 */
void rpmsg_remote_recover(void)
{
    GLOBAL_INT_DISABLE();
    rpmsg_lite_remote_env_reset(rpmsg_get_remote_instance());
    ept_sync->rx_cb = rpmsg_queue_rx_tmp;
    /* Poll the IPC IRQ handler manually; the tmp callback swaps itself out
     * once the master-ready handshake message is processed. */
    while (ept_sync->rx_cb == rpmsg_queue_rx_tmp) {
        void ipc_mcu_irq(void);
        ipc_mcu_irq();
    }
    GLOBAL_INT_RESTORE();
}
//void rpmsg_task(void *arg)
//{
// struct rpmsg_msg_t *msg;
// uint32_t msg_len;
// uint32_t src_addr;
// while (1) {
// rpmsg_queue_recv_nocopy(arg, queue_async, (uint32_t *)&src_addr, (char **)&msg, &msg_len, RL_BLOCK);
//
// if (msg_callback) {
// msg_callback(arg, msg);
// }
// }
//}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "rpmsg_lite.h"
#include "rpmsg_ns.h"
#include <stdint.h>
#define RL_NS_NAME_SIZE (32)
/*!
* struct rpmsg_ns_msg - dynamic name service announcement message
* @name: name of remote service that is published
* @addr: address of remote service that is published
* @flags: indicates whether service is created or destroyed
*
* This message is sent across to publish a new service, or announce
* about its removal. When we receive these messages, an appropriate
* rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
* or ->remove() handler of the appropriate rpmsg driver will be invoked
* (if/as-soon-as one is registered).
*/
RL_PACKED_BEGIN
/* Wire format of a name-service announcement; packed because it is exchanged
 * verbatim with the other core. */
struct rpmsg_ns_msg
{
    char name[RL_NS_NAME_SIZE]; /* name of the published remote service */
    uint32_t addr;              /* endpoint address of the published service */
    uint32_t flags;             /* service created or destroyed */
} RL_PACKED_END;
/*!
* @brief
* Nameservice callback, called in interrupt context
*
* @param payload Pointer to the buffer containing received data
* @param payload_len Size of data received, in bytes
* @param src Pointer to address of the endpoint from which data is received
* @param priv Private data provided during endpoint creation
*
* @return RL_RELEASE, message is always freed
*
*/
/*!
 * @brief Nameservice callback, called in interrupt context.
 *
 * Forwards well-formed announcements to the user callback stored in priv;
 * anything whose size does not match struct rpmsg_ns_msg is silently dropped.
 *
 * @param payload     Pointer to the buffer containing received data
 * @param payload_len Size of data received, in bytes
 * @param src         Address of the endpoint from which data is received
 * @param priv        Private data provided during endpoint creation
 *
 * @return RL_RELEASE, message is always freed
 */
static int32_t rpmsg_ns_rx_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv)
{
    struct rpmsg_ns_msg *announcement = payload;
    struct rpmsg_ns_callback_data *ctxt = priv;

    RL_ASSERT(priv != RL_NULL);
    RL_ASSERT(ctxt->cb != RL_NULL);

    /* Drop likely bad messages received at nameservice address */
    if (payload_len != sizeof(struct rpmsg_ns_msg))
    {
        return RL_RELEASE;
    }
    ctxt->cb(announcement->addr, announcement->name, announcement->flags, ctxt->user_data);
    return RL_RELEASE;
}
/*
 * Bind a name-service listener: create the nameservice endpoint at
 * RL_NS_EPT_ADDR with rpmsg_ns_rx_cb as its rx callback. In the static-API
 * build the callback context and endpoint live inside the caller-supplied
 * ns_ept_ctxt; otherwise both context structures are heap-allocated (and the
 * partial allocation is freed on failure). Returns the opaque handle used by
 * rpmsg_ns_unbind, or RL_NULL on bad arguments / allocation failure.
 */
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev,
                              rpmsg_ns_new_ept_cb app_cb,
                              void *user_data,
                              rpmsg_ns_static_context *ns_ept_ctxt)
#else
rpmsg_ns_handle rpmsg_ns_bind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_new_ept_cb app_cb, void *user_data)
#endif /* RL_USE_STATIC_API */
{
    struct rpmsg_ns_context *ns_ctxt;
    if (app_cb == RL_NULL)
    {
        return RL_NULL;
    }
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    if (ns_ept_ctxt == RL_NULL)
    {
        return RL_NULL;
    }
    ns_ctxt = &ns_ept_ctxt->ns_ctxt;
    /* Set-up the nameservice callback context */
    ns_ept_ctxt->cb_ctxt.user_data = user_data;
    ns_ept_ctxt->cb_ctxt.cb        = app_cb;
    ns_ctxt->cb_ctxt = &ns_ept_ctxt->cb_ctxt;
    ns_ctxt->ept = rpmsg_lite_create_ept(rpmsg_lite_dev, RL_NS_EPT_ADDR, rpmsg_ns_rx_cb, (void *)ns_ctxt->cb_ctxt,
                                         &ns_ept_ctxt->ept_ctxt);
#else
    {
        struct rpmsg_ns_callback_data *cb_ctxt;
        cb_ctxt = env_allocate_memory(sizeof(struct rpmsg_ns_callback_data));
        if (cb_ctxt == RL_NULL)
        {
            return RL_NULL;
        }
        ns_ctxt = env_allocate_memory(sizeof(struct rpmsg_ns_context));
        if (ns_ctxt == RL_NULL)
        {
            /* Second allocation failed: roll back the first. */
            env_free_memory(cb_ctxt);
            return RL_NULL;
        }
        /* Set-up the nameservice callback context */
        cb_ctxt->user_data = user_data;
        cb_ctxt->cb        = app_cb;
        ns_ctxt->cb_ctxt = cb_ctxt;
        ns_ctxt->ept = rpmsg_lite_create_ept(rpmsg_lite_dev, RL_NS_EPT_ADDR, rpmsg_ns_rx_cb, (void *)ns_ctxt->cb_ctxt);
    }
#endif /* RL_USE_STATIC_API */
    return (rpmsg_ns_handle)ns_ctxt;
}
/*
 * Unbind a name-service listener created by rpmsg_ns_bind: destroy its
 * endpoint and, in the dynamic build, free the heap-allocated context
 * structures. Returns the endpoint-destroy status.
 * NOTE(review): handle is not NULL-checked before dereference — callers must
 * pass a handle previously returned by rpmsg_ns_bind.
 */
int32_t rpmsg_ns_unbind(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_ns_handle handle)
{
    struct rpmsg_ns_context *ns_ctxt = (struct rpmsg_ns_context *)handle;
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    return rpmsg_lite_destroy_ept(rpmsg_lite_dev, ns_ctxt->ept);
#else
    {
        int32_t retval;
        retval = rpmsg_lite_destroy_ept(rpmsg_lite_dev, ns_ctxt->ept);
        env_free_memory(ns_ctxt->cb_ctxt);
        env_free_memory(ns_ctxt);
        return retval;
    }
#endif
}
int32_t rpmsg_ns_announce(struct rpmsg_lite_instance *rpmsg_lite_dev,
struct rpmsg_lite_endpoint *new_ept,
const char *ept_name,
uint32_t flags)
{
struct rpmsg_ns_msg ns_msg;
if (ept_name == RL_NULL)
{
return RL_ERR_PARAM;
}
if (new_ept == RL_NULL)
{
return RL_ERR_PARAM;
}
env_strncpy(ns_msg.name, ept_name, RL_NS_NAME_SIZE);
ns_msg.flags = flags;
ns_msg.addr = new_ept->addr;
return rpmsg_lite_send(rpmsg_lite_dev, new_ept, RL_NS_EPT_ADDR, (char *)&ns_msg, sizeof(struct rpmsg_ns_msg),
RL_BLOCK);
}

View File

@ -0,0 +1,218 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2015 Xilinx, Inc.
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "rpmsg_lite.h"
#include "rpmsg_queue.h"
/* One queued rx notification: the share-memory payload pointer plus its
 * length and the sender's endpoint address. */
typedef struct
{
    uint32_t src; /* source endpoint address */
    void *data;   /* pointer to the (held) rx buffer in share memory */
    uint32_t len; /* payload length in bytes */
} rpmsg_queue_rx_cb_data_t;
/*
 * Endpoint rx callback that funnels messages into an env queue (priv is the
 * queue handle). On successful enqueue (env_put_queue returns non-zero) the
 * rx buffer is held so the consumer can read it later; otherwise it is
 * released immediately and the message is lost.
 */
int32_t rpmsg_queue_rx_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv)
{
    rpmsg_queue_rx_cb_data_t msg;
    RL_ASSERT(priv != RL_NULL);
    msg.data = payload;
    msg.len  = payload_len;
    msg.src  = src;
    /* if message is successfully added into queue then hold rpmsg buffer */
    if (0 != env_put_queue(priv, &msg, 0))
    {
        /* hold the rx buffer */
        return RL_HOLD;
    }
    return RL_RELEASE;
}
rpmsg_queue_handle rpmsg_queue_create(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
int32_t status;
void *q = RL_NULL;
if (rpmsg_lite_dev == RL_NULL)
{
return RL_NULL;
}
/* create message queue for channel default endpoint */
status = env_create_queue(&q, (int32_t)rpmsg_lite_dev->rvq->vq_nentries, (int32_t)sizeof(rpmsg_queue_rx_cb_data_t));
if ((status != 0) || (q == RL_NULL))
{
return RL_NULL;
}
return ((rpmsg_queue_handle)q);
}
/*
 * Destroy a queue created by rpmsg_queue_create.
 * Returns RL_ERR_PARAM on a NULL instance or queue, RL_SUCCESS otherwise.
 */
int32_t rpmsg_queue_destroy(struct rpmsg_lite_instance *rpmsg_lite_dev, rpmsg_queue_handle q)
{
    if ((rpmsg_lite_dev == RL_NULL) || (q == RL_NULL))
    {
        return RL_ERR_PARAM;
    }

    env_delete_queue((void *)q);
    return RL_SUCCESS;
}
/*
 * Copying receive: dequeue one pending message (env_get_queue returns
 * non-zero on success), copy its payload into the caller's buffer and always
 * release the underlying rx buffer — even when the caller's buffer was too
 * small (RL_ERR_BUFF_SIZE). src and len are optional out-params.
 * Returns RL_SUCCESS, RL_ERR_BUFF_SIZE, RL_ERR_PARAM (bad args or release
 * failure) or RL_ERR_NO_BUFF (nothing dequeued within timeout).
 */
int32_t rpmsg_queue_recv(struct rpmsg_lite_instance *rpmsg_lite_dev,
                         rpmsg_queue_handle q,
                         uint32_t *src,
                         char *data,
                         uint32_t maxlen,
                         uint32_t *len,
                         uint32_t timeout)
{
    rpmsg_queue_rx_cb_data_t msg = {0};
    int32_t retval = RL_SUCCESS;
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    if (q == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    if (data == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    /* Get an element out of the message queue for the selected endpoint */
    if (0 != env_get_queue((void *)q, &msg, timeout))
    {
        if (src != RL_NULL)
        {
            *src = msg.src;
        }
        if (len != RL_NULL)
        {
            *len = msg.len;
        }
        if (maxlen >= msg.len)
        {
            env_memcpy(data, msg.data, msg.len);
        }
        else
        {
            /* Caller's buffer too small; payload is dropped but reported via *len. */
            retval = RL_ERR_BUFF_SIZE;
        }
        /* Release used buffer. */
        return ((RL_SUCCESS == rpmsg_lite_release_rx_buffer(rpmsg_lite_dev, msg.data)) ? retval : RL_ERR_PARAM);
    }
    else
    {
        return RL_ERR_NO_BUFF; /* failed */
    }
}
/*
 * Zero-copy receive: dequeue one pending message and hand the caller a
 * pointer directly into the held rx buffer. The caller must later free it
 * with rpmsg_queue_nocopy_free. src and len are optional out-params.
 * Returns RL_SUCCESS, RL_ERR_PARAM (bad args) or RL_ERR_NO_BUFF (nothing
 * dequeued within timeout).
 */
int32_t rpmsg_queue_recv_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                rpmsg_queue_handle q,
                                uint32_t *src,
                                char **data,
                                uint32_t *len,
                                uint32_t timeout)
{
    rpmsg_queue_rx_cb_data_t entry = {0};

    if ((rpmsg_lite_dev == RL_NULL) || (data == RL_NULL) || (q == RL_NULL))
    {
        return RL_ERR_PARAM;
    }

    /* Get an element out of the message queue for the selected endpoint
     * (env_get_queue reports success with a non-zero return). */
    if (0 == env_get_queue((void *)q, &entry, timeout))
    {
        return RL_ERR_NO_BUFF; /* failed */
    }

    if (src != RL_NULL)
    {
        *src = entry.src;
    }
    if (len != RL_NULL)
    {
        *len = entry.len;
    }
    *data = entry.data;
    return RL_SUCCESS; /* success */
}
/*
 * Release a buffer previously handed out by rpmsg_queue_recv_nocopy.
 * Returns RL_ERR_PARAM on a NULL instance/buffer or release failure,
 * RL_SUCCESS otherwise.
 */
int32_t rpmsg_queue_nocopy_free(struct rpmsg_lite_instance *rpmsg_lite_dev, void *data)
{
    if ((rpmsg_lite_dev == RL_NULL) || (data == RL_NULL))
    {
        return RL_ERR_PARAM;
    }

    /* Release used buffer. */
    if (RL_SUCCESS == rpmsg_lite_release_rx_buffer(rpmsg_lite_dev, data))
    {
        return RL_SUCCESS;
    }
    return RL_ERR_PARAM;
}
/*
 * Report how many messages are currently pending in the queue.
 * Returns RL_ERR_PARAM for a NULL handle.
 */
int32_t rpmsg_queue_get_current_size(rpmsg_queue_handle q)
{
    /* Return actual queue size. */
    return (q == RL_NULL) ? RL_ERR_PARAM : env_get_current_queue_size((void *)q);
}

View File

@ -0,0 +1,745 @@
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* Copyright (c) 2016 Freescale Semiconductor, Inc.
* Copyright 2016-2019 NXP
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "rpmsg_env.h"
#include "virtqueue.h"
/* Prototype for internal functions. */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx);
static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len);
static uint16_t vq_ring_add_buffer(
struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length);
static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc);
static int32_t vq_ring_must_notify_host(struct virtqueue *vq);
static void vq_ring_notify_host(struct virtqueue *vq);
static uint16_t virtqueue_nused(struct virtqueue *vq);
/*!
* virtqueue_create - Creates new VirtIO queue
*
* @param id - VirtIO queue ID , must be unique
* @param name - Name of VirtIO queue
* @param ring - Pointer to vring_alloc_info control block
* @param callback - Pointer to callback function, invoked
* when message is available on VirtIO queue
* @param notify - Pointer to notify function, used to notify
* other side that there is job available for it
* @param v_queue - Created VirtIO queue.
*
* @return - Function status
*/
/*
 * Allocate and initialize a virtqueue over a caller-provided vring memory
 * region. num_descs must be a non-zero power of two (required by the
 * index-masking math used throughout this file). On success *v_queue owns a
 * heap-allocated struct virtqueue; on failure nothing is allocated.
 */
int32_t virtqueue_create(uint16_t id,
                         const char *name,
                         struct vring_alloc_info *ring,
                         void (*callback_fc)(struct virtqueue *vq),
                         void (*notify_fc)(struct virtqueue *vq),
                         struct virtqueue **v_queue)
{
    struct virtqueue *vq = VQ_NULL;
    volatile int32_t status = VQUEUE_SUCCESS;
    uint32_t vq_size = 0;
    VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
    /* Power-of-two check: n & (n - 1) is zero only for powers of two. */
    VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status, ERROR_VRING_ALIGN);
    if (status == VQUEUE_SUCCESS)
    {
        vq_size = sizeof(struct virtqueue);
        vq = (struct virtqueue *)env_allocate_memory(vq_size);
        if (vq == VQ_NULL)
        {
            return (ERROR_NO_MEM);
        }
        env_memset(vq, 0x00, vq_size);
        env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
        vq->vq_queue_index = id;
        vq->vq_alignment = (int32_t)(ring->align);
        vq->vq_nentries = ring->num_descs;
        vq->callback_fc = callback_fc;
        vq->notify_fc = notify_fc;
        // indirect addition is not supported
        vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
        vq->vq_ring_mem = (void *)ring->phy_addr;
        vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment);
        *v_queue = vq;
    }
    return (status);
}
/*!
* virtqueue_create_static - Creates new VirtIO queue - static version
*
* @param id - VirtIO queue ID , must be unique
* @param name - Name of VirtIO queue
* @param ring - Pointer to vring_alloc_info control block
* @param callback - Pointer to callback function, invoked
* when message is available on VirtIO queue
* @param notify - Pointer to notify function, used to notify
* other side that there is job available for it
* @param v_queue - Created VirtIO queue.
* @param vq_ctxt - Statically allocated virtqueue context
*
* @return - Function status
*/
/*
 * Static variant of virtqueue_create: identical setup, but the struct
 * virtqueue lives inside the caller-supplied vq_ctxt instead of the heap.
 * num_descs must be a non-zero power of two.
 */
int32_t virtqueue_create_static(uint16_t id,
                                const char *name,
                                struct vring_alloc_info *ring,
                                void (*callback_fc)(struct virtqueue *vq),
                                void (*notify_fc)(struct virtqueue *vq),
                                struct virtqueue **v_queue,
                                struct vq_static_context *vq_ctxt)
{
    struct virtqueue *vq = VQ_NULL;
    volatile int32_t status = VQUEUE_SUCCESS;
    uint32_t vq_size = 0;
    VQ_PARAM_CHK(vq_ctxt == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
    /* Power-of-two check: n & (n - 1) is zero only for powers of two. */
    VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status, ERROR_VRING_ALIGN);
    if (status == VQUEUE_SUCCESS)
    {
        vq_size = sizeof(struct virtqueue);
        vq = &vq_ctxt->vq;
        env_memset(vq, 0x00, vq_size);
        env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
        vq->vq_queue_index = id;
        vq->vq_alignment = (int32_t)(ring->align);
        vq->vq_nentries = ring->num_descs;
        vq->callback_fc = callback_fc;
        vq->notify_fc = notify_fc;
        // indirect addition is not supported
        vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
        vq->vq_ring_mem = (void *)ring->phy_addr;
        vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment);
        *v_queue = vq;
    }
    return (status);
}
/*
 * Reset the virtqueue's bookkeeping counters (free/queued counts and the
 * avail/used consumer indices) back to their post-creation values.
 * NOTE(review): vq_desc_head_idx and the vring contents are not touched —
 * confirm callers reset those separately if required.
 */
void virtqueue_reinit(struct virtqueue *vq)
{
    vq->vq_free_cnt = 0;
    vq->vq_queued_cnt = 0;
    vq->vq_used_cons_idx = 0;
    vq->vq_available_idx = 0;
}
/*!
* virtqueue_add_buffer() - Enqueues new buffer in vring for consumption
* by other side.
*
* @param vq - Pointer to VirtIO queue control block.
* @param head_idx - Index of buffer to be added to the avail ring
*
* @return - Function status
*/
/*
 * Publish an already-filled descriptor to the avail ring so the other side
 * can consume it. Guarded by the avail_write busy flag; head_idx is
 * validated against the ring size before publication.
 */
int32_t virtqueue_add_buffer(struct virtqueue *vq, uint16_t head_idx)
{
    volatile int32_t status = VQUEUE_SUCCESS;
    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQUEUE_BUSY(vq, avail_write);
    if (status == VQUEUE_SUCCESS)
    {
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        /*
         * Update vring_avail control block fields so that other
         * side can get buffer using it.
         */
        vq_ring_update_avail(vq, head_idx);
    }
    VQUEUE_IDLE(vq, avail_write);
    return (status);
}
/*!
* virtqueue_fill_avail_buffers - Enqueues single buffer in vring, updates avail
*
* @param vq - Pointer to VirtIO queue control block
* @param buffer - Address of buffer
* @param len - Length of buffer
*
* @return - Function status
*/
/*
 * Fill the next free descriptor with a writable buffer (VRING_DESC_F_WRITE)
 * and publish it on the avail ring. Descriptors are handed out sequentially
 * via vq_desc_head_idx. Guarded by the avail_write busy flag.
 */
int32_t virtqueue_fill_avail_buffers(struct virtqueue *vq, void *buffer, uint32_t len)
{
    struct vring_desc *dp;
    uint16_t head_idx;
    volatile int32_t status = VQUEUE_SUCCESS;
    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQUEUE_BUSY(vq, avail_write);
    if (status == VQUEUE_SUCCESS)
    {
        head_idx = vq->vq_desc_head_idx;
        dp = &vq->vq_ring.desc[head_idx];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        dp->addr = env_map_vatopa(vq->env, buffer);
#else
        dp->addr = env_map_vatopa(buffer);
#endif
        dp->len = len;
        /* Buffer is device-writable: the other side will fill it. */
        dp->flags = VRING_DESC_F_WRITE;
        vq->vq_desc_head_idx++;
        vq_ring_update_avail(vq, head_idx);
    }
    VQUEUE_IDLE(vq, avail_write);
    return (status);
}
/*!
* virtqueue_get_buffer - Returns used buffers from VirtIO queue
*
* @param vq - Pointer to VirtIO queue control block
* @param len - Length of consumed buffer
* @param idx - Index to buffer descriptor pool
*
* @return - Pointer to used buffer
*/
/*
 * Pop the next entry from the used ring. Returns VQ_NULL when the queue is
 * empty (consumer index has caught up with the device's used index);
 * otherwise returns the buffer's virtual address and optionally reports its
 * consumed length and descriptor index. Guarded by the used_read busy flag.
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
{
    struct vring_used_elem *uep;
    uint16_t used_idx, desc_idx;
    if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))
    {
        return (VQ_NULL);
    }
    VQUEUE_BUSY(vq, used_read);
    /* Ring indices are free-running; mask down to the ring size. */
    used_idx = (uint16_t)(vq->vq_used_cons_idx & ((uint16_t)(vq->vq_nentries - 1U)));
    uep = &vq->vq_ring.used->ring[used_idx];
    /* Read barrier: the used-ring entry must be read after used->idx. */
    env_rmb();
    desc_idx = (uint16_t)uep->id;
    if (len != VQ_NULL)
    {
        *len = uep->len;
    }
    if (idx != VQ_NULL)
    {
        *idx = desc_idx;
    }
    vq->vq_used_cons_idx++;
    VQUEUE_IDLE(vq, used_read);
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    return env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[desc_idx].addr)));
#else
    return env_map_patova((uint32_t)(vq->vq_ring.desc[desc_idx].addr));
#endif
}
/*!
* virtqueue_get_buffer_length - Returns size of a buffer
*
* @param vq - Pointer to VirtIO queue control block
* @param idx - Index to buffer descriptor pool
*
* @return - Buffer length
*/
/*
 * Return the length field of descriptor idx.
 * NOTE(review): idx is not bounds-checked; callers must pass a valid
 * descriptor index (0 .. vq_nentries-1).
 */
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
    return vq->vq_ring.desc[idx].len;
}
/*!
* virtqueue_free - Frees VirtIO queue resources
*
* @param vq - Pointer to VirtIO queue control block
*
*/
/*
 * Free a heap-allocated virtqueue (from virtqueue_create). The vring memory
 * itself belongs to the caller — only the reference is cleared here, then
 * the struct is freed. NULL is tolerated.
 */
void virtqueue_free(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->vq_ring_mem != VQ_NULL)
        {
            vq->vq_ring_size = 0;
            vq->vq_ring_mem = VQ_NULL;
        }
        env_free_memory(vq);
    }
}
/*!
* virtqueue_free - Frees VirtIO queue resources - static version
*
* @param vq - Pointer to VirtIO queue control block
*
*/
/*
 * Static-allocation counterpart of virtqueue_free (for queues from
 * virtqueue_create_static): clears the vring reference but frees nothing,
 * since the struct lives in caller-owned storage. NULL is tolerated.
 */
void virtqueue_free_static(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->vq_ring_mem != VQ_NULL)
        {
            vq->vq_ring_size = 0;
            vq->vq_ring_mem = VQ_NULL;
        }
    }
}
/*!
* virtqueue_get_available_buffer - Returns buffer available for use in the
* VirtIO queue
*
* @param vq - Pointer to VirtIO queue control block
* @param avail_idx - Pointer to index used in vring desc table
* @param len - Length of buffer
*
* @return - Pointer to available buffer
*/
/*
 * Pop the next buffer published by the other side on the avail ring.
 * Returns VQ_NULL when no new avail entries exist; otherwise returns the
 * buffer's virtual address and reports its descriptor index (*avail_idx) and
 * length (*len). Guarded by the avail_read busy flag.
 *
 * BUGFIX: the RL_USE_ENVIRONMENT_CONTEXT branch was missing a closing
 * parenthesis on the env_map_patova() call, which made that configuration
 * fail to compile.
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, uint32_t *len)
{
    uint16_t head_idx = 0;
    void *buffer;
    if (vq->vq_available_idx == vq->vq_ring.avail->idx)
    {
        return (VQ_NULL);
    }
    VQUEUE_BUSY(vq, avail_read);
    /* Free-running index, masked down to the ring size. */
    head_idx = (uint16_t)(vq->vq_available_idx++ & ((uint16_t)(vq->vq_nentries - 1U)));
    *avail_idx = vq->vq_ring.avail->ring[head_idx];
    /* Read barrier: descriptor contents must be read after avail->idx. */
    env_rmb();
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    buffer = env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[*avail_idx].addr)));
#else
    buffer = env_map_patova((uint32_t)(vq->vq_ring.desc[*avail_idx].addr));
#endif
    *len = vq->vq_ring.desc[*avail_idx].len;
    VQUEUE_IDLE(vq, avail_read);
    return (buffer);
}
/*!
 * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue
 *
 * @param vq       - Pointer to VirtIO queue control block
 * @param head_idx - Index of vring desc containing used buffer
 * @param len      - Length of buffer
 *
 * @return - VQUEUE_SUCCESS, or ERROR_VRING_NO_BUFF when head_idx is not a
 *           valid descriptor index
 */
int32_t virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, uint32_t len)
{
    /* BUGFIX: valid descriptor indices are 0 .. vq_nentries-1; the previous
       '>' comparison wrongly accepted head_idx == vq_nentries. */
    if (head_idx >= vq->vq_nentries)
    {
        return (ERROR_VRING_NO_BUFF);
    }

    VQUEUE_BUSY(vq, used_write);

    /* Publish the descriptor into the used ring for the other side. */
    vq_ring_update_used(vq, head_idx, len);

    VQUEUE_IDLE(vq, used_write);

    return (VQUEUE_SUCCESS);
}
/*!
 * virtqueue_fill_used_buffers - Fill used buffer ring
 *
 * @param vq     - Pointer to VirtIO queue control block
 * @param buffer - Buffer to add
 * @param len    - Length of buffer
 *
 * @return - Function status (always VQUEUE_SUCCESS)
 */
int32_t virtqueue_fill_used_buffers(struct virtqueue *vq, void *buffer, uint32_t len)
{
    uint16_t insert_idx;
    uint16_t next_free;

    VQUEUE_BUSY(vq, used_write);

    insert_idx = vq->vq_desc_head_idx;
    VQ_RING_ASSERT_VALID_IDX(vq, insert_idx);

    /* Enqueue buffer onto the ring, then advance the free-descriptor head. */
    next_free            = vq_ring_add_buffer(vq, vq->vq_ring.desc, insert_idx, buffer, len);
    vq->vq_desc_head_idx = next_free;

    /* Hand the descriptor to the other side via the used ring. */
    vq_ring_update_used(vq, insert_idx, len);

    VQUEUE_IDLE(vq, used_write);

    return (VQUEUE_SUCCESS);
}
/*!
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Function status (non-zero when used entries are already pending)
 */
int32_t virtqueue_enable_cb(struct virtqueue *vq)
{
    /* Re-arm interrupts with a zero pending-entries threshold. */
    int32_t already_pending = vq_ring_enable_interrupt(vq, 0);

    return already_pending;
}
/*!
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq, avail_write);

    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0UL)
    {
        /* Legacy mode: set the interrupt-suppression flag. */
        vq->vq_ring.avail->flags |= (uint16_t)VRING_AVAIL_F_NO_INTERRUPT;
    }
    else
    {
        /* Event-index mode: park the used-event index well behind the
           consumer index so no event threshold can be crossed. */
        vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1U;
    }

    VQUEUE_IDLE(vq, avail_write);
}
/*!
 * virtqueue_kick - Notifies other side that there is buffer available for it.
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq, avail_write);

    /* Ensure updated avail->idx is visible to host. */
    env_mb();

    if (vq_ring_must_notify_host(vq) != 0)
    {
        vq_ring_notify_host(vq);
    }

    /* All queued entries have now been announced. */
    vq->vq_queued_cnt = 0;

    VQUEUE_IDLE(vq, avail_write);
}
/*!
 * virtqueue_dump - Dumps important virtqueue fields, use for debugging purposes
 *
 * @param vq - Pointer to VirtIO queue control block (VQ_NULL is tolerated)
 */
void virtqueue_dump(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        env_print(
            "VQ: %s - size=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
            vq->vq_name, vq->vq_nentries, virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
            vq->vq_ring.avail->idx, vq->vq_used_cons_idx, vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
            vq->vq_ring.used->flags);
    }
}
/*!
 * virtqueue_get_desc_size - Returns vring descriptor size
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Descriptor length of the next available buffer, or 0 when the
 *           avail ring holds no new entries
 */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
{
    uint16_t slot;
    uint16_t desc_idx;

    /* Nothing pending in the avail ring. */
    if (vq->vq_available_idx == vq->vq_ring.avail->idx)
    {
        return 0;
    }

    /* Peek (without consuming) at the next avail entry. */
    slot     = (uint16_t)(vq->vq_available_idx & ((uint16_t)(vq->vq_nentries - 1U)));
    desc_idx = vq->vq_ring.avail->ring[slot];

    return (vq->vq_ring.desc[desc_idx].len);
}
/**************************************************************************
* Helper Functions *
**************************************************************************/
/*!
 *
 * vq_ring_add_buffer
 *
 * Fills the descriptor at head_idx with the given buffer and returns the
 * index of the next free descriptor. A VQ_NULL buffer is a no-op.
 */
static uint16_t vq_ring_add_buffer(
    struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length)
{
    struct vring_desc *entry;

    if (buffer == VQ_NULL)
    {
        return head_idx;
    }

    VQASSERT(vq, head_idx != VQ_RING_DESC_CHAIN_END, "premature end of free desc chain");

    entry = &desc[head_idx];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    entry->addr = env_map_vatopa(vq->env, buffer);
#else
    entry->addr = env_map_vatopa(buffer);
#endif
    entry->len   = length;
    entry->flags = VRING_DESC_F_WRITE;

    return (head_idx + 1U);
}
/*!
 *
 * vq_ring_init
 *
 * Links each descriptor to its successor and terminates the free chain at
 * the last entry.
 */
void vq_ring_init(struct virtqueue *vq)
{
    struct vring *ring = &vq->vq_ring;
    uint32_t count     = (uint32_t)(vq->vq_nentries);
    uint32_t n;

    for (n = 0U; (n + 1U) < count; n++)
    {
        ring->desc[n].next = (uint16_t)(n + 1U);
    }
    /* The final descriptor marks the end of the free chain. */
    ring->desc[n].next = (uint16_t)VQ_RING_DESC_CHAIN_END;
}
/*!
 *
 * vq_ring_update_avail
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
    /*
     * Place the head of the descriptor chain into the next slot and make
     * it usable to the host. The chain is made available now rather than
     * deferring to virtqueue_notify() in the hopes that if the host is
     * currently running on another CPU, we can keep it processing the new
     * descriptor.
     */
    uint16_t slot = (uint16_t)(vq->vq_ring.avail->idx & ((uint16_t)(vq->vq_nentries - 1U)));

    vq->vq_ring.avail->ring[slot] = desc_idx;

    /* The ring slot must be written before the index that publishes it. */
    env_wmb();

    vq->vq_ring.avail->idx++;

    /* Keep pending count until virtqueue_notify(). */
    vq->vq_queued_cnt++;
}
/*!
 *
 * vq_ring_update_used
 *
 */
static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len)
{
    /*
     * Place the head of the descriptor chain into the next slot and make
     * it usable to the host. The chain is made available now rather than
     * deferring to virtqueue_notify() in the hopes that if the host is
     * currently running on another CPU, we can keep it processing the new
     * descriptor.
     */
    uint16_t slot                = vq->vq_ring.used->idx & (vq->vq_nentries - 1U);
    struct vring_used_elem *elem = &(vq->vq_ring.used->ring[slot]);

    elem->id  = head_idx;
    elem->len = len;

    /* The element must be visible before the used index that publishes it. */
    env_wmb();

    vq->vq_ring.used->idx++;
}
/*!
 *
 * vq_ring_enable_interrupt
 *
 */
static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
    /*
     * Enable interrupts, making sure we get the latest index of
     * what's already been consumed.
     */
    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0UL)
    {
        /* Legacy mode: clear the interrupt-suppression flag. */
        vq->vq_ring.avail->flags &= ~(uint16_t)VRING_AVAIL_F_NO_INTERRUPT;
    }
    else
    {
        /* Event-index mode: request an event once ndesc more entries land. */
        vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
    }

    env_mb();

    /*
     * Enough items may have already been consumed to meet our threshold
     * since we last checked. Let our caller know so it processes the new
     * entries.
     */
    return (virtqueue_nused(vq) > ndesc) ? 1 : 0;
}
/*!
 *
 * virtqueue_notification
 *
 * Invokes the queue's callback, provided the queue and its callback are set.
 */
void virtqueue_notification(struct virtqueue *vq)
{
    if ((vq != VQ_NULL) && (vq->callback_fc != VQ_NULL))
    {
        vq->callback_fc(vq);
    }
}
/*!
 *
 * vq_ring_must_notify_host
 *
 */
static int32_t vq_ring_must_notify_host(struct virtqueue *vq)
{
    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL)
    {
        /* Event-index mode: notify only when the range of newly queued
           entries crosses the event index the host requested. */
        uint16_t new_idx   = vq->vq_ring.avail->idx;
        uint16_t prev_idx  = new_idx - vq->vq_queued_cnt;
        uint16_t event_idx = (uint16_t)vring_avail_event(&vq->vq_ring);

        return (vring_need_event(event_idx, new_idx, prev_idx) != 0) ? 1 : 0;
    }

    /* Legacy mode: notify unless the host suppressed notifications. */
    return ((vq->vq_ring.used->flags & ((uint16_t)VRING_USED_F_NO_NOTIFY)) == 0U) ? 1 : 0;
}
/*!
 *
 * vq_ring_notify_host
 *
 * Fires the platform notify hook when one is registered.
 */
static void vq_ring_notify_host(struct virtqueue *vq)
{
    if (vq->notify_fc == VQ_NULL)
    {
        return;
    }
    vq->notify_fc(vq);
}
/*!
 *
 * virtqueue_nused
 *
 * Number of used-ring entries this side has not consumed yet; the uint16_t
 * subtraction handles index wrap-around.
 */
static uint16_t virtqueue_nused(struct virtqueue *vq)
{
    uint16_t pending = (uint16_t)(vq->vq_ring.used->idx - vq->vq_used_cons_idx);

    VQASSERT(vq, pending <= vq->vq_nentries, "used more than available");

    return (pending);
}