esp_common: Add API for IPC to run small pieces of code on the other CPU, in the context of the level 4 interrupt

This commit is contained in:
Konstantin Kondrashov
2021-08-03 14:35:29 +08:00
committed by Zim Kalinowski
parent a0c548ccd4
commit 4972605b16
51 changed files with 1062 additions and 379 deletions

View File

@@ -0,0 +1,154 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "esp_err.h"
#include "esp_ipc.h"
#include "esp_ipc_isr.h"
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#ifndef CONFIG_FREERTOS_UNICORE
// Per-CPU IPC state. Index is always the *target* CPU id (0..portNUM_PROCESSORS-1).
static TaskHandle_t s_ipc_task_handle[portNUM_PROCESSORS];
static SemaphoreHandle_t s_ipc_mutex[portNUM_PROCESSORS]; // This mutex is used as a global lock for esp_ipc_* APIs
static SemaphoreHandle_t s_ipc_sem[portNUM_PROCESSORS]; // Two semaphores used to wake each of ipc tasks
static SemaphoreHandle_t s_ipc_ack[portNUM_PROCESSORS]; // Semaphore used to acknowledge that task was woken up,
// or function has finished running
static volatile esp_ipc_func_t s_func[portNUM_PROCESSORS]; // Function which should be called by high priority task
static void * volatile s_func_arg[portNUM_PROCESSORS]; // Argument to pass into s_func
// Controls at which point of the callback's lifetime the caller is unblocked.
typedef enum {
IPC_WAIT_FOR_START,
IPC_WAIT_FOR_END
} esp_ipc_wait_t;
static volatile esp_ipc_wait_t s_ipc_wait[portNUM_PROCESSORS];// This variable tells high priority task when it should give
// s_ipc_ack semaphore: before s_func is called, or
// after it returns
/*
 * Per-CPU worker task which executes IPC callbacks on behalf of the other core.
 *
 * arg carries the CPU id this task is pinned to (cast to int). The task blocks
 * on s_ipc_sem[cpuid] until esp_ipc_call_and_wait() posts work, then runs the
 * callback and acknowledges either before or after it, depending on
 * s_ipc_wait[cpuid].
 */
static void IRAM_ATTR ipc_task(void* arg)
{
    const int cpuid = (int) arg;
    assert(cpuid == xPortGetCoreID());
    while (true) {
        // Wait for IPC to be initiated.
        // This will be indicated by giving the semaphore corresponding to
        // this CPU.
        if (xSemaphoreTake(s_ipc_sem[cpuid], portMAX_DELAY) != pdTRUE) {
            // TODO: when can this happen?
            abort();
        }
        // Snapshot the callback and its argument before acknowledging: once
        // s_ipc_ack is given (in the WAIT_FOR_START case) the caller may
        // overwrite s_func/s_func_arg with a new request.
        esp_ipc_func_t func = s_func[cpuid];
        // Renamed from 'arg': the original local shadowed the task parameter.
        void* func_arg = s_func_arg[cpuid];
        if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_START) {
            xSemaphoreGive(s_ipc_ack[cpuid]);
        }
        (*func)(func_arg);
        if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_END) {
            xSemaphoreGive(s_ipc_ack[cpuid]);
        }
    }
    // TODO: currently this is unreachable code. Introduce esp_ipc_uninit
    // function which will signal to both tasks that they can shut down.
    // Not critical at this point, we don't have a use case for stopping
    // IPC yet.
    // Also need to delete the semaphore here.
    vTaskDelete(NULL);
}
/*
 * Initialize inter-processor call module. This function is called automatically
 * on CPU start and should not be called from the application.
 *
 * This function starts one task per CPU. These tasks are started
 * with high priority. These tasks are normally inactive, waiting until one of
 * the esp_ipc_call_* functions is used. One of these tasks will be
 * woken up to execute the callback provided to esp_ipc_call_nonblocking or
 * esp_ipc_call_blocking.
 */
static void esp_ipc_init(void) __attribute__((constructor));
static void esp_ipc_init(void)
{
#ifdef CONFIG_ESP_IPC_ISR_ENABLE
    esp_ipc_isr_init();
#endif
    char task_name[15];
    for (int i = 0; i < portNUM_PROCESSORS; ++i) {
        snprintf(task_name, sizeof(task_name), "ipc%d", i);
        s_ipc_mutex[i] = xSemaphoreCreateMutex();
        s_ipc_ack[i] = xSemaphoreCreateBinary();
        s_ipc_sem[i] = xSemaphoreCreateBinary();
        // Semaphore creation can fail on heap exhaustion; the rest of the
        // module dereferences these handles unconditionally, so fail fast.
        assert(s_ipc_mutex[i] != NULL && s_ipc_ack[i] != NULL && s_ipc_sem[i] != NULL);
        portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, CONFIG_ESP_IPC_TASK_STACK_SIZE, (void*) i,
                                                    configMAX_PRIORITIES - 1, &s_ipc_task_handle[i], i);
        assert(res == pdTRUE);
        (void)res;
    }
}
/*
 * Common implementation behind esp_ipc_call / esp_ipc_call_blocking.
 *
 * Posts func(arg) to the IPC task pinned to cpu_id and blocks until that task
 * acknowledges — either when the callback starts (IPC_WAIT_FOR_START) or when
 * it returns (IPC_WAIT_FOR_END).
 *
 * Returns ESP_ERR_INVALID_ARG for an out-of-range cpu_id,
 * ESP_ERR_INVALID_STATE if the scheduler is not yet running, ESP_OK otherwise.
 */
static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, void* arg, esp_ipc_wait_t wait_for)
{
    if (cpu_id >= portNUM_PROCESSORS) {
        return ESP_ERR_INVALID_ARG;
    }
    if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
        return ESP_ERR_INVALID_STATE;
    }
#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    TaskHandle_t task_handler = xTaskGetCurrentTaskHandle();
    UBaseType_t priority_of_current_task = uxTaskPriorityGet(task_handler);
    UBaseType_t priority_of_running_ipc_task = uxTaskPriorityGet(s_ipc_task_handle[cpu_id]);
    // Raise the IPC task before trying to take the mutex so a lower-priority
    // IPC task cannot hold this caller up (priority-inversion avoidance).
    if (priority_of_running_ipc_task < priority_of_current_task) {
        vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
    }
    xSemaphoreTake(s_ipc_mutex[cpu_id], portMAX_DELAY);
    // Re-apply under the mutex: the priority may have been changed by another
    // caller between the check above and acquiring the lock.
    vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
#else
    // Without per-caller priority there is a single global lock for all CPUs.
    xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
#endif
    // Publish the request, then wake the IPC task and wait for its ack.
    s_func[cpu_id] = func;
    s_func_arg[cpu_id] = arg;
    s_ipc_wait[cpu_id] = wait_for;
    xSemaphoreGive(s_ipc_sem[cpu_id]);
    xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);
#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
    xSemaphoreGive(s_ipc_mutex[cpu_id]);
#else
    xSemaphoreGive(s_ipc_mutex[0]);
#endif
    return ESP_OK;
}
/*
 * Run func(arg) on the IPC task of cpu_id; returns as soon as the callback
 * has *started* executing (non-blocking with respect to its completion).
 */
esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    const esp_ipc_wait_t mode = IPC_WAIT_FOR_START;
    return esp_ipc_call_and_wait(cpu_id, func, arg, mode);
}
/*
 * Run func(arg) on the IPC task of cpu_id and block until the callback has
 * *returned* on the other core.
 */
esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
    const esp_ipc_wait_t mode = IPC_WAIT_FOR_END;
    return esp_ipc_call_and_wait(cpu_id, func, arg, mode);
}
#endif // not CONFIG_FREERTOS_UNICORE

View File

@@ -0,0 +1,215 @@
/*
* SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "esp_err.h"
#include "esp_attr.h"
#include "soc/cpu.h"
#include "soc/soc.h"
#include "soc/dport_access.h"
#ifdef CONFIG_IDF_TARGET_ESP32
#include "soc/dport_reg.h"
#else
#include "soc/periph_defs.h"
#include "soc/system_reg.h"
#endif
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/portmacro.h"
#include "esp_intr_alloc.h"
#include "esp_ipc_isr.h"
#include "xtensa/core-macros.h"
#include "sdkconfig.h"
// Spinlock serializing all IPC_ISR users across both cores.
static portMUX_TYPE s_ipc_isr_mux = portMUX_INITIALIZER_UNLOCKED;
// These four are shared with the assembly handler (esp_ipc_isr_handler),
// hence non-static and volatile.
uint32_t volatile esp_ipc_isr_start_fl; // the flag shows that it is about to run esp_ipc_func()
uint32_t volatile esp_ipc_isr_end_fl = 1; // the flag shows that esp_ipc_func() is done
esp_ipc_isr_func_t volatile esp_ipc_func; // the function which will be run in the ipc_isr context
void * volatile esp_ipc_func_arg; // the argument of esp_ipc_func()
// Whether the stall mechanism is armed; set RUNNING once the second core's
// init task has run, cleared around flash/panic paths via pause/abort.
typedef enum {
STALL_STATE_IDLE = 0,
STALL_STATE_RUNNING = 1,
} stall_state_t;
static stall_state_t volatile s_stall_state = STALL_STATE_IDLE;
// Per-CPU nesting depth of esp_ipc_isr_stall_other_cpu() calls.
static int32_t volatile s_count_of_nested_calls[portNUM_PROCESSORS] = { 0 };
// Interrupt level saved by the outermost stall call, restored on release.
static BaseType_t s_stored_interrupt_level;
// Polled by the stalled core's busy-wait loop; set to 1 to let it exit.
static uint32_t volatile esp_ipc_isr_finish_cmd;
/**
 * @brief Type of calling
 */
typedef enum {
IPC_ISR_WAIT_FOR_START = 0, /*!< The caller is waiting for the start */
IPC_ISR_WAIT_FOR_END = 1, /*!< The caller is waiting for the end */
} esp_ipc_isr_wait_t;
// SAFE variants work from both task and ISR context.
#define IPC_ISR_ENTER_CRITICAL() portENTER_CRITICAL_SAFE(&s_ipc_isr_mux)
#define IPC_ISR_EXIT_CRITICAL() portEXIT_CRITICAL_SAFE(&s_ipc_isr_mux)
static void esp_ipc_isr_call_and_wait(esp_ipc_isr_func_t func, void* arg, esp_ipc_isr_wait_t wait_for);
/* Initializing IPC_ISR */
/*
 * One-shot task, pinned to each core by esp_ipc_isr_init(), which routes the
 * FROM_CPU_INTR2/3 interrupt source to the high-priority IPC_ISR interrupt
 * number for its own core, then deletes itself.
 */
static void esp_ipc_isr_init_cpu(void* arg)
{
    (void) arg;
    const uint32_t cpuid = xPortGetCoreID();
    // Core 0 uses FROM_CPU_INTR2, core 1 uses FROM_CPU_INTR3.
    uint32_t intr_source = ETS_FROM_CPU_INTR2_SOURCE + cpuid; // ETS_FROM_CPU_INTR2_SOURCE and ETS_FROM_CPU_INTR3_SOURCE
    // Disable while re-routing the interrupt matrix, then re-enable.
    ESP_INTR_DISABLE(ETS_IPC_ISR_INUM);
    intr_matrix_set(cpuid, intr_source, ETS_IPC_ISR_INUM);
    ESP_INTR_ENABLE(ETS_IPC_ISR_INUM);
    /* If this fails then the minimum stack size for this config is too close to running out */
    assert(uxTaskGetStackHighWaterMark(NULL) > 128);
    // The init task for core 1 runs last in practice here; arming the stall
    // mechanism on it signals that both cores are ready.
    if (cpuid != 0) {
        s_stall_state = STALL_STATE_RUNNING;
    }
    vTaskDelete(NULL);
}
/*
 * Spawn one short-lived init task pinned to each core; each task wires up the
 * IPC_ISR high-priority interrupt for its own core and then deletes itself.
 */
void esp_ipc_isr_init(void)
{
    for (unsigned core = 0; core < portNUM_PROCESSORS; ++core) {
        const portBASE_TYPE created = xTaskCreatePinnedToCore(esp_ipc_isr_init_cpu, "ipc_isr_init",
                                                              configMINIMAL_STACK_SIZE, NULL, 5, NULL, core);
        assert(created == pdTRUE);
        (void)created;
    }
}
/* End initializing IPC_ISR */
/* Public API functions */
/*
 * Run func(arg) inside the other core's level-4 interrupt; returns as soon as
 * the other core has *started* executing it. The critical section serializes
 * concurrent IPC_ISR users.
 */
void IRAM_ATTR esp_ipc_isr_asm_call(esp_ipc_isr_func_t func, void* arg)
{
    IPC_ISR_ENTER_CRITICAL();
    esp_ipc_isr_call_and_wait(func, arg, IPC_ISR_WAIT_FOR_START);
    IPC_ISR_EXIT_CRITICAL();
}
/*
 * Run func(arg) inside the other core's level-4 interrupt and block until it
 * has *finished*. The critical section serializes concurrent IPC_ISR users.
 */
void IRAM_ATTR esp_ipc_isr_asm_call_blocking(esp_ipc_isr_func_t func, void* arg)
{
    IPC_ISR_ENTER_CRITICAL();
    esp_ipc_isr_call_and_wait(func, arg, IPC_ISR_WAIT_FOR_END);
    IPC_ISR_EXIT_CRITICAL();
}
// This asm function is from esp_ipc_isr_routines.S.
// It is waiting for the finish_cmd command in a loop.
void esp_ipc_isr_waiting_for_finish_cmd(void* finish_cmd);
/*
 * esp_ipc_isr_stall_other_cpu is used for:
 * - stall other CPU,
 * - do protection when dual core access DPORT internal register and APB register via DPORT simultaneously.
 * This function will be initialize after FreeRTOS startup.
 * When cpu0 wants to access DPORT register, it should notify cpu1 enter in high-priority interrupt for be mute.
 * When cpu1 already in high-priority interrupt, cpu0 can access DPORT register.
 * Currently, cpu1 will wait for cpu0 finish access and exit high-priority interrupt.
 */
void IRAM_ATTR esp_ipc_isr_stall_other_cpu(void)
{
    if (s_stall_state == STALL_STATE_RUNNING) {
        BaseType_t intLvl = portENTER_CRITICAL_NESTED();
        const uint32_t cpu_id = xPortGetCoreID();
        // Only the outermost call actually stalls the other core; nested
        // calls just bump the per-CPU counter.
        if (s_count_of_nested_calls[cpu_id]++ == 0) {
            IPC_ISR_ENTER_CRITICAL();
            // NOTE: IPC_ISR_ENTER_CRITICAL and portENTER_CRITICAL_NESTED are
            // deliberately left open here; the matching exits happen in
            // esp_ipc_isr_release_other_cpu() when the nesting unwinds to 0.
            s_stored_interrupt_level = intLvl;
            esp_ipc_isr_finish_cmd = 0;
            // Park the other core in a busy-wait loop until finish_cmd is set.
            esp_ipc_isr_call_and_wait(&esp_ipc_isr_waiting_for_finish_cmd, (void*)&esp_ipc_isr_finish_cmd, IPC_ISR_WAIT_FOR_START);
            return;
        }
        /* Interrupts are already disabled by the parent, we're nested here. */
        portEXIT_CRITICAL_NESTED(intLvl);
    }
}
/*
 * Counterpart of esp_ipc_isr_stall_other_cpu(): decrements the per-CPU nesting
 * counter and, on the outermost release, lets the other core leave its
 * busy-wait loop and closes the critical sections opened by the stall call.
 */
void IRAM_ATTR esp_ipc_isr_release_other_cpu(void)
{
    if (s_stall_state == STALL_STATE_RUNNING) {
        const uint32_t cpu_id = xPortGetCoreID();
        if (--s_count_of_nested_calls[cpu_id] == 0) {
            // Unblock the other core's esp_ipc_isr_waiting_for_finish_cmd loop.
            esp_ipc_isr_finish_cmd = 1;
            IPC_ISR_EXIT_CRITICAL();
            portEXIT_CRITICAL_NESTED(s_stored_interrupt_level);
        } else if (s_count_of_nested_calls[cpu_id] < 0) {
            // Unbalanced release without a matching stall call.
            assert(0);
        }
    }
}
/*
 * Temporarily disarm the stall mechanism: subsequent stall/release calls
 * become no-ops until esp_ipc_isr_stall_resume() is called.
 */
void IRAM_ATTR esp_ipc_isr_stall_pause(void)
{
    IPC_ISR_ENTER_CRITICAL();
    s_stall_state = STALL_STATE_IDLE;
    IPC_ISR_EXIT_CRITICAL();
}
/*
 * Disarm the stall mechanism without taking the spinlock — presumably for
 * panic/abort paths where the lock may be held; NOTE(review): confirm callers.
 */
void IRAM_ATTR esp_ipc_isr_stall_abort(void)
{
    s_stall_state = STALL_STATE_IDLE;
}
/*
 * Re-arm the stall mechanism after esp_ipc_isr_stall_pause().
 */
void IRAM_ATTR esp_ipc_isr_stall_resume(void)
{
    IPC_ISR_ENTER_CRITICAL();
    s_stall_state = STALL_STATE_RUNNING;
    IPC_ISR_EXIT_CRITICAL();
}
/* Backward-compatibility aliases: the legacy esp_dport_access_* API maps
 * directly onto the IPC_ISR stall functions above. */
void esp_dport_access_stall_other_cpu_start(void) __attribute__((alias("esp_ipc_isr_stall_other_cpu")));
void esp_dport_access_stall_other_cpu_end(void) __attribute__((alias("esp_ipc_isr_release_other_cpu")));
void esp_dport_access_int_pause(void) __attribute__((alias("esp_ipc_isr_stall_pause")));
void esp_dport_access_int_abort(void) __attribute__((alias("esp_ipc_isr_stall_abort")));
void esp_dport_access_int_resume(void) __attribute__((alias("esp_ipc_isr_stall_resume")));
/* End public API functions */
/* Private functions */
/*
 * Core of the IPC_ISR mechanism: publishes func/arg, raises the FROM_CPU
 * interrupt on the *other* core so its level-4 handler runs func, and spins
 * until the handler reports either start or completion via the shared flags.
 * Must be called with s_ipc_isr_mux held; the store order below is the
 * handshake protocol with the assembly handler — do not reorder.
 */
static void IRAM_ATTR esp_ipc_isr_call_and_wait(esp_ipc_isr_func_t func, void* arg, esp_ipc_isr_wait_t wait_for)
{
    const uint32_t cpu_id = xPortGetCoreID();
    // waiting for the end of the previous call
    while (!esp_ipc_isr_end_fl) {};
    esp_ipc_func = func;
    esp_ipc_func_arg = arg;
    // Clear both flags last, only after func/arg are visible.
    esp_ipc_isr_start_fl = 0;
    esp_ipc_isr_end_fl = 0;
    if (cpu_id == 0) {
        // it runs an interrupt on cpu1
        DPORT_REG_WRITE(SYSTEM_CPU_INTR_FROM_CPU_3_REG, SYSTEM_CPU_INTR_FROM_CPU_3);
    } else {
        // it runs an interrupt on cpu0
        DPORT_REG_WRITE(SYSTEM_CPU_INTR_FROM_CPU_2_REG, SYSTEM_CPU_INTR_FROM_CPU_2);
    }
    // IPC_ISR handler will be called and `...isr_start` and `...isr_end` will be updated there
    if (wait_for == IPC_ISR_WAIT_FOR_START) {
        while (!esp_ipc_isr_start_fl) {};
    } else {
        // IPC_ISR_WAIT_FOR_END
        while (!esp_ipc_isr_end_fl) {};
    }
}
/* End private functions */

View File

@@ -0,0 +1,100 @@
/*
* SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <xtensa/coreasm.h>
#include <xtensa/corebits.h>
#include <xtensa/config/system.h>
#include "freertos/xtensa_context.h"
#include "esp_private/panic_reason.h"
#include "sdkconfig.h"
#include "soc/soc.h"
#include "soc/dport_reg.h"
/* High-priority (level 4) interrupt - IPC_ISR handler.
 * Entered via the FROM_CPU_INTR2/3 interrupt raised by
 * esp_ipc_isr_call_and_wait() on the other core. */
#define L4_INTR_STACK_SIZE 16
#define L4_INTR_A0_OFFSET 0
#define L4_INTR_A2_OFFSET 4
#define L4_INTR_A3_OFFSET 8
#define L4_INTR_A4_OFFSET 12
.data
_l4_intr_stack:
.space L4_INTR_STACK_SIZE
.section .iram1,"ax"
.global esp_ipc_isr_handler
.type esp_ipc_isr_handler,@function
.align 4
esp_ipc_isr_handler:
/* Allocate exception frame and save minimal context. */
/* Because the interrupt cause code has protection that only
allows one cpu to enter in the IPC_ISR section of the L4
interrupt at one time, there's no need to have two
_l4_intr_stack for each cpu */
/* Save A0, A2, A3, A4 so we can use those registers further */
movi a0, _l4_intr_stack
s32i a2, a0, L4_INTR_A2_OFFSET
s32i a3, a0, L4_INTR_A3_OFFSET
s32i a4, a0, L4_INTR_A4_OFFSET
/* Original A0 was stashed in EXCSAVE_4 by the interrupt vector. */
rsr a2, EXCSAVE_4
s32i a2, a0, L4_INTR_A0_OFFSET
/* disable nested interrupts */
/* PS.EXCM is changed from 1 to 0. It allows using the usual exception handler instead of the double exception handler. */
/* PS_UM = 1 */
movi a0, PS_INTLEVEL(5) | PS_UM
wsr a0, PS
rsync
/* restore PS will be done by rfi the end */
/*
* Reset isr interrupt flags
*/
/* This int is edge-triggered and needs clearing. */
movi a3, (1 << ETS_IPC_ISR_INUM)
wsr a3, INTCLEAR
/* get CORE_ID */
getcoreid a3
beqz a3, 1f
/* current cpu is 1: acknowledge by clearing the FROM_CPU_3 trigger */
movi a3, SYSTEM_CPU_INTR_FROM_CPU_3_REG
movi a4, 0
s32i a4, a3, 0 /* clear intr */
j 2f
1:
/* current cpu is 0: acknowledge by clearing the FROM_CPU_2 trigger */
movi a3, SYSTEM_CPU_INTR_FROM_CPU_2_REG
movi a4, 0
s32i a4, a3, 0 /* clear intr */
2:
/* set the start flag (the flag's own address is stored into it — any
non-zero value counts as "set" for the C side's busy-wait) */
movi a0, esp_ipc_isr_start_fl
s32i a0, a0, 0
/* Call the esp_ipc_func(void* arg) */
movi a0, esp_ipc_func
l32i a0, a0, 0
movi a2, esp_ipc_func_arg
l32i a2, a2, 0
callx0 a0
/* Done. Restore registers and return. */
movi a0, _l4_intr_stack
l32i a2, a0, L4_INTR_A2_OFFSET
l32i a3, a0, L4_INTR_A3_OFFSET
l32i a4, a0, L4_INTR_A4_OFFSET
/* set the end flag (same address-as-nonzero trick as the start flag) */
movi a0, esp_ipc_isr_end_fl
s32i a0, a0, 0
/* restore a0 */
rsr a0, EXCSAVE_4
/* restores PS from EPS[4] and jumps to the address in EPC[4] */
rfi 4

View File

@@ -0,0 +1,28 @@
/*
* SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <xtensa/coreasm.h>
#include <xtensa/corebits.h>
#include <xtensa/config/system.h>
#include <xtensa/hal.h>
/* esp_ipc_isr_waiting_for_finish_cmd(void* finish_cmd)
 *
 * Busy-waits until *finish_cmd becomes non-zero — this is how one core is
 * kept stalled inside the level-4 interrupt until the other core releases it
 * (see esp_ipc_isr_stall_other_cpu / esp_ipc_isr_release_other_cpu).
 *
 * It should be called by the CALLX0 command from the handler of High-priority interrupt (4 lvl).
 * Only these registers [a2, a3, a4] can be used here.
 */
.section .iram1, "ax"
.align 4
.global esp_ipc_isr_waiting_for_finish_cmd
.type esp_ipc_isr_waiting_for_finish_cmd, @function
// Args:
// a2 - finish_cmd (pointer on esp_ipc_isr_finish_cmd)
esp_ipc_isr_waiting_for_finish_cmd:
/* waiting for the finish command */
.check_finish_cmd:
/* reload on every iteration — the other core writes this word */
l32i a3, a2, 0
beqz a3, .check_finish_cmd
ret