feat(riscv): implement coprocessors save area and FPU support

This commit mainly targets the ESP32-P4. It adds support for coprocessors on
RISC-V based targets. The coprocessor save area, which describes the coprocessors
in use, is stored at the end of each task's stack (highest address), whereas each
coprocessor's context save area is allocated at the beginning of the task's stack (lowest address).
The context of each coprocessor is saved lazily, by the task that wants to use it.
This commit is contained in:
Omar Chebib
2023-09-06 19:17:24 +08:00
parent b0124b9b9f
commit a8b1475fe7
11 changed files with 707 additions and 92 deletions

View File

@@ -59,6 +59,18 @@
#include "soc/hp_system_reg.h"
#endif
#if ( SOC_CPU_COPROC_NUM > 0 )
#include "esp_private/panic_internal.h"
/* Since `portFORCE_INLINE` is not defined in `portmacro.h`, we must define it here since it is
* used by `atomic.h`. */
#define portFORCE_INLINE inline
#include "freertos/atomic.h"
#endif // ( SOC_CPU_COPROC_NUM > 0 )
_Static_assert(portBYTE_ALIGNMENT == 16, "portBYTE_ALIGNMENT must be set to 16");
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/**
@@ -82,6 +94,13 @@ volatile UBaseType_t port_uxCriticalNesting[portNUM_PROCESSORS] = {0};
volatile UBaseType_t port_uxOldInterruptState[portNUM_PROCESSORS] = {0};
volatile UBaseType_t xPortSwitchFlag[portNUM_PROCESSORS] = {0};
#if ( SOC_CPU_COPROC_NUM > 0 )
/* Current owner of the coprocessors for each core */
StaticTask_t* port_uxCoprocOwner[portNUM_PROCESSORS][SOC_CPU_COPROC_NUM];
#endif /* SOC_CPU_COPROC_NUM > 0 */
/*
*******************************************************************************
* Interrupt stack. The size of the interrupt stack is determined by the config
@@ -104,6 +123,10 @@ StackType_t *xIsrStackBottom[portNUM_PROCESSORS] = {0};
BaseType_t xPortStartScheduler(void)
{
#if ( SOC_CPU_COPROC_NUM > 0 )
/* Disable FPU so that the first task to use it will trigger an exception */
rv_utils_disable_fpu();
#endif
/* Initialize all kernel state tracking variables */
BaseType_t coreID = xPortGetCoreID();
port_uxInterruptNesting[coreID] = 0;
@@ -238,6 +261,58 @@ static void vPortTaskWrapper(TaskFunction_t pxCode, void *pvParameters)
}
#endif // CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER
#if ( SOC_CPU_COPROC_NUM > 0 )
/**
* @brief Retrieve or allocate coprocessors save area from the given pxTopOfStack address.
*
* @param pxTopOfStack End of the stack address. This represents the highest address of a Task's stack.
*/
FORCE_INLINE_ATTR RvCoprocSaveArea* pxRetrieveCoprocSaveAreaFromStackPointer(UBaseType_t pxTopOfStack)
{
    /* The save area descriptor lives just below the highest stack address:
     * reserve room for the structure and round the address down to a
     * 16-byte boundary. */
    const UBaseType_t uxAreaAddress = pxTopOfStack - sizeof(RvCoprocSaveArea);
    return (RvCoprocSaveArea*) STACKPTR_ALIGN_DOWN(16, uxAreaAddress);
}
/**
* @brief Allocate and initialize the coprocessors save area on the stack
*
* @param[in] uxStackPointer Current stack pointer address
*
* @return Stack pointer that points to allocated and initialized the coprocessor save area
*/
FORCE_INLINE_ATTR UBaseType_t uxInitialiseCoprocSaveArea(UBaseType_t uxStackPointer)
{
    /* Carve the coprocessor save area out of the top of the stack and zero it,
     * so that no coprocessor appears used or allocated yet. */
    RvCoprocSaveArea* pxArea = pxRetrieveCoprocSaveAreaFromStackPointer(uxStackPointer);
    memset(pxArea, 0, sizeof(*pxArea));
    /* The returned address becomes the new "top of stack" for further init */
    return (UBaseType_t) pxArea;
}
static void vPortCleanUpCoprocArea(void *pvTCB)
{
StaticTask_t* task = (StaticTask_t*) pvTCB;
/* Get a pointer to the task's coprocessor save area */
const UBaseType_t bottomstack = (UBaseType_t) task->pxDummy8;
RvCoprocSaveArea* sa = pxRetrieveCoprocSaveAreaFromStackPointer(bottomstack);
/* If the Task used any coprocessor, check if it is the actual owner of any.
* If yes, reset the owner. */
if (sa->sa_enable != 0) {
/* Get the core the task is pinned on */
const BaseType_t coreID = task->xDummyCoreID;
for (int i = 0; i < SOC_CPU_COPROC_NUM; i++) {
StaticTask_t** owner = &port_uxCoprocOwner[coreID][i];
/* If the owner is `task`, replace it with NULL atomically */
Atomic_CompareAndSwapPointers_p32((void**) owner, NULL, task);
}
}
}
#endif /* SOC_CPU_COPROC_NUM > 0 */
/**
* @brief Initialize the task's starting interrupt stack frame
*
@@ -304,12 +379,38 @@ StackType_t *pxPortInitialiseStack(StackType_t *pxTopOfStack, TaskFunction_t pxC
- All stack areas are aligned to 16 byte boundary
- We use UBaseType_t for all of stack area initialization functions for more convenient pointer arithmetic
In the case of targets that have coprocessors, the stack is presented as follows:
HIGH ADDRESS
|---------------------------| <- pxTopOfStack on entry
| Coproc. Save Area | <- RvCoprocSaveArea
| ------------------------- |
| TLS Variables |
| ------------------------- | <- Start of useable stack
| Starting stack frame |
| ------------------------- | <- pxTopOfStack on return (which is the tasks current SP)
| | |
| | |
| V |
|---------------------------|
| Coproc. m Saved Context | <- Coprocessor context save area after allocation
|---------------------------|
| Coproc. n Saved Context | <- Another coprocessor context save area after allocation
----------------------------- <- Bottom of stack
LOW ADDRESS
Where m != n, n < SOC_CPU_COPROC_NUM, m < SOC_CPU_COPROC_NUM
*/
UBaseType_t uxStackPointer = (UBaseType_t)pxTopOfStack;
configASSERT((uxStackPointer & portBYTE_ALIGNMENT_MASK) == 0);
// IDF-7770: Support FPU context save area for P4
#if ( SOC_CPU_COPROC_NUM > 0 )
// Initialize the coprocessors save area
uxStackPointer = uxInitialiseCoprocSaveArea(uxStackPointer);
configASSERT((uxStackPointer & portBYTE_ALIGNMENT_MASK) == 0);
#endif // SOC_CPU_COPROC_NUM > 0
// Initialize GCC TLS area
uint32_t threadptr_reg_init;
@@ -647,8 +748,104 @@ void vPortTCBPreDeleteHook( void *pxTCB )
/* Call TLS pointers deletion callbacks */
vPortTLSPointersDelCb( pxTCB );
#endif /* CONFIG_FREERTOS_TLSP_DELETION_CALLBACKS */
#if ( SOC_CPU_COPROC_NUM > 0 )
/* Cleanup coproc save area */
vPortCleanUpCoprocArea( pxTCB );
#endif /* SOC_CPU_COPROC_NUM > 0 */
}
#if ( SOC_CPU_COPROC_NUM > 0 )
// ----------------------- Coprocessors --------------------------
/**
 * @brief Pin the given task to the given core
 *
 * This function is called when a task uses a coprocessor. Since the coprocessors registers
 * are saved lazily, as soon as a task starts using one, it must always be scheduled on the core
 * it is currently executing on.
 *
 * @param task   Task to pin (TCB seen through its `StaticTask_t` layout)
 * @param coreid Core the task must exclusively run on from now on
 */
void vPortTaskPinToCore(StaticTask_t* task, int coreid)
{
    /* xDummyCoreID mirrors the TCB's core-affinity field; the scheduler will
     * only elect this task on `coreid` after this write. */
    task->xDummyCoreID = coreid;
}
/**
 * @brief Get coprocessor save area out of the given task. If the coprocessor area is not created,
 *        it shall be allocated.
 *
 * @param task   Task to retrieve (or lazily create) the coprocessor save area of
 * @param coproc Index of the coprocessor whose context area must be available
 *
 * @return Pointer to the task's coprocessor save area descriptor (never NULL)
 */
RvCoprocSaveArea* pxPortGetCoprocArea(StaticTask_t* task, int coproc)
{
    /* The save area descriptor is stored at the highest address of the task's stack */
    const UBaseType_t bottomstack = (UBaseType_t) task->pxDummy8;
    RvCoprocSaveArea* sa = pxRetrieveCoprocSaveAreaFromStackPointer(bottomstack);
    /* Check if the allocator is NULL. Since we don't have a way to get the end of the stack
     * during its initialization, we have to do this here. `pxDummy6` mirrors the TCB's
     * `pxStack`, i.e. the lowest address of the task's stack. */
    if (sa->sa_allocator == 0) {
        sa->sa_allocator = (UBaseType_t) task->pxDummy6;
    }
    /* Check if coprocessor area is allocated */
    if (sa->sa_coprocs[coproc] == NULL) {
        const uint32_t coproc_sa_sizes[] = {
            RV_COPROC0_SIZE, RV_COPROC1_SIZE
        };
        /* BUGFIX: the context area must START at the current allocator address.
         * The former code stored `sa_allocator + size` into `sa_coprocs[coproc]`,
         * i.e. a pointer to the END of the reserved region; since the assembly
         * save/restore routines use positive RV_FPU_* offsets from this base,
         * the saved context would spill past the reserved region and overlap
         * the next coprocessor area / usable stack. */
        sa->sa_coprocs[coproc] = (void*) sa->sa_allocator;
        /* Update the allocator address for next use */
        sa->sa_allocator += coproc_sa_sizes[coproc];
    }
    return sa;
}
/**
 * @brief Update given coprocessor owner and get the address of former owner's save area.
 *
 * Called when the running task has touched a coprocessor's register that a previous
 * task was using. The former owner's context must be saved into its own save area
 * before the new owner takes over; the coprocessor is then marked as used in the
 * new owner's coprocessor save area (by the caller).
 *
 * @param coreid Current core
 * @param coproc Coprocessor to save context of
 * @param owner  Task taking ownership of the coprocessor
 *
 * @returns Former owner's coprocessor save area, or NULL if there was no former owner
 */
RvCoprocSaveArea* pxPortUpdateCoprocOwner(int coreid, int coproc, StaticTask_t* owner)
{
    /* Slot tracking the current owner of this coprocessor on this core */
    StaticTask_t** ppxOwnerSlot = &port_uxCoprocOwner[coreid][coproc];
    /* Publish the new owner and fetch the previous one in one atomic step */
    StaticTask_t* pxFormer = Atomic_SwapPointers_p32((void**) ppxOwnerSlot, owner);
    /* Only an actual former owner has a context worth saving */
    return (pxFormer == NULL) ? NULL : pxPortGetCoprocArea(pxFormer, coproc);
}
/**
 * @brief Aborts execution when a coprocessor was used in an ISR context
 *
 * @param frame Pointer to the interruptee's exception frame (RvExcFrame)
 *
 * Does not return: control is handed to the panic handler.
 */
void vPortCoprocUsedInISR(void* frame)
{
    extern void xt_unhandled_exception(void*);
    /* Since this function is called from an exception handler, the interrupts are disabled,
     * as such, it is not possible to trigger another exception as would `abort` do.
     * Simulate an abort without actually triggering an exception. */
    g_panic_abort = true;
    g_panic_abort_details = (char *) "ERROR: Coprocessors must not be used in ISRs!\n";
    xt_unhandled_exception(frame);
}
#endif /* SOC_CPU_COPROC_NUM > 0 */
/* ---------------------------------------------- Misc Implementations -------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */

View File

@@ -7,8 +7,9 @@
#include "portmacro.h"
#include "freertos/FreeRTOSConfig.h"
#include "soc/soc_caps.h"
#include "riscv/rvruntime-frames.h"
.extern pxCurrentTCBs
.extern pxCurrentTCBs
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
#include "esp_private/hw_stack_guard.h"
@@ -22,8 +23,6 @@
.global xPortSwitchFlag
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
.global xIsrStackBottom
.global port_offset_pxStack
.global port_offset_pxEndOfStack
.global esp_hw_stack_guard_monitor_stop
.global esp_hw_stack_guard_monitor_start
.global esp_hw_stack_guard_set_bounds
@@ -31,6 +30,210 @@
.section .text
#if SOC_CPU_COPROC_NUM > 0
#if SOC_CPU_HAS_FPU
/* mstatus.FS is a 2-bit field at bits [14:13]. Setting the low bit (FS = 01,
 * "Initial") is enough to enable the FPU. */
#define CSR_MSTATUS_FPU_ENABLE (1 << 13)
/* Clearing both FS bits (FS = 00, "Off") disables the FPU: any further FPU
 * access then raises an illegal-instruction exception. */
#define CSR_MSTATUS_FPU_DISABLE (3 << 13)
/* Store the whole single-precision FPU register file (ft0-ft11, fs0-fs11,
 * fa0-fa7) into the save area pointed to by \frame, at the RV_FPU_* offsets.
 * Note: fcsr is NOT saved here, the caller is responsible for it. */
.macro save_fpu_regs frame=sp
fsw ft0, RV_FPU_FT0(\frame)
fsw ft1, RV_FPU_FT1(\frame)
fsw ft2, RV_FPU_FT2(\frame)
fsw ft3, RV_FPU_FT3(\frame)
fsw ft4, RV_FPU_FT4(\frame)
fsw ft5, RV_FPU_FT5(\frame)
fsw ft6, RV_FPU_FT6(\frame)
fsw ft7, RV_FPU_FT7(\frame)
fsw fs0, RV_FPU_FS0(\frame)
fsw fs1, RV_FPU_FS1(\frame)
fsw fa0, RV_FPU_FA0(\frame)
fsw fa1, RV_FPU_FA1(\frame)
fsw fa2, RV_FPU_FA2(\frame)
fsw fa3, RV_FPU_FA3(\frame)
fsw fa4, RV_FPU_FA4(\frame)
fsw fa5, RV_FPU_FA5(\frame)
fsw fa6, RV_FPU_FA6(\frame)
fsw fa7, RV_FPU_FA7(\frame)
fsw fs2, RV_FPU_FS2(\frame)
fsw fs3, RV_FPU_FS3(\frame)
fsw fs4, RV_FPU_FS4(\frame)
fsw fs5, RV_FPU_FS5(\frame)
fsw fs6, RV_FPU_FS6(\frame)
fsw fs7, RV_FPU_FS7(\frame)
fsw fs8, RV_FPU_FS8(\frame)
fsw fs9, RV_FPU_FS9(\frame)
fsw fs10, RV_FPU_FS10(\frame)
fsw fs11, RV_FPU_FS11(\frame)
fsw ft8, RV_FPU_FT8 (\frame)
fsw ft9, RV_FPU_FT9 (\frame)
fsw ft10, RV_FPU_FT10(\frame)
fsw ft11, RV_FPU_FT11(\frame)
.endm
/* Reload the whole single-precision FPU register file (ft0-ft11, fs0-fs11,
 * fa0-fa7) from the save area pointed to by \frame, mirroring save_fpu_regs.
 * Note: fcsr is NOT restored here, the caller is responsible for it. */
.macro restore_fpu_regs frame=sp
flw ft0, RV_FPU_FT0(\frame)
flw ft1, RV_FPU_FT1(\frame)
flw ft2, RV_FPU_FT2(\frame)
flw ft3, RV_FPU_FT3(\frame)
flw ft4, RV_FPU_FT4(\frame)
flw ft5, RV_FPU_FT5(\frame)
flw ft6, RV_FPU_FT6(\frame)
flw ft7, RV_FPU_FT7(\frame)
flw fs0, RV_FPU_FS0(\frame)
flw fs1, RV_FPU_FS1(\frame)
flw fa0, RV_FPU_FA0(\frame)
flw fa1, RV_FPU_FA1(\frame)
flw fa2, RV_FPU_FA2(\frame)
flw fa3, RV_FPU_FA3(\frame)
flw fa4, RV_FPU_FA4(\frame)
flw fa5, RV_FPU_FA5(\frame)
flw fa6, RV_FPU_FA6(\frame)
flw fa7, RV_FPU_FA7(\frame)
flw fs2, RV_FPU_FS2(\frame)
flw fs3, RV_FPU_FS3(\frame)
flw fs4, RV_FPU_FS4(\frame)
flw fs5, RV_FPU_FS5(\frame)
flw fs6, RV_FPU_FS6(\frame)
flw fs7, RV_FPU_FS7(\frame)
flw fs8, RV_FPU_FS8(\frame)
flw fs9, RV_FPU_FS9(\frame)
flw fs10, RV_FPU_FS10(\frame)
flw fs11, RV_FPU_FS11(\frame)
flw ft8, RV_FPU_FT8(\frame)
flw ft9, RV_FPU_FT9(\frame)
flw ft10, RV_FPU_FT10(\frame)
flw ft11, RV_FPU_FT11(\frame)
.endm
/* Extract bit 13 of mstatus (low bit of the 2-bit FS field) into \reg.
 * NOTE(review): the FS field spans bits [14:13] and "Dirty" is FS == 11b;
 * this only tests the low bit (set for both Initial and Dirty) — confirm
 * this is the intended check at every call site. */
.macro fpu_read_dirty_bit reg
csrr \reg, mstatus
srli \reg, \reg, 13
andi \reg, \reg, 1
.endm
/* Clear bit 13 of mstatus. \reg is used as a scratch register. */
.macro fpu_clear_dirty_bit reg
li \reg, 1 << 13
csrc mstatus, \reg
.endm
/* Enable the FPU by setting mstatus.FS to a non-zero state (sets bit 13,
 * FS becomes Initial or Dirty). \reg is used as a scratch register. */
.macro fpu_enable reg
li \reg, CSR_MSTATUS_FPU_ENABLE
csrs mstatus, \reg
.endm
/* Disable the FPU by clearing both mstatus.FS bits (FS = Off). Any FPU
 * instruction executed afterwards traps. \reg is used as a scratch register. */
.macro fpu_disable reg
li \reg, CSR_MSTATUS_FPU_DISABLE
csrc mstatus, \reg
.endm
.global vPortTaskPinToCore
.global vPortCoprocUsedInISR
.global pxPortUpdateCoprocOwner
/**
 * @brief Save the current FPU context in the FPU owner's save area and restore
 *        the calling task's own FPU context (if any), taking FPU ownership.
 *
 * @param sp Interruptee's RvExcFrame address
 *
 * Note: Since this routine is ONLY meant to be called from _panic_handler routine,
 * it is possible to alter `s0-s11` registers
 */
.global rtos_save_fpu_coproc
.type rtos_save_fpu_coproc, @function
rtos_save_fpu_coproc:
/* If we are in an interrupt context, we have to abort. We don't allow using the FPU from ISR */
#if ( configNUM_CORES > 1 )
csrr a2, mhartid /* a2 = coreID */
slli a2, a2, 2 /* a2 = coreID * 4 */
la a1, port_uxInterruptNesting /* a1 = &port_uxInterruptNesting */
add a1, a1, a2 /* a1 = &port_uxInterruptNesting[coreID] */
lw a1, 0(a1) /* a1 = port_uxInterruptNesting[coreID] */
#else /* ( configNUM_CORES <= 1 ) */
lw a1, (port_uxInterruptNesting) /* a1 = port_uxInterruptNesting */
#endif /* ( configNUM_CORES > 1 ) */
/* SP still contains the RvExcFrame address */
mv a0, sp
/* Tail-jump to the abort routine with a0 = frame; `ra` is still the caller's */
bnez a1, vPortCoprocUsedInISR
/* Enable the FPU needed by the current task */
fpu_enable a1
/* Preserve the caller's return address: the calls below clobber `ra` */
mv s0, ra
call rtos_current_tcb
/* If the current TCB is NULL, the FPU is used during initialization, even before
 * the scheduler started. Consider this a valid usage, the FPU will be disabled
 * as soon as the scheduler is started anyway*/
beqz a0, rtos_save_fpu_coproc_norestore
mv s1, a0 /* s1 = pxCurrentTCBs */
/* Prepare parameters of pxPortUpdateCoprocOwner(coreid, coproc, owner) */
mv a2, a0
li a1, FPU_COPROC_IDX
csrr a0, mhartid
call pxPortUpdateCoprocOwner
/* If the save area is NULL, no need to save context */
beqz a0, rtos_save_fpu_coproc_nosave
/* Save the FPU context (registers + fcsr) in the former owner's structure */
lw a0, RV_COPROC_SA+FPU_COPROC_IDX*4(a0) /* a0 = RvCoprocSaveArea->sa_coprocs[FPU_COPROC_IDX] */
save_fpu_regs a0
csrr a1, fcsr
sw a1, RV_FPU_FCSR(a0)
rtos_save_fpu_coproc_nosave:
/* Pin current task to current core: lazy FPU context cannot migrate cores */
mv a0, s1
csrr a1, mhartid
call vPortTaskPinToCore
/* Check if we have to restore a previous FPU context from the current TCB */
mv a0, s1
call pxPortGetCoprocArea
/* Get the enable flags from the coprocessor save area */
lw a1, RV_COPROC_ENABLE(a0)
/* To avoid having branches below, set the FPU enable flag now */
ori a2, a1, 1 << FPU_COPROC_IDX
sw a2, RV_COPROC_ENABLE(a0)
/* Check if the former FPU enable bit was set */
andi a2, a1, 1 << FPU_COPROC_IDX
beqz a2, rtos_save_fpu_coproc_norestore
/* FPU enable bit was set, restore the FPU context (registers + fcsr) */
lw a0, RV_COPROC_SA+FPU_COPROC_IDX*4(a0) /* a0 = RvCoprocSaveArea->sa_coprocs[FPU_COPROC_IDX] */
restore_fpu_regs a0
lw a1, RV_FPU_FCSR(a0)
csrw fcsr, a1
rtos_save_fpu_coproc_norestore:
/* Return from routine via s0, instead of ra */
jr s0
.size rtos_save_fpu_coproc, .-rtos_save_fpu_coproc
#endif /* SOC_CPU_HAS_FPU */
#endif /* SOC_CPU_COPROC_NUM > 0 */
/**
 * @brief Get current TCB on current core
 *
 * @returns a0 = pxCurrentTCBs[coreID] (NULL before the scheduler has started)
 *
 * Clobbers: a0, a1
 */
.type rtos_current_tcb, @function
rtos_current_tcb:
#if ( configNUM_CORES > 1 )
csrr a1, mhartid /* a1 = coreID */
slli a1, a1, 2 /* a1 = coreID * 4 */
la a0, pxCurrentTCBs /* a0 = &pxCurrentTCBs */
add a0, a0, a1 /* a0 = &pxCurrentTCBs[coreID] */
lw a0, 0(a0) /* a0 = pxCurrentTCBs[coreID] */
#else
/* Single core: load the TCB of the only core (fixed the stale comment that
 * claimed this recovers the next task's stack) */
lw a0, pxCurrentTCBs
#endif /* ( configNUM_CORES > 1 ) */
ret
/* BUGFIX: `.size` requires the symbol name as its first operand; the former
 * `.size, .-rtos_current_tcb` does not assemble with GNU as */
.size rtos_current_tcb, .-rtos_current_tcb
/**
* This function makes the RTOS aware about an ISR entering. It takes the
* current task stack pointer and places it into the pxCurrentTCBs.
@@ -65,6 +268,13 @@ rtos_int_enter:
/* If we reached here from another low-priority ISR, i.e, port_uxInterruptNesting[coreID] > 0, then skip stack pushing to TCB */
bnez a1, rtos_int_enter_end /* if (port_uxInterruptNesting[coreID] > 0) jump to rtos_int_enter_end */
#if SOC_CPU_COPROC_NUM > 0
/* Disable the FPU to forbid the ISR from using it. We don't need to re-enable it manually since the caller
* will restore `mstatus` before returning from interrupt. */
fpu_disable a0
#endif /* SOC_CPU_COPROC_NUM > 0 */
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* esp_hw_stack_guard_monitor_stop(); pass the scratch registers */
ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE a0 a1
@@ -106,11 +316,19 @@ rtos_int_enter_end:
ret
/**
* Restore the stack pointer of the next task to run.
* @brief Restore the stack pointer of the next task to run.
*
* @param a0 Former mstatus
*
* @returns New mstatus (potentially with coprocessors disabled)
*/
.global rtos_int_exit
.type rtos_int_exit, @function
rtos_int_exit:
/* To speed up this routine and because this current routine is only meant to be called from the interrupt
* handler, let's use callee-saved registers instead of stack space. Registers `s3-s11` are not used by
* the caller */
mv s11, a0
#if ( configNUM_CORES > 1 )
csrr a1, mhartid /* a1 = coreID */
slli a1, a1, 2 /* a1 = a1 * 4 */
@@ -120,21 +338,21 @@ rtos_int_exit:
#else
lw a0, port_xSchedulerRunning /* a0 = port_xSchedulerRunning */
#endif /* ( configNUM_CORES > 1 ) */
beqz a0, rtos_int_exit_end /* if (port_uxSchewdulerRunning == 0) jump to rtos_int_exit_end */
beqz a0, rtos_int_exit_end /* if (port_uxSchedulerRunning == 0) jump to rtos_int_exit_end */
/* Update nesting interrupts counter */
la a0, port_uxInterruptNesting /* a0 = &port_uxInterruptNesting */
la a2, port_uxInterruptNesting /* a2 = &port_uxInterruptNesting */
#if ( configNUM_CORES > 1 )
add a0, a0, a1 /* a0 = &port_uxInterruptNesting[coreID] // a1 already contains coreID * 4 */
add a2, a2, a1 /* a2 = &port_uxInterruptNesting[coreID] // a1 already contains coreID * 4 */
#endif /* ( configNUM_CORES > 1 ) */
lw a2, 0(a0) /* a2 = port_uxInterruptNesting[coreID] */
lw a0, 0(a2) /* a0 = port_uxInterruptNesting[coreID] */
/* Already zero, protect against underflow */
beqz a2, isr_skip_decrement /* if (port_uxInterruptNesting[coreID] == 0) jump to isr_skip_decrement */
addi a2, a2, -1 /* a2 = a2 - 1 */
sw a2, 0(a0) /* port_uxInterruptNesting[coreID] = a2 */
beqz a0, isr_skip_decrement /* if (port_uxInterruptNesting[coreID] == 0) jump to isr_skip_decrement */
addi a0, a0, -1 /* a0 = a0 - 1 */
sw a0, 0(a2) /* port_uxInterruptNesting[coreID] = a0 */
/* May still have interrupts pending, skip section below and exit */
bnez a2, rtos_int_exit_end
bnez a0, rtos_int_exit_end
isr_skip_decrement:
/* If the CPU reached this label, a2 (uxInterruptNesting) is 0 for sure */
@@ -147,11 +365,27 @@ isr_skip_decrement:
lw a2, 0(a0) /* a2 = xPortSwitchFlag[coreID] */
beqz a2, no_switch /* if (xPortSwitchFlag[coreID] == 0) jump to no_switch */
/* Preserve return address and schedule next task. To speed up the process, instead of allocating stack
* space, let's use a callee-saved register: s0. Since the caller is not using it, let's use it. */
mv s0, ra
/* Preserve return address and schedule next task. To speed up the process, and because this current routine
* is only meant to be called from the interrupt handle, let's save some speed and space by using callee-saved
* registers instead of stack space. Registers `s3-s11` are not used by the caller */
mv s10, ra
#if ( SOC_CPU_COPROC_NUM > 0 )
/* In the cases where the newly scheduled task is different from the previously running one,
* we have to disable the coprocessor(s) to let them trigger an exception on first use.
* Else, if the same task is scheduled, do not change the coprocessor(s) state. */
call rtos_current_tcb
mv s9, a0
call vTaskSwitchContext
mv ra, s0
call rtos_current_tcb
beq a0, s9, rtos_int_exit_no_change
/* Disable the coprocessors in s11 register (former mstatus) */
li a0, ~CSR_MSTATUS_FPU_DISABLE
and s11, s11, a0
rtos_int_exit_no_change:
#else /* ( SOC_CPU_COPROC_NUM == 0 ) */
call vTaskSwitchContext
#endif /* ( SOC_CPU_COPROC_NUM > 0 ) */
mv ra, s10
/* Clears the switch pending flag */
la a0, xPortSwitchFlag /* a0 = &xPortSwitchFlag */
@@ -198,4 +432,5 @@ no_switch:
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
rtos_int_exit_end:
mv a0, s11 /* a0 = new mstatus */
ret