Merge branch 'change/freertos_local_crit_section_macro_v5.2' into 'release/v5.2'

change(freertos/idf): Refactor thread safety convenience macros (v5.2)

See merge request espressif/esp-idf!26997
Marius Vikhammer
2023-11-16 11:23:25 +08:00
5 changed files with 144 additions and 198 deletions
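In short: the IDF-specific thread-safety convenience macros in tasks.c, a family of wrappers that expand to real critical sections or to nothing depending on whether the build targets SMP ( configNUMBER_OF_CORES > 1 ) or a single core, are renamed from a task prefix to a prv prefix, following the FreeRTOS convention that prv marks identifiers private to one source file. The rename also fixes the inconsistently cased taskEnable_INTERRUPTS_ISR_SMP_ONLY (now prvENABLE_INTERRUPTS_ISR_SMP_ONLY). A condensed, illustrative sketch of the pattern, abridged from the definitions removed below rather than quoted from the new code:

#if ( configNUMBER_OF_CORES > 1 )
    /* SMP build: the helper expands to a real critical section. */
    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )    taskENTER_CRITICAL( pxLock )
    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )     taskEXIT_CRITICAL( pxLock )
#else
    /* Single-core build: the helper compiles to nothing, so call sites
     * need no #if guards and incur no overhead. */
    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )
    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )
#endif

Call sites such as prvAddNewTaskToReadyList() below then take xKernelLock unconditionally through the helper, with the lock operations disappearing entirely on single-core builds.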

components/freertos/FreeRTOS-Kernel/tasks.c

@@ -65,74 +65,6 @@
#include <stdio.h>
#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-/* Macros that enter/exit a critical section only when building for SMP */
-#define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
-#define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
-#define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock )
-#define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock )
-#define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock )
-#define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock )
-/* Macros that enter/exit a critical section only when building for single-core */
-#define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
-#define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
-/* Macros that enable/disable interrupts only when building for SMP */
-#define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() portSET_INTERRUPT_MASK_FROM_ISR()
-#define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
-static inline __attribute__( ( always_inline ) )
-void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-{
-    if( portCHECK_IF_IN_ISR() == pdFALSE )
-    {
-        taskENTER_CRITICAL( pxLock );
-    }
-    else
-    {
-        #ifdef __clang_analyzer__
-            /* Teach clang-tidy that ISR version macro can be different */
-            configASSERT( 1 );
-        #endif
-        taskENTER_CRITICAL_ISR( pxLock );
-    }
-}
-static inline __attribute__( ( always_inline ) )
-void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-{
-    if( portCHECK_IF_IN_ISR() == pdFALSE )
-    {
-        taskEXIT_CRITICAL( pxLock );
-    }
-    else
-    {
-        #ifdef __clang_analyzer__
-            /* Teach clang-tidy that ISR version macro can be different */
-            configASSERT( 1 );
-        #endif
-        taskEXIT_CRITICAL_ISR( pxLock );
-    }
-}
-#else /* configNUMBER_OF_CORES > 1 */
-/* Macros that enter/exit a critical section only when building for SMP */
-#define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-#define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-#define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
-#define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
-#define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-#define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-/* Macros that enter/exit a critical section only when building for single-core */
-#define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
-#define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
-/* Macros that enable/disable interrupts only when building for SMP */
-#define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() ( ( UBaseType_t ) 0 )
-#define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) ( ( void ) uxStatus )
-#endif /* configNUMBER_OF_CORES > 1 */
#if ( configUSE_PREEMPTION == 0 )
/* If the cooperative scheduler is being used then a yield should not be
@@ -1475,7 +1407,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
/* Force a reschedule if it is the currently running task that has just
* been deleted. */
@@ -1493,7 +1425,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
}
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
}
#endif /* INCLUDE_vTaskDelete */
@@ -2447,7 +2379,7 @@ void vTaskStartScheduler( void )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
#if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
{
@@ -2462,7 +2394,7 @@ void vTaskStartScheduler( void )
xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
/* If configGENERATE_RUN_TIME_STATS is defined then the following
* macro must be defined to configure the timer/counter used to generate
@@ -2512,12 +2444,12 @@ void vTaskEndScheduler( void )
/* For SMP, we need to take the kernel lock here as we are about to access
* kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
xSchedulerRunning = pdFALSE;
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
vPortEndScheduler();
}
/*----------------------------------------------------------*/
@@ -2767,7 +2699,7 @@ TickType_t xTaskGetTickCountFromISR( void )
/* For SMP, we need to take the kernel lock here as we are about to access
* kernel data structures. */
-taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
{
uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
{
@@ -2776,7 +2708,7 @@ TickType_t xTaskGetTickCountFromISR( void )
portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
return xReturn;
}
@@ -3175,7 +3107,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
* the event list too. Interrupts can touch the event list item,
* even though the scheduler is suspended, so a critical section
* is used. */
-taskENTER_CRITICAL_SC_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SC_ONLY( &xKernelLock );
{
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
@@ -3191,7 +3123,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
mtCOVERAGE_TEST_MARKER();
}
}
-taskEXIT_CRITICAL_SC_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SC_ONLY( &xKernelLock );
/* Place the unblocked task into the appropriate ready list. */
prvAddTaskToReadyList( pxTCB );
@@ -3254,7 +3186,7 @@ BaseType_t xTaskIncrementTick( void )
/* For SMP, we need to take the kernel lock here as we are about to access
* kernel data structures (unlike single core which calls this function with
* interrupts disabled). */
-taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
{
if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
{
@@ -3422,7 +3354,7 @@ BaseType_t xTaskIncrementTick( void )
/* Release the previously taken kernel lock as we have finished accessing
* the kernel data structures. */
-taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
#if ( configUSE_TICK_HOOK == 1 )
{
@@ -3645,7 +3577,7 @@ void vTaskSwitchContext( void )
/* For SMP, we need to take the kernel lock here as we are about to access
* kernel data structures (unlike single core which calls this function with
* either interrupts disabled or when the scheduler hasn't started yet). */
-taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
{
/* Get current core ID as we can no longer be preempted. */
const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -3730,7 +3662,7 @@ void vTaskSwitchContext( void )
/* Release the previously taken kernel lock as we have finished accessing
* the kernel data structures. */
-taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
}
/*-----------------------------------------------------------*/
@@ -3745,7 +3677,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
/* For SMP, we need to take the kernel lock here as we are about to access
* kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
/* Place the event list item of the TCB in the appropriate event list.
* This is placed in the list in priority order so the highest priority task
@@ -3763,7 +3695,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
}
/*-----------------------------------------------------------*/
@@ -3791,7 +3723,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
/* For SMP, we need to take the kernel lock here as we are about to access
* kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
/* Store the item value in the event list item. It is safe to access the
* event list item here as interrupts won't access the event list item of a
@@ -3808,7 +3740,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
}
/*-----------------------------------------------------------*/
@@ -3828,7 +3760,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
/* For SMP, we need to take the kernel lock here as we are about to access
* kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
/* Place the event list item of the TCB in the appropriate event list.
* In this case it is assume that this is the only task that is going to
@@ -4627,7 +4559,7 @@ static void prvCheckTasksWaitingTermination( void )
/* A critical section is required for SMP in case another core modifies
* the task simultaneously. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
@@ -4729,7 +4661,7 @@ static void prvCheckTasksWaitingTermination( void )
}
}
/* Exit the previously entered critical section. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
}
#endif /* configUSE_TRACE_FACILITY */
@@ -4951,11 +4883,11 @@ static void prvResetNextTaskUnblockTime( void )
* For single-core a critical section is not required as this is not
* called from an interrupt and the current TCB will always be the same
* for any individual execution thread. */
-uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
{
xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
}
-taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
return xReturn;
}
@@ -4979,7 +4911,7 @@ static void prvResetNextTaskUnblockTime( void )
*
* We use the ISR versions of interrupt macros as this function could be
* called inside critical sections. */
-uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
{
if( xSchedulerRunning == pdFALSE )
{
@@ -4997,7 +4929,7 @@ static void prvResetNextTaskUnblockTime( void )
}
}
}
-taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
return xReturn;
}
@@ -5014,7 +4946,7 @@ static void prvResetNextTaskUnblockTime( void )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
/* Get current core ID as we can no longer be preempted. */
const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5097,7 +5029,7 @@ static void prvResetNextTaskUnblockTime( void )
}
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
return xReturn;
}
@@ -5114,7 +5046,7 @@ static void prvResetNextTaskUnblockTime( void )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
if( pxMutexHolder != NULL )
{
@@ -5184,7 +5116,7 @@ static void prvResetNextTaskUnblockTime( void )
}
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
return xReturn;
}
@@ -5203,7 +5135,7 @@ static void prvResetNextTaskUnblockTime( void )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
if( pxMutexHolder != NULL )
{
@@ -5299,7 +5231,7 @@ static void prvResetNextTaskUnblockTime( void )
}
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
}
#endif /* configUSE_MUTEXES */
@@ -5634,7 +5566,7 @@ TickType_t uxTaskResetEventItemValue( void )
/* For SMP, we need to take the kernel lock here to ensure nothing else
* modifies the task's event item value simultaneously. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
/* Get current core ID as we can no longer be preempted. */
const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5645,7 +5577,7 @@ TickType_t uxTaskResetEventItemValue( void )
* queues and semaphores. */
listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
}
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
/* Release the previously taken kernel lock. */
return uxReturn;
@@ -5660,7 +5592,7 @@ TickType_t uxTaskResetEventItemValue( void )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
-taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
{
/* Get current core ID as we can no longer be preempted. */
const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5675,7 +5607,7 @@ TickType_t uxTaskResetEventItemValue( void )
xReturn = pxCurrentTCBs[ xCurCoreID ];
}
/* Release the previously taken kernel lock. */
-taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
return xReturn;
}
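A note on the _SAFE_ variants used in xTaskIncrementTick() and vTaskSwitchContext() above: on SMP builds they dispatch at run time between the task and ISR flavours of the critical-section macros via portCHECK_IF_IN_ISR(), as in the prvTaskEnterCriticalSafeSMPOnly() helper removed from the top of the file, so a function callable from both contexts needs only one code path. A hypothetical caller, sketched against the renamed macros (prvTouchKernelData() is illustrative and not part of this commit):

/* Hypothetical illustration only; not part of the diff. */
static void prvTouchKernelData( void )
{
    /* Safe whether invoked from a task or an ISR: on SMP the SAFE variant
     * checks portCHECK_IF_IN_ISR() and picks the matching enter/exit macro;
     * on single-core it compiles away entirely. */
    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
    {
        /* ... access kernel data structures ... */
    }
    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
}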