freertos(IDF): Sync cosmetic differences to v10.4.3

This commit synchronizes the cosmetic differences between IDF FreeRTOS and
upstream Vanilla v10.4.3. Cosmetic differences include:

- Out-of-date Doxygen API descriptions
  - Misnamed parameters
  - Missing examples
  - Missing or misplaced @cond/@code directives
- Extra/missing comments/lines
- Code formatting (uncrustify)

Other changes:

- Some ESP_PLATFORM directives were also removed
- xTaskIncrementTickOtherCores() is now guarded by "configNUM_CORES > 1" (see the sketch after this list)
- Updated some multi-core dummy variable names in FreeRTOS.h
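
For single-core builds (configNUM_CORES == 1) the guard change means the
cross-core tick function is compiled out entirely. A minimal sketch of the
resulting guard structure, with the body elided (taken from the tasks.c hunk
below):

    #ifdef ESP_PLATFORM
        #if ( configNUM_CORES > 1 )
            BaseType_t xTaskIncrementTickOtherCores( void )
            {
                /* Cross-core tick handling; full body in the diff below. */
            }
        #endif /* ( configNUM_CORES > 1 ) */
    #endif // ESP_PLATFORM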
Darian Leung
2022-11-08 17:35:06 +08:00
parent ae3383ddc5
commit fce4ee0b80
8 changed files with 485 additions and 320 deletions


@@ -3360,93 +3360,91 @@ BaseType_t xTaskIncrementTick( void )
return xSwitchRequired;
}
#ifdef ESP_PLATFORM
    #if ( configNUM_CORES > 1 )
        BaseType_t xTaskIncrementTickOtherCores( void )
        {
            /* Minor optimization. This function can never switch cores mid
             * execution */
            BaseType_t xCoreID = xPortGetCoreID();
            BaseType_t xSwitchRequired = pdFALSE;

            /* This function should never be called by Core 0. */
            configASSERT( xCoreID != 0 );

            /* Called by the portable layer each time a tick interrupt occurs.
             * Increments the tick then checks to see if the new tick value will cause any
             * tasks to be unblocked. */
            traceTASK_INCREMENT_TICK( xTickCount );

            if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
            {
                /* We need a critical section here as we are about to access kernel data
                 * structures:
                 * - Other cores could be accessing them simultaneously
                 * - Unlike other ports, we call xTaskIncrementTick() without disabling
                 *   nested interrupts, which in turn is disabled by the critical
                 *   section. */
                taskENTER_CRITICAL_ISR( &xKernelLock );

                /* A task being unblocked cannot cause an immediate context switch
                 * if preemption is turned off. */
                #if ( configUSE_PREEMPTION == 1 )
                {
                    /* Check if core 0 calling xTaskIncrementTick() has
                     * unblocked a task that can be run. */
                    if( uxTopReadyPriority > pxCurrentTCB[ xCoreID ]->uxPriority )
                    {
                        xSwitchRequired = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* if ( configUSE_PREEMPTION == 1 ) */

                /* Tasks of equal priority to the currently running task will share
                 * processing time (time slice) if preemption is on, and the application
                 * writer has not explicitly turned time slicing off. */
                #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
                {
                    if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xCoreID ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
                    {
                        xSwitchRequired = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */

                /* Exit the critical section as we have finished accessing the kernel data structures. */
                taskEXIT_CRITICAL_ISR( &xKernelLock );

                #if ( configUSE_PREEMPTION == 1 )
                {
                    if( xYieldPending[ xCoreID ] != pdFALSE )
                    {
                        xSwitchRequired = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_PREEMPTION */
            }

            #if ( configUSE_TICK_HOOK == 1 )
            {
                vApplicationTickHook();
            }
            #endif

            return xSwitchRequired;
        }
    #endif /* ( configNUM_CORES > 1 ) */
#endif // ESP_PLATFORM
/*-----------------------------------------------------------*/
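
The comments in the hunk spell out the calling contract: the portable layer
calls xTaskIncrementTick() from the tick interrupt on core 0, which owns the
tick count, while every other core calls xTaskIncrementTickOtherCores() to
check whether it should yield. A hedged sketch of how a multi-core port's
tick handler could dispatch between the two; vExampleTickHandler is a
hypothetical name, not part of this commit:

    /* Illustrative tick-interrupt dispatch for a multi-core port. The real
     * entry point lives in the port layer, not in this diff. */
    void vExampleTickHandler( void )
    {
        BaseType_t xSwitchRequired;

        if( xPortGetCoreID() == 0 )
        {
            /* Core 0 advances the tick count and unblocks timed-out tasks. */
            xSwitchRequired = xTaskIncrementTick();
        }
        else
        {
            /* Other cores only check whether a ready task should now
             * preempt the task running on this core. */
            xSwitchRequired = xTaskIncrementTickOtherCores();
        }

        if( xSwitchRequired != pdFALSE )
        {
            portYIELD_FROM_ISR(); /* Request a context switch on this core. */
        }
    }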