freertos: Migrate kernel tests to test app

This commit renames the "integration" tests to "kernel" tests and migrates them
to the test app as a component.
This commit is contained in:
Darian Leung
2022-10-15 01:19:15 +08:00
parent 60edaa4152
commit df4bfeee5b
24 changed files with 35 additions and 4 deletions

View File

@@ -4,13 +4,18 @@ cmake_minimum_required(VERSION 3.16)
# FreeRTOS tests of different types (e.g., kernel, port, performance etc.) are
# split into different directores in the test app's root directory. Each test
# type is treated as separate component
set(test_types)
set(test_types
"kernel")
list(APPEND EXTRA_COMPONENT_DIRS
${test_types}) # Add each test type as a component
${test_types} # Add each test type as a component
"$ENV{IDF_PATH}/tools/unit-test-app/components") # For test_utils component
# "Trim" the build. Include the minimal set of components, main, and anything it depends on.
set(COMPONENTS main)
#"Trim" the build. Include the minimal set of components, main, and anything it depends on.
# Note: This is commented out for now due to pthread depending on vPortCleanUpTCB provided by "test_freertos_hooks.c".
# pthread is not used in FreeRTOS unit tests, but is pulled in by esp_system due to another dependency.
# Todo: Resolve this by either moving the "test_freertos_hooks.c" out, or solving the component dependencies.
#set(COMPONENTS main)
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(freertos_test)

View File

@@ -0,0 +1,26 @@
# Register all of the "kernel" tests as a component
# For refactored FreeRTOS unit tests, we need to support #include "xxx.h" of FreeRTOS headers
idf_component_get_property(FREERTOS_ORIG_INCLUDE_PATH freertos ORIG_INCLUDE_PATH)
set(src_dirs
"." # For freertos_test_utils.c
"event_groups"
"queue"
"stream_buffer"
"tasks"
"timers")
set(priv_include_dirs
"." # For portTestMacro.h
"${FREERTOS_ORIG_INCLUDE_PATH}") # FreeRTOS headers via `#include "xxx.h"`
# In order for the cases defined by `TEST_CASE` in "kernel" to be linked into
# the final elf, the component can be registered as WHOLE_ARCHIVE
idf_component_register(SRC_DIRS ${src_dirs}
PRIV_INCLUDE_DIRS ${priv_include_dirs}
PRIV_REQUIRES test_utils esp_timer driver
WHOLE_ARCHIVE)
# Todo: Fix no-format errors
target_compile_options(${COMPONENT_LIB} PRIVATE "-Wno-format")

View File

@@ -0,0 +1,73 @@
# FreeRTOS Tests Guidelines
The FreeRTOS tests are currently being refactored/reorganized with the goal of being upstreamed. This document describes the set of guidelines to which the tests are refactored/reorganized according to.
## Unity Port
These test cases assume that the FreeRTOS port has also ported the [Unity Test Framework](https://github.com/ThrowTheSwitch/Unity). Because each FreeRTOS test case will require the scheduler to be started, the way that each test case is invoked will differ from regular Unity ports.
Regular Unity ports will assume that the `main()` function invokes each test using the `RUN_TEST()` macro. However, these test cases assume the following about the Unity port:
- Each test case is invoked from a `UnityTask` instead of `main()`. Thus each test case is run from the context of the `UnityTask`.
- The `UnityTask` is created using `xTaskCreate...()` (and pinned to core 0 if SMP) from the port's startup (i.e., `main()`)
- The port's startup (i.e., `main()`) should also start the scheduler using `vTaskStartScheduler()`
- Note that this is similar to the startup of most FreeRTOS Demos.
- Each test case is defined using the `TEST_CASE(name, ...)` macro. The `VA_ARGS` of the macro allows each port to specify a set of extra arguments (such as test case labels/tags) to be used into their CI pipelines.
- A `portTestMacro.h` must be provided by each port. This header will contain
- Some constants used by test cases such as default task stack sizes (e.g., `configTEST_DEFAULT_STACK_SIZE`)
- Some port implementation specific functions/macros required by test cases such as getting current system time (e.g., `portTEST_GET_TIME()`).
## Test Organization
- Test cases are grouped into sub-directories roughly matching the header files of FreeRTOS (e.g., task, queue, semaphore, event groups etc).
- Each source file should ideally test a particular behavior (e.g., priority scheduling, queue send, scheduler suspend). This should usually result in one test case per behavior, thus one test case per source file
- Some test case behaviors may depend on configuration (e.g., priority scheduling in single core vs SMP). In such cases
    - If the effect is small, use an `#if (config... == 1)` to wrap the affected areas
    - If the effect is large, write a separate test case in a separate source file and wrap the entire test case with `#if (config... == 1)`.
## Test Case Template
Each test case should have the following:
- Test case description describing
- Purpose of the test case
- Test case procedure
    - Expected outcome/behavior of the test case
- The test case code wrapped in its required `config...` macros
- The expected outcomes should be tested using the `TEST_ASSERT_...()` macros provided by unity
```c
// In test_priority_scheduling.c
/*
Test Priority Scheduling (Single Core)
Purpose:
- Test that the single-core scheduler always schedules the highest priority ready task
Procedure:
- Raise the unityTask priority to (configMAX_PRIORITIES - 1)
- unityTask creates the following lower priority tasks
- task_A (configMAX_PRIORITIES - 2)
- task_B (configMAX_PRIORITIES - 3)
- UnityTask blocks for a short period of time to allow task_A to run
- Clean up and restore unityTask's original priority
Expected:
- task_A should run after unityTask blocks
- task_B should never have run
*/
#if ( configNUM_CORES == 1 )
static BaseType_t test_static_var = 0;
static void test_static_func(void)
{
...
}
TEST_CASE("Tasks: Priority scheduling single core", "[freertos]")
{
...
}
#endif /* configNUM_CORES == 1 */
```

View File

@@ -0,0 +1,213 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/event_groups.h"
#include "driver/gptimer.h"
#include "unity.h"
#include "unity_test_utils.h"
#define NUM_TASKS 8
#define ALL_TASK_BITS ((1 << NUM_TASKS) - 1)
#define BIT_CALL(task) (1 << (task))
#define BIT_RESPONSE(task) (1 << ((task) + NUM_TASKS))
#define ALL_CALL_BITS (ALL_TASK_BITS)
#define ALL_RESPONSE_BITS (ALL_TASK_BITS << NUM_TASKS)
static const int COUNT = 1000;
static EventGroupHandle_t eg;
static SemaphoreHandle_t done_sem;
/* Worker for the "FreeRTOS Event Groups" test: repeatedly waits for its own
   "call" bit, answers by setting its "response" bit, then signals completion
   and suspends itself until the test deletes it. */
static void task_event_group_call_response(void *param)
{
    const int id = (int)param;
    printf("Started %d\n", id);
    int remaining = COUNT;
    while (remaining-- > 0) {
        /* Block until the shared "call" bit for this task is set
           (auto-cleared on return) */
        TEST_ASSERT( xEventGroupWaitBits(eg, BIT_CALL(id), true, false, portMAX_DELAY) );
        /* Acknowledge by setting this task's individual "response" bit */
        xEventGroupSetBits(eg, BIT_RESPONSE(id));
    }
    printf("Task %d done\n", id);
    xSemaphoreGive(done_sem);
    // Park here until deleted by the test task
    vTaskSuspend(NULL);
}
/* Verifies basic event group call/response: this task broadcasts all "call"
   bits COUNT times and each round waits for every worker to answer with its
   "response" bit. */
TEST_CASE("FreeRTOS Event Groups", "[freertos]")
{
    eg = xEventGroupCreate();
    done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
    TaskHandle_t task_handles[NUM_TASKS];
    /* Note: task_event_group_call_response all have higher priority than this task, so on this core
    they will always preempt this task.
    This is important because we need to know all tasks have blocked on their own BIT_CALL(task_num) each time we
    signal it, or they get out of sync.
    */
    for (int c = 0; c < NUM_TASKS; c++) {
        // Workers are distributed round-robin across the available cores
        xTaskCreatePinnedToCore(task_event_group_call_response, "tsk_call_resp", 4096, (void *)c, configMAX_PRIORITIES - 1, &task_handles[c], c % portNUM_PROCESSORS);
    }
    /* Tasks all start instantly, but this task will resume running at the same time as the higher priority tasks on the
    other processor may still be setting up, so allow time for them to also block on BIT_CALL()... */
    vTaskDelay(10);
    for (int i = 0; i < COUNT; i++) {
        /* signal all the "CALL" bits of each task */
        xEventGroupSetBits(eg, ALL_CALL_BITS);
        /* Wait until all tasks have set their respective response bits */
        TEST_ASSERT_EQUAL_HEX16(ALL_RESPONSE_BITS, xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, portMAX_DELAY));
    }
    /* Ensure all tasks have suspended themselves */
    for (int c = 0; c < NUM_TASKS; c++) {
        TEST_ASSERT( xSemaphoreTake(done_sem, 100 / portTICK_PERIOD_MS) );
    }
    for (int c = 0; c < NUM_TASKS; c++) {
        unity_utils_task_delete(task_handles[c]);
    }
    // Free synchronization objects only after all workers are gone
    vSemaphoreDelete(done_sem);
    vEventGroupDelete(eg);
}
/* Worker for the "Event Group Sync" test: rendezvous with all peer tasks
   COUNT times via xEventGroupSync(), then reports completion through its
   "response" bit and the done semaphore before self-deleting. */
static void task_test_sync(void *param)
{
    const int id = (int)param;
    printf("Started %d\n", id);
    for (int round = 0; round < COUNT; round++) {
        /* Set our bit and block until every task has set its bit */
        xEventGroupSync(eg, BIT_CALL(id), ALL_CALL_BITS, portMAX_DELAY);
        /* Drop our bit again so the next round starts clean */
        xEventGroupClearBits(eg, BIT_CALL(id));
    }
    int after_done = xEventGroupSetBits(eg, BIT_RESPONSE(id));
    printf("Done %d = 0x%08x\n", id, after_done);
    xSemaphoreGive(done_sem);
    vTaskDelete(NULL);
}
/* Verifies xEventGroupSync(): NUM_TASKS tasks rendezvous COUNT times, then
   each sets its "response" bit, which this task waits on before cleanup. */
TEST_CASE("FreeRTOS Event Group Sync", "[freertos]")
{
    eg = xEventGroupCreate();
    done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
    for (int c = 0; c < NUM_TASKS; c++) {
        // Tasks are distributed round-robin across cores and delete themselves
        xTaskCreatePinnedToCore(task_test_sync, "task_test_sync", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS);
    }
    for (int c = 0; c < NUM_TASKS; c++) {
        printf("Waiting on %d (0x%08x)\n", c, BIT_RESPONSE(c));
        // Response bits are not cleared on exit (xClearOnExit == false)
        TEST_ASSERT( xEventGroupWaitBits(eg, BIT_RESPONSE(c), false, false, portMAX_DELAY) );
    }
    /* Ensure all tasks cleaned up correctly */
    for (int c = 0; c < NUM_TASKS; c++) {
        TEST_ASSERT( xSemaphoreTake(done_sem, 100 / portTICK_PERIOD_MS) );
    }
    vSemaphoreDelete(done_sem);
    vEventGroupDelete(eg);
}
/*-----------------Test case for event group trace facilities-----------------*/
#ifdef CONFIG_FREERTOS_USE_TRACE_FACILITY
/*
* Test event group Trace Facility functions such as
* xEventGroupClearBitsFromISR(), xEventGroupSetBitsFromISR()
*/
//Use a timer to trigger an ISR
#define BITS 0xAA
static gptimer_handle_t gptimer;
static bool test_set_bits;
static bool test_clear_bits;
/* GPTimer alarm ISR: depending on which phase of the test is armed, either
   sets or clears event group bits from ISR context. Returns true when a
   higher-priority task was woken and a context switch should be requested. */
static bool on_timer_alarm_cb(gptimer_handle_t timer, const gptimer_alarm_event_data_t *edata, void *user_ctx)
{
    portBASE_TYPE hp_task_woken = pdFALSE;
    // One-shot behavior: halt the timer on the first alarm
    gptimer_stop(timer);
    if (test_set_bits) {
        test_set_bits = false;
        xEventGroupSetBitsFromISR(eg, BITS, &hp_task_woken);
    } else if (test_clear_bits) {
        test_clear_bits = false;
        xEventGroupClearBitsFromISR(eg, BITS);
        xSemaphoreGiveFromISR(done_sem, &hp_task_woken);
    }
    // Ask the driver to yield if necessary
    return (hp_task_woken == pdTRUE);
}
/* Exercises the ...FromISR() event group APIs: a one-shot GPTimer alarm ISR
   first sets BITS, then (after re-arming the timer) clears BITS, while this
   task waits on the corresponding outcomes. */
TEST_CASE("FreeRTOS Event Group ISR", "[freertos]")
{
    done_sem = xSemaphoreCreateBinary();
    eg = xEventGroupCreate();
    test_set_bits = false;
    test_clear_bits = false;
    //Setup timer for ISR
    gptimer_config_t config = {
        .clk_src = GPTIMER_CLK_SRC_DEFAULT,
        .direction = GPTIMER_COUNT_UP,
        .resolution_hz = 1000000, // 1MHz resolution, i.e., 1 count = 1us
    };
    TEST_ESP_OK(gptimer_new_timer(&config, &gptimer));
    gptimer_alarm_config_t alarm_config = {
        .reload_count = 0,
        .alarm_count = 200000, // Alarm fires 200ms after start
    };
    gptimer_event_callbacks_t cbs = {
        .on_alarm = on_timer_alarm_cb,
    };
    TEST_ESP_OK(gptimer_register_event_callbacks(gptimer, &cbs, NULL));
    TEST_ESP_OK(gptimer_enable(gptimer));
    TEST_ESP_OK(gptimer_set_alarm_action(gptimer, &alarm_config));
    //Test set bits
    printf("test set bits\r\n");
    test_set_bits = true;
    TEST_ESP_OK(gptimer_start(gptimer));
    TEST_ASSERT_EQUAL(BITS, xEventGroupWaitBits(eg, BITS, pdFALSE, pdTRUE, portMAX_DELAY)); //Let ISR set event group bits
    //Test clear bits
    printf("test clear bits\r\n");
    xEventGroupSetBits(eg, BITS); //Set bits to be cleared
    test_clear_bits = true;
    TEST_ESP_OK(gptimer_set_raw_count(gptimer, 0)); // Restart the count for a second one-shot alarm
    TEST_ESP_OK(gptimer_start(gptimer));
    xSemaphoreTake(done_sem, portMAX_DELAY); //Wait for ISR to clear bits
    vTaskDelay(10); //Event group clear bits runs via daemon task, delay so daemon can run
    TEST_ASSERT_EQUAL(0, xEventGroupGetBits(eg)); //Check bits are cleared
    //Clean up
    TEST_ESP_OK(gptimer_disable(gptimer));
    TEST_ESP_OK(gptimer_del_timer(gptimer));
    vEventGroupDelete(eg);
    vSemaphoreDelete(done_sem);
    vTaskDelay(10); //Give time for idle task to clear up deleted tasks
}
#endif //CONFIG_FREERTOS_USE_TRACE_FACILITY

View File

@@ -0,0 +1,72 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "FreeRTOS.h"
#include "semphr.h"
#include "freertos_test_utils.h"
#if ( configNUM_CORES > 1 )
typedef struct {
const TestFunction_t pxTestCode;
void * const pvTestCodeArg;
const SemaphoreHandle_t xTaskDoneSem;
} TestArgs_t;
/* Trampoline task: runs the caller-provided test function, signals completion
   to the creating task, then suspends to wait for deletion. */
static void test_func_task( void * pvParameters )
{
    TestArgs_t * pxArgs = ( TestArgs_t * ) pvParameters;
    /* Run the test body with its argument */
    pxArgs->pxTestCode( pxArgs->pvTestCodeArg );
    /* Report completion, then park until vTestOnAllCores() deletes us */
    xSemaphoreGive( pxArgs->xTaskDoneSem );
    vTaskSuspend( NULL );
}
/*
 * Run pxTestCode on every core: create one task per core (affinity-set or
 * pinned depending on the FreeRTOS flavor), wait for all of them to signal
 * completion, then delete them and the synchronization semaphore.
 */
void vTestOnAllCores( TestFunction_t pxTestCode, void * pvTestCodeArg, uint32_t ulStackDepth, UBaseType_t uxPriority )
{
    SemaphoreHandle_t xTaskDoneSem = xSemaphoreCreateCounting( configNUM_CORES, 0 );
    TaskHandle_t xTaskHandles[ configNUM_CORES ];
    /* Shared by all created tasks; lives on this stack for the whole call */
    TestArgs_t xTestArgs = {
        .pxTestCode = pxTestCode,
        .pvTestCodeArg = pvTestCodeArg,
        .xTaskDoneSem = xTaskDoneSem,
    };
    /* Create a separate task on each core to run the test function */
    for( BaseType_t xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ ) {
    #if ( CONFIG_FREERTOS_SMP == 1 )
        /* SMP FreeRTOS expresses core placement as an affinity mask */
        xTaskCreateAffinitySet( test_func_task,
                                "task",
                                ulStackDepth,
                                ( void * ) &xTestArgs,
                                uxPriority,
                                ( UBaseType_t ) ( 1 << xCoreID ),
                                &( xTaskHandles[ xCoreID ] ) );
    #else
        xTaskCreatePinnedToCore( test_func_task,
                                 "task",
                                 ulStackDepth,
                                 ( void * ) &xTestArgs,
                                 uxPriority,
                                 &( xTaskHandles[ xCoreID ] ),
                                 xCoreID );
    #endif
    }
    /* Wait for each task to complete the test */
    for( BaseType_t xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ ) {
        xSemaphoreTake( xTaskDoneSem, portMAX_DELAY );
    }
    /* Cleanup: tasks are suspended at this point, safe to delete */
    for( BaseType_t xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ ) {
        vTaskDelete( xTaskHandles[ xCoreID ] );
    }
    vSemaphoreDelete( xTaskDoneSem );
}
#endif /* ( configNUM_CORES > 1 ) */

View File

@@ -0,0 +1,32 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include "FreeRTOS.h"
#if ( configNUM_CORES > 1 )
/**
* @brief Prototype for test function.
*
* A test function can be passed to vTestOnAllCores() which will run the test function from a task on each core.
*/
typedef void (* TestFunction_t)( void * );
/**
* @brief Run a test function on each core
*
* This function will internally create a task pinned to each core, where each task will call the provided test
* function. This function will block until all cores finish executing the test function.
*
* @param pxTestCode Test function
* @param pvTestCodeArg Argument provided to test function
* @param ulStackDepth Stack depth of the created tasks
* @param uxPriority Priority of the created tasks
*/
void vTestOnAllCores( TestFunction_t pxTestCode, void * pvTestCodeArg, uint32_t ulStackDepth, UBaseType_t uxPriority );
#endif /* ( configNUM_CORES > 1 ) */

View File

@@ -0,0 +1,16 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "test_utils.h"
/* Default stack size for tasks created by the kernel test cases */
#define configTEST_DEFAULT_STACK_SIZE              4096
/* Priority of the Unity task that invokes each test case */
#define configTEST_UNITY_TASK_PRIORITY             UNITY_FREERTOS_PRIORITY
/* Reference clock abstraction used by timing-sensitive tests.
   The tick conversion below implies a 1MHz (1us per count) reference clock. */
#define portTEST_REF_CLOCK_TYPE                    uint32_t
#define portTEST_REF_CLOCK_INIT()                  ref_clock_init()
#define portTEST_REF_CLOCK_DEINIT()                ref_clock_deinit()
#define portTEST_REF_CLOCK_GET_TIME()              ((uint32_t) ref_clock_get())
#define portTEST_TICKS_TO_REF_CLOCK(ticks)         ((ticks) * (1000000/configTICK_RATE_HZ))

View File

@@ -0,0 +1,109 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Test FreeRTOS debug functions and utilities.
* - Queue registry functions vQueueAddToRegistry(), vQueueUnregisterQueue(),
 * and pcQueueGetName() (backported)
*
*
*/
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "unity.h"
#include "test_utils.h"
#if (CONFIG_FREERTOS_QUEUE_REGISTRY_SIZE > 0)
#define NO_OF_QUEUES_PER_CORE ((int)((CONFIG_FREERTOS_QUEUE_REGISTRY_SIZE - 3)/portNUM_PROCESSORS)) //Save space for some preallocated tasks
#define NO_OF_QUEUES_TOTAL (NO_OF_QUEUES_PER_CORE * portNUM_PROCESSORS)
#define QUEUE_NAME_MAX_LENGTH 30
static SemaphoreHandle_t start_sem[portNUM_PROCESSORS];
static SemaphoreHandle_t done_sem = NULL;
static char *names[NO_OF_QUEUES_TOTAL];
static QueueHandle_t handles[NO_OF_QUEUES_TOTAL];
/* Per-core worker: creates this core's share of queues and names, registers
   them when signalled, then (on a second signal) deletes them, which
   unregisters them from the queue registry. */
void test_queue_registry_task(void *arg)
{
    int core = xPortGetCoreID();
    int offset = core * NO_OF_QUEUES_PER_CORE;  // This core's slice of the global arrays
    //Create queues and accompanying queue names
    for(int i = 0; i < NO_OF_QUEUES_PER_CORE; i++){
        handles[i + offset] = xQueueCreate(1,1); //Create queues
        names[i + offset] = calloc(QUEUE_NAME_MAX_LENGTH, sizeof(char));
        sprintf(names[i + offset], "Queue%d%d", core, i);
    }
    xSemaphoreTake(start_sem[core], portMAX_DELAY); //Wait for start vQueueAddToRegistry()
    for(int i = 0; i < NO_OF_QUEUES_PER_CORE; i++){
        vQueueAddToRegistry(handles[i + offset] , names[i + offset]); //Register queues to queue registry
    }
    xSemaphoreGive(done_sem); //Signal that vQueueAddToRegistry() has completed
    vTaskDelay(1);
    xSemaphoreTake(start_sem[core], portMAX_DELAY); //Wait to start vQueueUnregisterQueue()
    for(int i = 0; i < NO_OF_QUEUES_PER_CORE; i++){
        vQueueDelete(handles[i + offset]); //Internally calls vQueueUnregisterQueue
    }
    xSemaphoreGive(done_sem); //Signal done
    vTaskDelete(NULL); //Delete self
}
/*
Purpose: exercise the queue registry APIs (vQueueAddToRegistry(),
pcQueueGetName(), vQueueUnregisterQueue()) from every core simultaneously.
Procedure:
- One worker task per core creates its share of queues and names
- All workers are released at once to register, then again to delete/unregister
Expected:
- pcQueueGetName() returns the registered name pointer after registration
- pcQueueGetName() returns NULL after the queue is deleted
Fix: added the statement-terminating semicolons missing after the two
TEST_ASSERT() calls (the bare macro invocation is not a complete statement).
*/
TEST_CASE("Test FreeRTOS Queue Registry", "[freertos]")
{
    //Create synchronization semaphores and tasks to test queue registry
    done_sem = xSemaphoreCreateCounting(portNUM_PROCESSORS, 0);
    for(int i = 0; i < portNUM_PROCESSORS; i++){
        start_sem[i] = xSemaphoreCreateBinary();
        xTaskCreatePinnedToCore(test_queue_registry_task, "testing task", 4096, NULL, UNITY_FREERTOS_PRIORITY+1, NULL, i);
    }
    //Release all workers as close to simultaneously as possible
    portDISABLE_INTERRUPTS();
    for(int i = 0; i < portNUM_PROCESSORS; i++){
        xSemaphoreGive(start_sem[i]); //Trigger start
    }
    portENABLE_INTERRUPTS();
    for(int i = 0; i < portNUM_PROCESSORS; i++){
        xSemaphoreTake(done_sem, portMAX_DELAY); //Wait for tasks to complete vQueueAddToRegistry
    }
    for(int i = 0; i < NO_OF_QUEUES_TOTAL; i++){
        const char *addr = pcQueueGetName(handles[i]);
        TEST_ASSERT(addr == names[i]); //Check vQueueAddToRegistry was successful
    }
    //Release workers again to delete (and thus unregister) their queues
    portDISABLE_INTERRUPTS();
    for(int i = 0; i < portNUM_PROCESSORS; i++){
        xSemaphoreGive(start_sem[i]); //Trigger start
    }
    portENABLE_INTERRUPTS();
    for(int i = 0; i < portNUM_PROCESSORS; i++){
        xSemaphoreTake(done_sem, portMAX_DELAY); //Wait for tasks to complete vQueueUnregisterQueue
    }
    for(int i = 0; i < NO_OF_QUEUES_TOTAL; i++){
        const char *addr = pcQueueGetName(handles[i]);
        TEST_ASSERT(addr == NULL); //Check vQueueUnregisterQueue was successful
        handles[i] = NULL;
    }
    //Cleanup names and semaphores
    for(int i = 0; i < NO_OF_QUEUES_TOTAL; i++){
        free(names[i]);
        names[i] = NULL;
    }
    for(int i = 0; i < portNUM_PROCESSORS; i++){
        vSemaphoreDelete(start_sem[i]);
        start_sem[i] = NULL;
    }
    vSemaphoreDelete(done_sem);
    done_sem = NULL;
}
#endif //(CONFIG_FREERTOS_QUEUE_REGISTRY_SIZE > 0)

View File

@@ -0,0 +1,187 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdio.h>
#include <stdlib.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"
#include "freertos/idf_additions.h"
#include "unity.h"
#include "test_utils.h"
#define QUEUE_LEN 4
/* Create a queue set large enough to hold every item of every member queue,
   then create num_queues queues of length queue_len and add each to the set.
   Handles are returned through queue_list_ret / queue_set_ret. */
static void allocate_resources(int num_queues, int queue_len, QueueHandle_t *queue_list_ret, QueueSetHandle_t *queue_set_ret)
{
    // The set's capacity must cover all items of all member queues
    QueueSetHandle_t set = xQueueCreateSet(num_queues * queue_len);
    TEST_ASSERT_NOT_EQUAL(NULL, set);
    *queue_set_ret = set;
    // Create each queue and register it with the set
    for (int idx = 0; idx < num_queues; idx++) {
        queue_list_ret[idx] = xQueueCreate(queue_len, sizeof(BaseType_t));
        TEST_ASSERT_NOT_EQUAL(NULL, queue_list_ret[idx]);
        TEST_ASSERT_EQUAL(pdPASS, xQueueAddToSet(queue_list_ret[idx], set));
    }
}
/* Detach every queue from the queue set, delete the queues, then delete the
   set itself. Counterpart of allocate_resources(). */
static void free_resources(int num_queues, QueueHandle_t *queue_list, QueueSetHandle_t queue_set)
{
    for (int idx = 0; idx < num_queues; idx++) {
        // A queue must be removed from its set before it can be deleted
        TEST_ASSERT_EQUAL(pdPASS, xQueueRemoveFromSet(queue_list[idx], queue_set));
        vQueueDelete(queue_list[idx]);
    }
    vQueueDelete(queue_set);
}
/*
Test queue sets basic
Purpose:
- Test that queue set works as expected
Procedure:
- Create NUM_QUEUES queues and add them to the same queue set
- Fill each queue sequentially with QUEUE_LEN items
Expected:
- Each call to xQueueSend() should generate a member in the queue set
- The order of the members should match the order in which xQueueSend() was called
- The item sent by the xQueueSend() is correct when read
*/
#define NUM_QUEUES 5
/* Basic queue set behavior: filling member queues sequentially must produce
   set members in the same order, and each member must yield its sent items. */
TEST_CASE("Test Queue sets", "[freertos]")
{
    // Create queues and queue set
    QueueHandle_t queues[NUM_QUEUES];
    QueueSetHandle_t queue_set;
    allocate_resources(NUM_QUEUES, QUEUE_LEN, queues, &queue_set);
    // Fill each queue sequentially with QUEUE_LEN items
    for (int i = 0; i < NUM_QUEUES; i++) {
        for (int j = 0; j < QUEUE_LEN; j++) {
            BaseType_t item = j;
            TEST_ASSERT_EQUAL(pdTRUE, xQueueSend(queues[i], &item, 0));
        }
    }
    for (int i = 0; i < NUM_QUEUES; i++) {
        for (int j = 0; j < QUEUE_LEN; j++) {
            // Check the queue set member: must appear in send order
            QueueSetMemberHandle_t member = xQueueSelectFromSet(queue_set, 0);
            TEST_ASSERT_EQUAL(queues[i], member);
            // Check the queue's items: must match what was sent
            BaseType_t item;
            TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(member, &item, 0));
            TEST_ASSERT_EQUAL(j, item);
        }
    }
    // Check that there are no more members (0-tick select returns NULL)
    TEST_ASSERT_EQUAL(NULL, xQueueSelectFromSet(queue_set, 0));
    // Cleanup queues and queue set
    free_resources(NUM_QUEUES, queues, queue_set);
}
#ifndef CONFIG_FREERTOS_UNICORE
/*
Test queue set SMP thread safety
Purpose:
- Test that queue set works when being used from different cores simultaneously
Procedure:
- Create a queue for each core and add them to the same queue set
- Create a task on each core to send QUEUE_LEN items to their assigned queue
- Synchronize the tasks so that they start sending at the same time
Expected:
- Each call to xQueueSend() should generate a member in the queue set
- The item sent by the xQueueSend() is correct when read
*/
static volatile bool start_other_cores;
static SemaphoreHandle_t done_sem = NULL;
/* Fills the queue assigned to the calling core with QUEUE_LEN copies of its
   core ID. Core 0 releases the other cores (via a shared flag) so all sends
   start near-simultaneously; non-zero cores signal done_sem and self-delete. */
static void send_func(void *arg)
{
    QueueHandle_t queue = (QueueHandle_t)arg;
    BaseType_t core_id = xPortGetCoreID();
    if (core_id == 0) {
        // We are core 0. Trigger the other cores to start
        start_other_cores = true;
    } else {
        // Busy-wait to be started by main core (flag is volatile)
        while (!start_other_cores) {
            ;
        }
    }
    // Fill the queue assigned to the current core
    for (int i = 0; i < QUEUE_LEN; i++) {
        TEST_ASSERT_EQUAL(pdTRUE, xQueueSend(queue, &core_id, 0));
    }
    if (core_id != 0) {
        // Indicate completion to core 0 and self delete
        // (core 0 runs this function inline from the test case and returns)
        xSemaphoreGive(done_sem);
        vTaskDelete(NULL);
    }
}
/* Queue set SMP thread safety: every core sends to its own member queue at the
   same time; afterwards the set must contain exactly QUEUE_LEN members per
   queue and each received item must be that queue's core ID. */
TEST_CASE("Test queue sets multi-core", "[freertos]")
{
    // Create done semaphore (one count per non-zero core)
    done_sem = xSemaphoreCreateCounting(portNUM_PROCESSORS - 1, 0);
    TEST_ASSERT_NOT_EQUAL(NULL, done_sem);
    // Create queues and queue set
    QueueHandle_t queues[portNUM_PROCESSORS];
    QueueSetHandle_t queue_set;
    allocate_resources(portNUM_PROCESSORS, QUEUE_LEN, queues, &queue_set);
    // Create tasks of the same priority for all cores except for core 0
    for (int i = 1; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(send_func, "send", 2048, (void *)queues[i], UNITY_FREERTOS_PRIORITY, NULL, i));
    }
    // Core 0 calls send_func as well triggering the simultaneous sends from all cores
    send_func((void *)queues[0]);
    // Wait for all other cores to be done
    for (int i = 1; i < portNUM_PROCESSORS; i++) {
        xSemaphoreTake(done_sem, portMAX_DELAY);
    }
    // Read queues from the queue set, then read an item from the queue
    uint32_t queues_check_count[portNUM_PROCESSORS] = {0};
    QueueSetMemberHandle_t member = xQueueSelectFromSet(queue_set, 0);
    while (member != NULL) {
        // Read the core ID from the queue, check that core ID is sane
        BaseType_t core_id;
        TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(member, &core_id, 0));
        TEST_ASSERT_LESS_THAN(portNUM_PROCESSORS, core_id);
        queues_check_count[core_id]++;
        // Get next member
        member = xQueueSelectFromSet(queue_set, 0);
    }
    // Check that all items from all queues have been read
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(QUEUE_LEN, queues_check_count[i]);
    }
    // Cleanup queues and queue set
    free_resources(portNUM_PROCESSORS, queues, queue_set);
    // Cleanup done sem
    vSemaphoreDelete(done_sem);
    done_sem = NULL;
}
#endif // CONFIG_FREERTOS_UNICORE

View File

@@ -0,0 +1,109 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/stream_buffer.h"
#include "freertos/message_buffer.h"
#include "unity.h"
#include "test_utils.h"
typedef struct {
StreamBufferHandle_t sb;
SemaphoreHandle_t end_test;
bool send_fail;
bool receive_fail;
bool produce_isr;
}test_context;
/* Producer: sends bytes 0..99 one at a time into the stream buffer with no
   block time. On a failed send it flags send_fail, signals end_test, and
   self-deletes; otherwise it clears send_fail and exits. */
static void producer_task(void *arg)
{
    test_context *tc = arg;
    uint8_t produced = 0;
    printf("Starting sender task... \n");
    while(produced < 100) {
        // NOTE(review): produce_isr is initialized to false and never set true
        // in this file, so the non-ISR path is the only one exercised — confirm
        // whether an ISR-driven variant was intended.
        if(!tc->produce_isr) {
            BaseType_t result = xStreamBufferSend(tc->sb, &produced, 1, 0);
            if(!result) {
                // Send failed (buffer full): report failure and bail out
                tc->send_fail = true;
                xSemaphoreGive(tc->end_test);
                vTaskDelete(NULL);
            } else {
                produced++;
            }
        }
        vTaskDelay(1); // Pace production one byte per tick
    }
    tc->send_fail = false;
    vTaskDelete(NULL);
}
/* Receiver: reads bytes from the stream buffer and checks that they arrive in
   sequence (0, 1, 2, ...). On a mismatch or receive timeout it flags
   receive_fail; in all cases it signals end_test before self-deleting. */
static void receiver_task(void *arg)
{
    test_context *tc = arg;
    uint8_t expected_consumed = 0;
    printf("Starting receiver task... \n");
    for(;;){
        uint8_t read_byte = 0xFF;
        // 1000-tick timeout: a stalled producer fails the test via !result
        uint32_t result = xStreamBufferReceive(tc->sb, &read_byte, 1, 1000);
        if((read_byte != expected_consumed) || !result) {
            tc->receive_fail = true;
            xSemaphoreGive(tc->end_test);
            vTaskDelete(NULL);
        } else {
            expected_consumed++;
            // NOTE(review): stops after consuming byte 98 although the producer
            // sends bytes 0..99 — looks like an off-by-one; confirm intent.
            if(expected_consumed == 99) {
                break;
            }
        }
    }
    tc->receive_fail = false;
    xSemaphoreGive(tc->end_test);
    vTaskDelete(NULL);
}
/* End-to-end stream buffer test: a producer on core 0 streams sequential bytes
   to a receiver on core 1; the test fails if either side reports an error. */
TEST_CASE("Send-receive stream buffer test", "[freertos]")
{
    BaseType_t result;
    test_context tc;
    // 128-byte buffer, trigger level 1 so the receiver wakes per byte
    tc.sb = xStreamBufferCreate(128, 1);
    tc.end_test = xSemaphoreCreateBinary();
    TEST_ASSERT(tc.sb);
    TEST_ASSERT(tc.end_test);
    tc.send_fail = false;
    tc.receive_fail = false;
    tc.produce_isr = false;
    // Producer runs at higher priority than the receiver, on a different core
    result = xTaskCreatePinnedToCore(producer_task, "sender", 4096, &tc, UNITY_FREERTOS_PRIORITY + 2, NULL, 0);
    TEST_ASSERT(result == pdTRUE);
    result = xTaskCreatePinnedToCore(receiver_task, "receiver", 4096, &tc, UNITY_FREERTOS_PRIORITY + 1, NULL, 1);
    TEST_ASSERT(result == pdTRUE);
    // Wait (bounded) for the receiver to signal the end of the test
    result = xSemaphoreTake(tc.end_test, 2000);
    TEST_ASSERT(result == pdTRUE);
    vTaskDelay(1);
    TEST_ASSERT(tc.send_fail == false);
    TEST_ASSERT(tc.receive_fail == false);
    vStreamBufferDelete(tc.sb);
    vSemaphoreDelete(tc.end_test);
}

View File

@@ -0,0 +1,92 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
#include "test_utils.h"
/*
Test eTaskGetState()
Purpose:
- Test that eTaskGetState() returns the correct state for a particular task
Procedure:
- Create tasks in every state (and repeat for each core)
- Note: eDeleted is not tested due to needing to control when the idle tasks run
- Call eTaskGetState() on each created task
Expected:
- eTaskGetState() should return the correct state for each created task
*/
/* Parks itself in the Blocked state (very long delay) so the test can observe
   eBlocked via eTaskGetState(). */
static void blocked_task(void *arg)
{
    (void) arg;
    vTaskDelay(portMAX_DELAY-1);
    /* Shouldn't need to self delete, but added for extra safety */
    vTaskDelete(NULL);
}
/* Suspends itself immediately so the test can observe eSuspended via
   eTaskGetState(). */
static void suspended_task(void *arg)
{
    (void) arg;
    vTaskSuspend(NULL);
    /* Shouldn't need to self delete, but added for extra safety */
    vTaskDelete(NULL);
}
/* Spins forever (after a short initial delay) so the test can observe eReady
   or eRunning via eTaskGetState(). */
static void loop_task(void *arg)
{
    (void) arg;
    /* Short delay to allow other created tasks to run first */
    vTaskDelay(2);
    for (;;) {
        ;
    }
}
/* Creates a task in each observable state (blocked, suspended, ready, running)
   on every core, then checks eTaskGetState() reports the expected state.
   eDeleted is not covered (would require controlling when idle tasks run). */
TEST_CASE("Test eTaskGetState", "[freertos]")
{
    TaskHandle_t blocked_tasks[portNUM_PROCESSORS];
    TaskHandle_t suspended_tasks[portNUM_PROCESSORS];
    TaskHandle_t ready_tasks[portNUM_PROCESSORS];
    TaskHandle_t running_tasks[portNUM_PROCESSORS];
    // Create tasks of each state on each core
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        // Lower-priority tasks: they run (then block/suspend/spin) while we delay
        TEST_ASSERT_EQUAL(pdPASS, xTaskCreatePinnedToCore(blocked_task, "blkd", configMINIMAL_STACK_SIZE, NULL, UNITY_FREERTOS_PRIORITY - 1, &blocked_tasks[i], i));
        TEST_ASSERT_EQUAL(pdPASS, xTaskCreatePinnedToCore(suspended_task, "susp", configMINIMAL_STACK_SIZE, NULL, UNITY_FREERTOS_PRIORITY - 1, &suspended_tasks[i], i));
        TEST_ASSERT_EQUAL(pdPASS, xTaskCreatePinnedToCore(loop_task, "rdy", configMINIMAL_STACK_SIZE, NULL, UNITY_FREERTOS_PRIORITY - 1, &ready_tasks[i], i));
        if (i == UNITY_FREERTOS_CPU) {
            // This task itself is the "running" task on the unity core
            running_tasks[i] = xTaskGetCurrentTaskHandle();
        } else {
            // Same priority as the unity task so it stays running on its core
            xTaskCreatePinnedToCore(loop_task, "run", configMINIMAL_STACK_SIZE, NULL, UNITY_FREERTOS_PRIORITY, &running_tasks[i], i);
        }
    }
    // Short delay to allow created tasks to run
    vTaskDelay(10);
    // Check the state of the created tasks
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blocked_tasks[i]));
        TEST_ASSERT_EQUAL(eSuspended, eTaskGetState(suspended_tasks[i]));
        TEST_ASSERT_EQUAL(eReady, eTaskGetState(ready_tasks[i]));
        TEST_ASSERT_EQUAL(eRunning, eTaskGetState(running_tasks[i]));
    }
    // Clean up created tasks
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        vTaskDelete(blocked_tasks[i]);
        vTaskDelete(suspended_tasks[i]);
        vTaskDelete(ready_tasks[i]);
        if (i != UNITY_FREERTOS_CPU) {
            // Don't delete ourselves (the running task on the unity core)
            vTaskDelete(running_tasks[i]);
        }
    }
    // Short delay to allow task memory to be cleaned
    vTaskDelay(10);
}

View File

@@ -0,0 +1,169 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "esp_rom_sys.h"
#include "esp_cpu.h"
#include "unity.h"
#include "test_utils.h"
/*
Test Best Effort Round Robin Scheduling:
The following test case tests the "Best Effort Round Robin Scheduling" that fixes the skipping behavior found in older
versions of the ESP-IDF SMP FreeRTOS (see docs for more details about Best Effort Round Robin Scheduling).
This test...
- Only runs under dual core configuration
- Will disable the tick interrupts of both cores
Test flow as follows:
1. Stop preemption on core 0 by raising the priority of the unity task
2. Stop preemption on core 0 by creating a blocker task
3. Disable tick interrupts on both cores
4. Create N spin tasks on each core, each with a sequential task_code
5. Unblock those spin tasks in a sequential order
6. Lower priority of unity task and stop the blocker task so that spin tasks are run
7. Each time a spin task is run (i.e., an iteration) it will send its task code to a queue
8. Spin tasks will clean themselves up
9. The queue should contain the task codes of the spin tasks in the order they were started in, thus showing that round
robin schedules the tasks in a sequential order.
*/
#if !defined(CONFIG_FREERTOS_UNICORE) && (defined(CONFIG_FREERTOS_CORETIMER_0) || defined(CONFIG_FREERTOS_CORETIMER_1))
#define SPIN_TASK_PRIO (CONFIG_UNITY_FREERTOS_PRIORITY + 1)
#define SPIN_TASK_NUM_ITER 3
#define TASK_STACK_SIZE 1024
#define NUM_PINNED_SPIN_TASK_PER_CORE 3
#if defined(CONFIG_FREERTOS_CORETIMER_0)
#define TICK_INTR_IDX 6
#else //defined(CONFIG_FREERTOS_CORETIMER_1)
#define TICK_INTR_IDX 15
#endif
static QueueHandle_t core0_run_order_queue;
static QueueHandle_t core1_run_order_queue;
static uint32_t total_iter_count[configNUM_CORES] = {0};
/* Round-robin probe task: once started, records its task_code into its core's
   run-order queue on each of SPIN_TASK_NUM_ITER iterations, yielding between
   iterations. The last finishing spin task on a core re-enables that core's
   tick interrupt. */
static void spin_task(void *arg)
{
    uint32_t task_code = (uint32_t)arg;
    // Upper nibble of task_code encodes the core (0 -> core 0's queue)
    QueueHandle_t run_order_queue = ((task_code >> 4) == 0) ? core0_run_order_queue : core1_run_order_queue;
    //Wait to be started
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    for (int i = 0; i < SPIN_TASK_NUM_ITER; i++) {
        xQueueSend(run_order_queue, &task_code, 0);
        //No need for critical sections as tick interrupt is disabled
        total_iter_count[xPortGetCoreID()]++;
        taskYIELD();
    }
    //Last iteration of the last spin task on this core. Reenable this core's tick interrupt
    if (total_iter_count[xPortGetCoreID()] == (NUM_PINNED_SPIN_TASK_PER_CORE * SPIN_TASK_NUM_ITER)) {
        esp_cpu_intr_enable(1 << TICK_INTR_IDX);
    }
    vTaskDelete(NULL);
}
/*
Task pinned to core 1 that disables core 1's tick interrupt, then spins until the
unity task sets *arg (exit_loop flag). Once resumed by the test, it re-enables the
tick interrupt and deletes itself.
*/
static void blocker_task(void *arg)
{
    volatile bool *exit_loop = (volatile bool *)arg;
    //Disable tick interrupts on core 1 the duration of the test
    taskDISABLE_INTERRUPTS();   // Prevent preemption while masking the tick source
    esp_cpu_intr_disable(1 << TICK_INTR_IDX);
    taskENABLE_INTERRUPTS();
    while (!*exit_loop) {
        ;   // Busy-wait to keep core 1 occupied until the spin tasks should run
    }
    //Wait to be resumed
    vTaskSuspend(NULL);
    //Reenable tick interrupt on core 1
    taskDISABLE_INTERRUPTS();
    esp_cpu_intr_enable(1 << TICK_INTR_IDX);
    taskENABLE_INTERRUPTS();
    vTaskDelete(NULL);
}
/*
Verifies "Best Effort Round Robin Scheduling": with tick interrupts disabled on
both cores, spin tasks of equal priority must run in the order they were started,
on each core independently. See the file-header comment for the full test flow.
*/
TEST_CASE("Test FreeRTOS Scheduling Round Robin", "[freertos]")
{
    core0_run_order_queue = xQueueCreate(SPIN_TASK_NUM_ITER * NUM_PINNED_SPIN_TASK_PER_CORE, sizeof(uint32_t));
    core1_run_order_queue = xQueueCreate(SPIN_TASK_NUM_ITER * NUM_PINNED_SPIN_TASK_PER_CORE, sizeof(uint32_t));
    /* Increase priority of unity task so that the spin tasks don't preempt us
    during task creation. */
    vTaskPrioritySet(NULL, SPIN_TASK_PRIO + 1);
    /* Create a task on core 1 of the same priority to block core 1 */
    volatile bool suspend_blocker = false;
    TaskHandle_t blocker_task_hdl;
    xTaskCreatePinnedToCore(blocker_task, "blk", TASK_STACK_SIZE, (void *)&suspend_blocker, SPIN_TASK_PRIO + 1, &blocker_task_hdl, 1);
    //Disable tick interrupts on core 0 the duration of the test (blocker_task does the same on core 1)
    taskDISABLE_INTERRUPTS();
    esp_cpu_intr_disable(1 << TICK_INTR_IDX);
    taskENABLE_INTERRUPTS();
    TaskHandle_t core0_task_hdls[NUM_PINNED_SPIN_TASK_PER_CORE];
    TaskHandle_t core1_task_hdls[NUM_PINNED_SPIN_TASK_PER_CORE];
    for (int i = 0; i < NUM_PINNED_SPIN_TASK_PER_CORE; i++) {
        //Create a spin task pinned to core 0 (task codes 0x00..0x02)
        xTaskCreatePinnedToCore(spin_task, "spin", TASK_STACK_SIZE, (void *)(0x00 + i), SPIN_TASK_PRIO, &core0_task_hdls[i], 0);
        //Create a spin task pinned to core 1 (task codes 0x10..0x12)
        xTaskCreatePinnedToCore(spin_task, "spin", TASK_STACK_SIZE, (void *)(0x10 + i), SPIN_TASK_PRIO, &core1_task_hdls[i], 1);
    }
    /* Start the tasks in a particular order. This order should be reflected in
    in the round robin scheduling on each core */
    for (int i = 0; i < NUM_PINNED_SPIN_TASK_PER_CORE; i++) {
        //Start a spin task on core 0
        xTaskNotifyGive(core0_task_hdls[i]);
        //Start a spin task on core 1
        xTaskNotifyGive(core1_task_hdls[i]);
    }
    //Lower priority of this task and stop blocker task to allow the spin tasks to be scheduled
    suspend_blocker = true;
    vTaskPrioritySet(NULL, UNITY_FREERTOS_PRIORITY);
    //Give a enough delay to allow all iterations of the round robin to occur
    esp_rom_delay_us(10000);
    /* Each core's queue must contain the task codes in creation order, repeated
     * SPIN_TASK_NUM_ITER times — i.e., strict round robin. */
    for (int i = 0; i < SPIN_TASK_NUM_ITER; i++) {
        for (int j = 0; j < NUM_PINNED_SPIN_TASK_PER_CORE; j++) {
            uint32_t core0_entry;
            uint32_t core1_entry;
            TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(core0_run_order_queue, &core0_entry, 0));
            TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(core1_run_order_queue, &core1_entry, 0));
            TEST_ASSERT_EQUAL(0x00 + j, core0_entry);
            TEST_ASSERT_EQUAL(0x10 + j, core1_entry);
        }
    }
    //Resume the blocker task for cleanup
    vTaskResume(blocker_task_hdl);
    //Reenable tick interrupt on core 0
    taskDISABLE_INTERRUPTS();
    esp_cpu_intr_enable(1 << TICK_INTR_IDX);
    taskENABLE_INTERRUPTS();
    vTaskDelay(10); //Wait for blocker task to clean up
    //Clean up queues
    vQueueDelete(core0_run_order_queue);
    vQueueDelete(core1_run_order_queue);
}
#endif //!defined(CONFIG_FREERTOS_UNICORE) && (defined(CONFIG_FREERTOS_CORETIMER_0) || defined(CONFIG_FREERTOS_CORETIMER_1))

View File

@@ -0,0 +1,159 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Test backported deletion behavior by creating tasks of various affinities and
* check if the task memory is freed immediately under the correct conditions.
*
* The behavior of vTaskDelete() results in the immediate freeing of task memory
* and the immediate execution of deletion callbacks for tasks which are not
* running, provided they are not pinned to the other core (due to FPU cleanup
* requirements).
*
* If the condition is not met, freeing of task memory and execution of
* deletion callbacks will still be carried out by the Idle Task.
*/
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_heap_caps.h"
#include "unity.h"
#include "test_utils.h"
#include "esp_rom_sys.h"
#define NO_OF_TSKS 3      // Number of tasks created per core in each test phase
#define DELAY_TICKS 2     // Short delay to let the idle task run cleanup
/* Caps of all memory which is allocated from when a task is created */
#define HEAP_CAPS (portTcbMemoryCaps | portStackMemoryCaps)
#define DELAY_US_ITERATIONS	1000   // Total us swept (in steps of 10) in the no-affinity self-delete phase
/* Task body that deletes itself immediately; passing NULL targets the calling task */
static void tsk_self_del(void *param)
{
    (void)param;
    vTaskDelete(NULL);
}
/* Task body that blocks forever; it is only ever removed via an external vTaskDelete() */
static void tsk_extern_del(void *param)
{
    (void)param;
    vTaskDelay(portMAX_DELAY);
}
static void tsk_self_del_us_delay(void *param)
{
uint32_t delay = (uint32_t)param;
esp_rom_delay_us(delay);
vTaskDelete(NULL);
}
/*
Verifies vTaskDelete() behavior in three phases:
1. Self-deleting tasks: memory is reclaimed by the idle task within one tick.
2. Externally deleted (not running) tasks: memory is freed immediately by vTaskDelete().
3. No-affinity self-deleting tasks racing the tick: the other core's idle task must
   not reclaim the task before its final context switch completes.
Heap usage (TCB + stack caps) is compared before/after to detect leaks.
*/
TEST_CASE("FreeRTOS Delete Tasks", "[freertos]")
{
    /* -------------- Test vTaskDelete() on currently running tasks ----------------*/
    uint32_t before_count = uxTaskGetNumberOfTasks();
    uint32_t before_heap = heap_caps_get_free_size(HEAP_CAPS);
    for(int i = 0; i < portNUM_PROCESSORS; i++){
        for(int j = 0; j < NO_OF_TSKS; j++){
            TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(tsk_self_del, "tsk_self", 1024, NULL, configMAX_PRIORITIES - 1, NULL, i));
        }
    }
    vTaskDelay(DELAY_TICKS); //Minimal delay to see if Idle task cleans up all tasks awaiting deletion in a single tick
    TEST_ASSERT_EQUAL(before_count, uxTaskGetNumberOfTasks());
    TEST_ASSERT_EQUAL(before_heap, heap_caps_get_free_size(HEAP_CAPS));
    /* ------------- Test vTaskDelete() on not currently running tasks ------------ */
    TaskHandle_t handles[NO_OF_TSKS];
    before_heap = heap_caps_get_free_size(HEAP_CAPS);
    //Create task pinned to the same core that will not run during task deletion
    for(int j = 0 ; j < NO_OF_TSKS; j++){
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(tsk_extern_del, "tsk_extern", 4096, NULL, configMAX_PRIORITIES - 1, &handles[j], xPortGetCoreID()));
    }
    TEST_ASSERT_NOT_EQUAL(before_heap, heap_caps_get_free_size(HEAP_CAPS)); //Check tasks have been created
    //Delete the tasks, memory should be freed immediately
    for(int j = 0; j < NO_OF_TSKS; j++){
        vTaskDelete(handles[j]);
    }
    TEST_ASSERT_EQUAL(before_heap, heap_caps_get_free_size(HEAP_CAPS));
    /* Test self deleting no affinity task is not removed by idle task of other core before context switch */
    for(int i = 0; i < DELAY_US_ITERATIONS; i+= 10){
        vTaskDelay(1); //Sync to next tick interrupt
        //Sweep the task's busy-wait duration (i us) to probe different delete/switch timings
        xTaskCreatePinnedToCore(tsk_self_del_us_delay, "delay", 1024, (void *)i, UNITY_FREERTOS_PRIORITY - 1, NULL, tskNO_AFFINITY);
        esp_rom_delay_us(10); //Busy wait to ensure no affinity task runs on opposite core
    }
}
/* Shared state handed to each tsk_blocks_frequently task */
typedef struct {
    SemaphoreHandle_t sem;   // Mutex juggled between the blocking tasks
    volatile bool deleted; // Check the deleted task doesn't keep running after being deleted
} tsk_blocks_param_t;
/* Task blocks as often as possible
   (two or more of these can share the same semaphore and "juggle" it around).
   Asserts p->deleted is still false before and after every blocking point —
   a deleted task must never execute again. */
static void tsk_blocks_frequently(void *param)
{
    tsk_blocks_param_t *p = (tsk_blocks_param_t *)param;
    SemaphoreHandle_t sem = p->sem;
    // Seed per-task so the delay pattern differs between the juggling tasks
    srand(xTaskGetTickCount() ^ (int)xTaskGetCurrentTaskHandle());
    while (1) {
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);   // Randomize timing relative to the other tasks
        assert(!p->deleted);
        xSemaphoreTake(sem, portMAX_DELAY);
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);
        assert(!p->deleted);
        xSemaphoreGive(sem);
    }
}
/*
Repeatedly creates tasks that block on a shared mutex, deletes them while they may
be blocked, and checks (via free heap) that deletion of blocked tasks leaks no
resources and that deleted tasks stop running (asserted inside the tasks).
*/
TEST_CASE("FreeRTOS Delete Blocked Tasks", "[freertos]")
{
    TaskHandle_t blocking_tasks[portNUM_PROCESSORS + 1]; // one per CPU, plus one unpinned task
    tsk_blocks_param_t params[portNUM_PROCESSORS + 1] = { 0 };
    unsigned before = heap_caps_get_free_size(MALLOC_CAP_8BIT);
    printf("Free memory at start %u\n", before);
    /* Any bugs will depend on relative timing of destroying the tasks, so create & delete many times.
       Stop early if it looks like some resources have not been properly cleaned up.
       (1000 iterations takes about 9 seconds on ESP32 dual core)
    */
    for(unsigned iter = 0; iter < 1000; iter++) {
        // Create everything
        SemaphoreHandle_t sem = xSemaphoreCreateMutex();
        for(unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            params[i].deleted = false;
            params[i].sem = sem;
            TEST_ASSERT_EQUAL(pdTRUE,
                              xTaskCreatePinnedToCore(tsk_blocks_frequently, "tsk_block", 4096, &params[i],
                                                      UNITY_FREERTOS_PRIORITY - 1, &blocking_tasks[i],
                                                      i < portNUM_PROCESSORS ? i : tskNO_AFFINITY));
        }
        vTaskDelay(5); // Let the tasks juggle the mutex for a bit
        for(unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            vTaskDelete(blocking_tasks[i]);
            params[i].deleted = true;   // From here on, the task asserting !deleted must never run
        }
        vTaskDelay(4); // Yield to the idle task for cleanup
        vSemaphoreDelete(sem);
        // Check we haven't leaked resources yet (allow 256 bytes of heap noise)
        TEST_ASSERT_GREATER_OR_EQUAL(before - 256, heap_caps_get_free_size(MALLOC_CAP_8BIT));
    }
}

View File

@@ -0,0 +1,198 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
Test of FreeRTOS task notifications. This test creates a sender and receiver
task under different core permutations. For each permutation, the sender task
will test the xTaskNotify(), xTaskNotifyGive(), xTaskNotifyFromISR(), and
vTaskNotifyGiveFromISR(), whereas the receiver task will test
xTaskNotifyWait() and ulTaskNotifyTake().
*/
#include <stdio.h>
#include <stdlib.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "driver/gptimer.h"
#include "unity.h"
#include "test_utils.h"
#define NO_OF_NOTIFS 4   // Notifications sent per sender/receiver permutation (one per tested API)
#define NO_OF_TASKS 2 //Sender and receiver
#define MESSAGE 0xFF     // Base notification value; shifted left by the sending core's ID
static uint32_t send_core_message = 0;   // Expected notification value for the current permutation
static TaskHandle_t recv_task_handle;    // Target of all notifications
static bool isr_give = false;   // Selects vTaskNotifyGiveFromISR() vs xTaskNotifyFromISR() in the timer ISR
static bool test_start = false; // Gates the timer ISR until the test proper begins
static gptimer_handle_t gptimers[portNUM_PROCESSORS];   // One timer per core, to send from ISR on a chosen core
static SemaphoreHandle_t trigger_send_semphr;   // Receiver signals sender to send the next notification
static SemaphoreHandle_t task_delete_semphr;    // Tasks signal completion before self-deleting
//Test tracking vars
static volatile uint32_t notifs_sent = 0;
static volatile uint32_t notifs_rec = 0;
static bool wrong_core = false;   // Set if a received value doesn't match the expected sending core
/*
GPTimer alarm ISR: sends a notification to the receiver task from interrupt context.
Tests vTaskNotifyGiveFromISR() or xTaskNotifyFromISR() depending on isr_give.
Returns true to request a task switch on ISR exit (false while the test hasn't started).
*/
static bool on_alarm_sender_cb(gptimer_handle_t timer, const gptimer_alarm_event_data_t *edata, void *user_ctx)
{
    gptimer_stop(timer);   // One-shot behavior: stop the timer on every alarm
    if (!test_start) {
        return false;
    }
    int curcore = xPortGetCoreID();
    if (isr_give) { //Test vTaskNotifyGiveFromISR() on same core
        notifs_sent++;
        vTaskNotifyGiveFromISR(recv_task_handle, NULL);
    } else { //Test xTaskNotifyFromISR()
        notifs_sent++;
        xTaskNotifyFromISR(recv_task_handle, (MESSAGE << curcore), eSetValueWithOverwrite, NULL);
    }
    // always trigger a task switch when exit ISR context
    return true;
}
/* Rearm the given gptimer: reset its count to zero and start it with a 1000-tick (1ms @ 1MHz) alarm */
static void test_gptimer_start(void *arg)
{
    gptimer_handle_t timer = (gptimer_handle_t)arg;
    gptimer_alarm_config_t cfg = {
        .reload_count = 0,
        .alarm_count = 1000,
    };
    TEST_ESP_OK(gptimer_set_raw_count(timer, 0));
    TEST_ESP_OK(gptimer_set_alarm_action(timer, &cfg));
    TEST_ESP_OK(gptimer_start(timer));
}
/*
Sender side of the notification test. Waits on trigger_send_semphr before each of
the four sends so the receiver controls pacing:
1. xTaskNotify() with a core-tagged value, 2. xTaskNotifyGive(),
3. xTaskNotifyFromISR() (via gptimer alarm), 4. vTaskNotifyGiveFromISR() (via gptimer alarm).
arg is the gptimer handle for this sender's core.
*/
static void sender_task(void *arg)
{
    gptimer_handle_t gptimer = (gptimer_handle_t)arg;
    int curcore = xPortGetCoreID();
    //Test xTaskNotify
    xSemaphoreTake(trigger_send_semphr, portMAX_DELAY);
    notifs_sent++;
    xTaskNotify(recv_task_handle, (MESSAGE << curcore), eSetValueWithOverwrite);
    //Test xTaskNotifyGive
    xSemaphoreTake(trigger_send_semphr, portMAX_DELAY);
    notifs_sent++;
    xTaskNotifyGive(recv_task_handle);
    //Test xTaskNotifyFromISR (the ISR increments notifs_sent and notifies)
    xSemaphoreTake(trigger_send_semphr, portMAX_DELAY);
    isr_give = false;
    test_gptimer_start(gptimer);
    //Test vTaskNotifyGiveFromISR
    xSemaphoreTake(trigger_send_semphr, portMAX_DELAY);
    isr_give = true;
    test_gptimer_start(gptimer);
    //Delete Task and Semaphores
    xSemaphoreGive(task_delete_semphr);
    vTaskDelete(NULL);
}
/*
Receiver side of the notification test. Alternates xTaskNotifyWait() and
ulTaskNotifyTake(), releasing trigger_send_semphr before each wait to prompt the
sender. Value-carrying notifications are checked against send_core_message to
verify they came from the expected core.
*/
static void receiver_task(void *arg)
{
    uint32_t notify_value;
    //Test xTaskNotifyWait from task
    xTaskNotifyWait(0, 0xFFFFFFFF, &notify_value, portMAX_DELAY);
    if (notify_value != send_core_message) {
        wrong_core = true;
    }
    notifs_rec++;
    //Test ulTaskNotifyTake from task
    xSemaphoreGive(trigger_send_semphr);
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    notifs_rec++;
    //Test xTaskNotifyWait from ISR
    xSemaphoreGive(trigger_send_semphr);
    xTaskNotifyWait(0, 0xFFFFFFFF, &notify_value, portMAX_DELAY);
    if (notify_value != send_core_message) {
        wrong_core = true;
    }
    notifs_rec++;
    //Test ulTaskNotifyTake from ISR
    xSemaphoreGive(trigger_send_semphr);
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    notifs_rec++;
    //Test complete, stop timer and delete task
    xSemaphoreGive(task_delete_semphr);
    vTaskDelete(NULL);
}
/* Runs pinned to a core: creates and enables that core's gptimer (1MHz resolution),
 * hooks the alarm callback, starts it once, signals completion, and self-deletes. */
static void install_gptimer_on_core(void *arg)
{
    int core = (int)arg;
    gptimer_config_t cfg = {
        .clk_src = GPTIMER_CLK_SRC_DEFAULT,
        .direction = GPTIMER_COUNT_UP,
        .resolution_hz = 1000000, // 1MHz, 1 tick = 1us
    };
    gptimer_event_callbacks_t callbacks = {
        .on_alarm = on_alarm_sender_cb,
    };
    TEST_ESP_OK(gptimer_new_timer(&cfg, &gptimers[core]));
    TEST_ESP_OK(gptimer_register_event_callbacks(gptimers[core], &callbacks, NULL));
    TEST_ESP_OK(gptimer_enable(gptimers[core]));
    test_gptimer_start(gptimers[core]);
    xSemaphoreGive(task_delete_semphr);
    vTaskDelete(NULL);
}
/*
Runs the sender/receiver notification test for every (sending core, receiving core)
permutation, verifying all four notify APIs deliver exactly NO_OF_NOTIFS
notifications with the value tagged by the correct sending core.
*/
TEST_CASE("Test Task_Notify", "[freertos]")
{
    test_start = false;
    trigger_send_semphr = xSemaphoreCreateBinary();
    task_delete_semphr = xQueueCreateCountingSemaphore(10, 0);
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        xTaskCreatePinnedToCore(install_gptimer_on_core, "install_gptimer", 4096, (void *const)i, UNITY_FREERTOS_PRIORITY + 1, NULL, i);
        TEST_ASSERT(xSemaphoreTake(task_delete_semphr, pdMS_TO_TICKS(1000)));
    }
    // wait the gptimer installation done on specific core
    vTaskDelay(10);
    // test start
    test_start = true;
    for (int i = 0; i < portNUM_PROCESSORS; i++) { //Sending Core
        for (int j = 0; j < portNUM_PROCESSORS; j++) { //Receiving Core
            //Reset Values
            notifs_sent = 0;
            notifs_rec = 0;
            wrong_core = false;
            send_core_message = (0xFF << i); //0xFF if sent from core 0, 0x1FE (0xFF << 1) if from core 1
            // receiver task has higher priority than sender task
            xTaskCreatePinnedToCore(receiver_task, "recv task", 1000, NULL, UNITY_FREERTOS_PRIORITY + 2, &recv_task_handle, j);
            xTaskCreatePinnedToCore(sender_task, "send task", 1000, gptimers[i], UNITY_FREERTOS_PRIORITY + 1, NULL, i);
            vTaskDelay(5); //Wait for task creation to complete
            xSemaphoreGive(trigger_send_semphr); //Trigger sender task
            for (int k = 0; k < NO_OF_TASKS; k++) { //Wait for sender and receiver task deletion
                TEST_ASSERT(xSemaphoreTake(task_delete_semphr, pdMS_TO_TICKS(2000)));
            }
            vTaskDelay(5); //Give time tasks to delete
            TEST_ASSERT_EQUAL(NO_OF_NOTIFS, notifs_sent);
            TEST_ASSERT_EQUAL(NO_OF_NOTIFS, notifs_rec);
            TEST_ASSERT_EQUAL(false, wrong_core);
        }
    }
    //Delete semaphores and timer ISRs
    vSemaphoreDelete(trigger_send_semphr);
    vSemaphoreDelete(task_delete_semphr);
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TEST_ESP_OK(gptimer_stop(gptimers[i]));
        TEST_ESP_OK(gptimer_disable(gptimers[i]));
        TEST_ESP_OK(gptimer_del_timer(gptimers[i]));
    }
}

View File

@@ -0,0 +1,104 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"
#include "freertos/stream_buffer.h"
#include "freertos/event_groups.h"
#include "unity.h"
#include "test_utils.h"
/*
Test that we can get a task's handle via the task's name using xTaskGetHandle()
*/
/* Task body that immediately suspends itself; the test looks it up by name and deletes it */
static void test_task_get_handle(void *arg)
{
    vTaskSuspend(NULL);
}
/*
Verifies xTaskGetHandle(): for each core, create a named task, look it up by name,
and check the returned handle matches the one from task creation.
*/
TEST_CASE("FreeRTOS xTaskGetHandle()", "[freertos]")
{
    vTaskDelay(10); //Small delay to let init/daemon tasks finish running
    for (int core = 0; core < configNUM_CORES; core++) {
        TaskHandle_t created_hdl;
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_task_get_handle, "test0", 1024, NULL, UNITY_FREERTOS_PRIORITY + 1, &created_hdl, core));
        vTaskDelay(10); //Delay to let task call vTaskSuspend()
        //Lookup by name must return the handle we got at creation
        TaskHandle_t looked_up_hdl = xTaskGetHandle("test0");
        TEST_ASSERT_EQUAL(created_hdl, looked_up_hdl);
        vTaskDelete(created_hdl);
        vTaskDelay(10); //Delay to let IDLE task clean up
    }
}
/*
Test that a blocked task (either on a delay or an event object) can be unblocked using xTaskAbortDelay()
*/
#define QUEUE_LEN 1
#define STREAM_BUFFER_LEN (sizeof(uint32_t))
/* All of the event objects the test task blocks on, bundled as the task argument */
typedef struct {
    QueueHandle_t queue;
    SemaphoreHandle_t sem;
    SemaphoreHandle_t mux;
    StreamBufferHandle_t stream_buffer;
    EventGroupHandle_t evt_grp;
} abort_delay_test_obj_t;
/*
Blocks indefinitely on each event object in turn. The unity task aborts each wait
with xTaskAbortDelay(), so every blocking call is expected to return a failure /
empty result rather than data.
*/
static void test_task_abort_delay(void *arg)
{
    abort_delay_test_obj_t *test_objs = (abort_delay_test_obj_t *)arg;
    //Block indefinitely on an empty queue. Delay should be aborted so we expect a failure to be returned
    uint32_t data = 0;
    TEST_ASSERT_EQUAL(pdFALSE, xQueueReceive(test_objs->queue, &data, portMAX_DELAY));
    TEST_ASSERT_EQUAL(pdFALSE, xSemaphoreTake(test_objs->sem, portMAX_DELAY));
    TEST_ASSERT_EQUAL(pdFALSE, xSemaphoreTake(test_objs->mux, portMAX_DELAY));
    uint32_t RxData;
    size_t xReceivedBytes = xStreamBufferReceive(test_objs->stream_buffer, (void *)&RxData, STREAM_BUFFER_LEN, portMAX_DELAY);
    TEST_ASSERT_EQUAL(0, xReceivedBytes);   // Aborted wait yields zero bytes
    EventBits_t uxBits = xEventGroupWaitBits(test_objs->evt_grp, 0xFF, pdTRUE, pdTRUE, portMAX_DELAY);
    TEST_ASSERT_EQUAL(0, uxBits);   // No bits were ever set
    vTaskDelete(NULL);
}
/*
Verifies xTaskAbortDelay(): for each core, a test task blocks indefinitely on a
series of event objects (queue, semaphore, mutex, stream buffer, event group) and
the unity task aborts each wait, which must return pdPASS each time.

Fix: the original version leaked all five event objects — they were created every
run but never deleted. They are now freed at the end of the test.
*/
TEST_CASE("FreeRTOS xTaskAbortDelay()", "[freertos]")
{
    abort_delay_test_obj_t test_objs;
    test_objs.queue = xQueueCreate(QUEUE_LEN, sizeof(uint32_t));
    test_objs.sem = xSemaphoreCreateBinary();
    test_objs.mux = xSemaphoreCreateMutex();
    test_objs.stream_buffer = xStreamBufferCreate(STREAM_BUFFER_LEN, 1);
    test_objs.evt_grp = xEventGroupCreate();
    for (int core = 0; core < configNUM_CORES; core++) {
        //Take the MUX so that test task will block on it
        TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(test_objs.mux, 0));
        TaskHandle_t task_hdl;
        xTaskCreatePinnedToCore(test_task_abort_delay, "test", 1024, (void *)&test_objs, UNITY_FREERTOS_PRIORITY + 1, &task_hdl, core);
        //Abort each of the test task's five indefinite waits in turn
        for (int i = 0; i < 5; i++) {
            vTaskDelay(10);
            TEST_ASSERT_EQUAL(pdPASS, xTaskAbortDelay(task_hdl));
        }
        //Return the mutex so the next iteration can take it again
        TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreGive(test_objs.mux));
    }
    vTaskDelay(10); //Delay to let IDLE task clean up
    //Free the event objects so repeated runs of this test don't leak heap
    vQueueDelete(test_objs.queue);
    vSemaphoreDelete(test_objs.sem);
    vSemaphoreDelete(test_objs.mux);
    vStreamBufferDelete(test_objs.stream_buffer);
    vEventGroupDelete(test_objs.evt_grp);
}

View File

@@ -0,0 +1,82 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "FreeRTOS.h"
#include "task.h"
#include "unity.h"
#include "portTestMacro.h"
/* ------------------------------------------------------------------------------------------------------------------ */
/*
Test Priority Scheduling (Single Core)
Purpose:
- Test that the single-core scheduler always schedules the highest priority ready task
Procedure:
- Raise the unityTask priority to (configMAX_PRIORITIES - 1)
- unityTask creates the following lower priority tasks
- task_A (configMAX_PRIORITIES - 2)
- task_B (configMAX_PRIORITIES - 3)
- UnityTask blocks for a short period of time to allow task_A to run
- Clean up and restore unityTask's original priority
Expected:
- task_A should run after unityTask blocks
- task_B should never have run
*/
#if ( configNUM_CORES == 1 )
#define UNITY_TASK_DELAY_TICKS      10   // How long the unity task blocks to let task_A run
static BaseType_t task_A_ran;   // Set by task_A; expected pdTRUE after the delay
static BaseType_t task_B_ran;   // Set by task_B; expected to stay pdFALSE
/* Higher-priority task: records that it ran, then spins forever so the
 * lower-priority task_B can never be scheduled */
static void task_A(void *arg)
{
    task_A_ran = pdTRUE;
    for (;;) {
        ;
    }
}
/* Lower-priority task: must never run while task_A spins; setting the flag
 * here would fail the test */
static void task_B(void *arg)
{
    task_B_ran = pdTRUE;
    for (;;) {
        ;
    }
}
/*
Single-core priority scheduling test: with task_A (higher prio) and task_B
(lower prio) both ready, blocking the unity task must run only task_A.
See the file-header comment for the full procedure.
*/
TEST_CASE("Tasks: Test priority scheduling", "[freertos]")
{
    TaskHandle_t task_A_handle;
    TaskHandle_t task_B_handle;
    task_A_ran = pdFALSE;
    task_B_ran = pdFALSE;
    /* Raise the priority of the unityTask */
    vTaskPrioritySet(NULL, configMAX_PRIORITIES - 1);
    /* Create task_A and task_B */
    xTaskCreate(task_A, "task_A", configTEST_DEFAULT_STACK_SIZE, (void *)xTaskGetCurrentTaskHandle(), configMAX_PRIORITIES - 2, &task_A_handle);
    xTaskCreate(task_B, "task_B", configTEST_DEFAULT_STACK_SIZE, (void *)xTaskGetCurrentTaskHandle(), configMAX_PRIORITIES - 3, &task_B_handle);
    /* Block to allow task_A to be scheduled */
    vTaskDelay(UNITY_TASK_DELAY_TICKS);
    /* Test that only task_A has run */
    TEST_ASSERT_EQUAL(pdTRUE, task_A_ran);
    TEST_ASSERT_EQUAL(pdFALSE, task_B_ran);
    vTaskDelete(task_A_handle);
    vTaskDelete(task_B_handle);
    /* Restore the priority of the unityTask */
    vTaskPrioritySet(NULL, configTEST_UNITY_TASK_PRIORITY);
}
#endif /* configNUM_CORES == 1 */

View File

@@ -0,0 +1,102 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <string.h>
#include "FreeRTOS.h"
#include "task.h"
#include "semphr.h"
#include "unity.h"
#include "portTestMacro.h"
/* ------------------------------------------------------------------------------------------------------------------ */
/*
Test Priority Scheduling SMP
Purpose:
- Test that the SMP scheduler always schedules the highest priority ready tasks for each core
Procedure:
- Raise the unityTask priority to (configMAX_PRIORITIES - 1)
- unityTask creates the following lower priority tasks for each core
- task_A (configMAX_PRIORITIES - 2) for each core
- task_B (configMAX_PRIORITIES - 3) for each core
- unityTask blocks for a short period of time to allow all of the task_As to run
- Clean up and restore unityTask's original priority
Expected:
- All of the task_As should be run by the scheduler
- None of the task_Bs should have run
*/
#if ( defined( CONFIG_FREERTOS_SMP ) && ( configNUM_CORES > 1 ) && ( configRUN_MULTIPLE_PRIORITIES == 1 ) ) \
|| ( !defined( CONFIG_FREERTOS_SMP ) && ( configNUM_CORES > 1 ) )
#define UNITY_TASK_DELAY_TICKS      10   // How long the unity task blocks to let the task_As run
static BaseType_t task_A_ran[configNUM_CORES];   // One flag per task_A; all expected pdTRUE
static BaseType_t task_B_ran[configNUM_CORES];   // One flag per task_B; all expected to stay pdFALSE
static void task_A(void *arg)
{
BaseType_t task_idx = (BaseType_t) arg;
task_A_ran[task_idx] = pdTRUE;
/* Keeping spinning to prevent the lower priority task_B from running */
while (1) {
;
}
}
static void task_B(void *arg)
{
/* The following should never be run due to task_B having a lower priority */
BaseType_t task_idx = (BaseType_t) arg;
task_B_ran[task_idx] = pdTRUE;
while (1) {
;
}
}
/*
SMP priority scheduling test: with one task_A (higher prio) and one task_B
(lower prio) ready per core, the scheduler must run every task_A and no task_B.
See the file-header comment for the full procedure.
*/
TEST_CASE("Tasks: Test priority scheduling (SMP)", "[freertos]")
{
    TaskHandle_t task_A_handles[configNUM_CORES];
    TaskHandle_t task_B_handles[configNUM_CORES];
    memset(task_A_ran, pdFALSE, sizeof(task_A_ran));
    memset(task_B_ran, pdFALSE, sizeof(task_B_ran));
    /* Raise the priority of the unityTask */
    vTaskPrioritySet(NULL, configMAX_PRIORITIES - 1);
    /* Create task_A for each core */
    for (UBaseType_t x = 0; x < configNUM_CORES; x++) {
        xTaskCreate(task_A, "task_A", configTEST_DEFAULT_STACK_SIZE, (void *)x, configMAX_PRIORITIES - 2, &task_A_handles[x]);
    }
    /* Create task_B for each core */
    for (UBaseType_t x = 0; x < configNUM_CORES; x++) {
        xTaskCreate(task_B, "task_B", configTEST_DEFAULT_STACK_SIZE, (void *)x, configMAX_PRIORITIES - 3, &task_B_handles[x]);
    }
    /* Block to ensure all the task_As to be scheduled */
    vTaskDelay(UNITY_TASK_DELAY_TICKS);
    /* Check that all the task_As have run, and none of the task_Bs have run */
    for (UBaseType_t x = 0; x < configNUM_CORES; x++) {
        TEST_ASSERT_EQUAL(pdTRUE, task_A_ran[x]);
        TEST_ASSERT_EQUAL(pdFALSE, task_B_ran[x]);
    }
    /* Cleanup */
    for (UBaseType_t x = 0; x < configNUM_CORES; x++) {
        vTaskDelete(task_A_handles[x]);
        vTaskDelete(task_B_handles[x]);
    }
    /* Restore the priority of the unityTask */
    vTaskPrioritySet(NULL, configTEST_UNITY_TASK_PRIORITY);
}
#endif /* ( defined( CONFIG_FREERTOS_SMP ) && ( configNUM_CORES > 1 ) && ( configRUN_MULTIPLE_PRIORITIES == 1 ) )
|| ( !defined( CONFIG_FREERTOS_SMP ) && ( configNUM_CORES > 1 ) ) */

View File

@@ -0,0 +1,163 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "FreeRTOS.h"
#include "task.h"
#include "semphr.h"
#include "unity.h"
#include "freertos_test_utils.h"
#include "portTestMacro.h"
/* ------------------------------------------------------------------------------------------------------------------ */
/*
Test vTaskDelay
Purpose:
- Test that vTaskDelay is accurate
Procedure:
- The test contains TEST_VTASKDELAY_ITERATIONS number of iterations. For each iteration...
- vTaskDelay(1) to align to next tick boundary
- Store current tick count and current time (using ref clock)
- vTaskDelay for TEST_VTASKDELAY_TICKS
- Get post delay tick count and ref clock time
- For single core, run the test directly from the UnityTask
- For SMP, run the test once on each core (using vTestOnAllCores())
Expected:
- The elapsed ticks should be TEST_VTASKDELAY_TICKS, with 1 tick of error allowed (in case ref clock functions last
long enough to cross a tick boundary).
- The elapsed time should be equivalent to TEST_VTASKDELAY_TICKS tick periods, with 1 tick period of error allowed
(in case ref clock functions last longer that a tick period).
*/
#if ( INCLUDE_vTaskDelay == 1 )
#define TEST_VTASKDELAY_TICKS 5 // Number of ticks to delay in test
#define TEST_VTASKDELAY_ITERATIONS 5 // Number of iterations in test
/*
Measures vTaskDelay(TEST_VTASKDELAY_TICKS) accuracy over several iterations using
both the tick count and the port's reference clock. Run directly or via
vTestOnAllCores(); arg is unused.
*/
static void test_vTaskDelay(void *arg)
{
    for (int i = 0; i < TEST_VTASKDELAY_ITERATIONS; i++) {
        TickType_t tick_start, tick_end;
        portTEST_REF_CLOCK_TYPE ref_clock_start, ref_clock_end;
        /* Delay until the next tick boundary */
        vTaskDelay(1);
        /* Get the current tick count and ref clock time */
        tick_start = xTaskGetTickCount();
        ref_clock_start = portTEST_REF_CLOCK_GET_TIME();
        vTaskDelay(TEST_VTASKDELAY_TICKS);
        /* Get the post delay tick count and ref clock time */
        tick_end = xTaskGetTickCount();
        ref_clock_end = portTEST_REF_CLOCK_GET_TIME();
        /* Check that elapsed ticks and ref clock is accurate. We allow 1 tick of error in case vTaskDelay() was called
         * right before/after the tick boundary. */
        #if ( configUSE_16_BIT_TICKS == 1 )
        TEST_ASSERT_UINT16_WITHIN(1, TEST_VTASKDELAY_TICKS, tick_end - tick_start);
        #else
        TEST_ASSERT_UINT32_WITHIN(1, TEST_VTASKDELAY_TICKS, tick_end - tick_start);
        #endif
        TEST_ASSERT_UINT32_WITHIN(portTEST_TICKS_TO_REF_CLOCK(1),
                                  portTEST_TICKS_TO_REF_CLOCK(TEST_VTASKDELAY_TICKS),
                                  ref_clock_end - ref_clock_start);
    }
}
/* Runs test_vTaskDelay on every core (SMP) or directly on this core (single core) */
TEST_CASE("Tasks: Test vTaskDelay", "[freertos]")
{
    portTEST_REF_CLOCK_INIT();
    #if ( configNUM_CORES > 1 )
    vTestOnAllCores(test_vTaskDelay, NULL, configTEST_DEFAULT_STACK_SIZE, configTEST_UNITY_TASK_PRIORITY - 1);
    #else
    /* Test vTaskDelay directly on the current core */
    test_vTaskDelay(NULL);
    #endif
    portTEST_REF_CLOCK_DEINIT();
}
#endif /* ( INCLUDE_vTaskDelay == 1 ) */
/* ------------------------------------------------------------------------------------------------------------------ */
/*
Test vTaskDelayUntil
Purpose:
- Test that vTaskDelayUntil is accurate
Procedure:
- The test contains TEST_VTASKDELAYUNTIL_ITERATIONS number of iterations. For each iteration...
- vTaskDelay(1) to align to next tick boundary
- Store current tick count and current time (using ref clock)
- Call vTaskDelayUntil() for TEST_VTASKDELAYUNTIL_TICKS, using the stored tick count as the previous wake time
- Get post delay tick count and ref clock time
- For single core, run the test directly from the UnityTask
- For SMP, run the test once on each core (using vTestOnAllCores())
Expected:
- The elapsed ticks should be exactly TEST_VTASKDELAYUNTIL_TICKS since vTaskDelayUntil() is relative to the previous
wake time
- The elapsed time should be equivalent to TEST_VTASKDELAYUNTIL_TICKS tick periods, with 1 tick period of error
allowed (in case ref clock functions last longer that a tick period).
*/
#if ( INCLUDE_xTaskDelayUntil == 1 )
#define TEST_VTASKDELAYUNTIL_TICKS 5 // Number of ticks to delay in test
#define TEST_VTASKDELAYUNTIL_ITERATIONS 5 // Number of iterations in test
/*
Measures vTaskDelayUntil(TEST_VTASKDELAYUNTIL_TICKS) accuracy over several
iterations. Elapsed ticks must be exact (the delay is relative to the recorded
wake time); the ref clock is allowed 1 tick period of error. arg is unused.
*/
static void test_vTaskDelayUntil(void *arg)
{
    /* Delay until the next tick boundary */
    vTaskDelay(1);
    for (int i = 0; i < TEST_VTASKDELAYUNTIL_ITERATIONS; i++) {
        TickType_t tick_start, tick_end, last_wake_tick;
        portTEST_REF_CLOCK_TYPE ref_clock_start, ref_clock_end;
        /* Get the current tick count and ref clock time */
        tick_start = xTaskGetTickCount();
        last_wake_tick = tick_start;
        ref_clock_start = portTEST_REF_CLOCK_GET_TIME();
        vTaskDelayUntil(&last_wake_tick, TEST_VTASKDELAYUNTIL_TICKS);
        /* Get the post delay tick count and ref clock time */
        tick_end = xTaskGetTickCount();
        ref_clock_end = portTEST_REF_CLOCK_GET_TIME();
        /* Check that the elapsed ticks is accurate. Elapsed ticks should be exact as vTaskDelayUntil() executes a
         * delay relative to last_wake_tick. */
        TEST_ASSERT_EQUAL(TEST_VTASKDELAYUNTIL_TICKS, tick_end - tick_start);
        TEST_ASSERT_EQUAL(tick_end, last_wake_tick);   // vTaskDelayUntil() updates last_wake_tick to the wake time
        /* Check that the elapsed ref clock time is accurate. We allow 1 tick time worth of error to account for the
         * the execution time of the ref clock functions. */
        TEST_ASSERT_UINT32_WITHIN(portTEST_TICKS_TO_REF_CLOCK(1),
                                  portTEST_TICKS_TO_REF_CLOCK(TEST_VTASKDELAYUNTIL_TICKS),
                                  ref_clock_end - ref_clock_start);
    }
}
/* Runs test_vTaskDelayUntil on every core (SMP) or directly on this core (single core) */
TEST_CASE("Tasks: Test vTaskDelayUntil", "[freertos]")
{
    portTEST_REF_CLOCK_INIT();
    #if ( configNUM_CORES > 1 )
    vTestOnAllCores(test_vTaskDelayUntil, NULL, configTEST_DEFAULT_STACK_SIZE, configTEST_UNITY_TASK_PRIORITY - 1);
    #else
    /* Test vTaskDelayUntil directly on the current core */
    test_vTaskDelayUntil(NULL);
    #endif
    portTEST_REF_CLOCK_DEINIT();
}
#endif /* ( INCLUDE_xTaskDelayUntil == 1 ) */

View File

@@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
Unit tests for FreeRTOS task priority get/set
*/
#include <esp_types.h>
#include <stdio.h>
#include <strings.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
#include "test_utils.h"
/* Increments the uint32 pointed to by param forever; the test infers which tasks
 * are being scheduled by watching which counters advance */
static void counter_task(void *param)
{
    volatile uint32_t *count_target = (volatile uint32_t *)param;
    for (;;) {
        (*count_target)++;
    }
}
/*
Verifies uxTaskPriorityGet()/vTaskPrioritySet(): two counter tasks per core at
adjacent priorities — only the higher one should count; after swapping their
priorities, the other one should count instead.
*/
TEST_CASE("Get/Set Priorities", "[freertos]")
{
    /* Two tasks per processor */
    TaskHandle_t tasks[portNUM_PROCESSORS][2] = { 0 };
    unsigned volatile counters[portNUM_PROCESSORS][2] = { 0 };
    TEST_ASSERT_EQUAL(UNITY_FREERTOS_PRIORITY, uxTaskPriorityGet(NULL));
    /* create a matrix of counter tasks on each core; task index 0 gets the higher priority */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        for (int task = 0; task < 2; task++) {
            xTaskCreatePinnedToCore(counter_task, "count", 2048, (void *)&(counters[cpu][task]), UNITY_FREERTOS_PRIORITY - task, &(tasks[cpu][task]), cpu);
        }
    }
    /* check they were created with the expected priorities */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        for (int task = 0; task < 2; task++) {
            TEST_ASSERT_EQUAL(UNITY_FREERTOS_PRIORITY - task, uxTaskPriorityGet(tasks[cpu][task]));
        }
    }
    vTaskDelay(10);
    /* at this point, only the higher priority tasks (first index) should be counting */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        TEST_ASSERT_NOT_EQUAL(0, counters[cpu][0]);
        TEST_ASSERT_EQUAL(0, counters[cpu][1]);
    }
    /* swap priorities! */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        vTaskPrioritySet(tasks[cpu][0], UNITY_FREERTOS_PRIORITY - 1);
        vTaskPrioritySet(tasks[cpu][1], UNITY_FREERTOS_PRIORITY);
    }
    /* check priorities have swapped... */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        TEST_ASSERT_EQUAL(UNITY_FREERTOS_PRIORITY -1, uxTaskPriorityGet(tasks[cpu][0]));
        TEST_ASSERT_EQUAL(UNITY_FREERTOS_PRIORITY, uxTaskPriorityGet(tasks[cpu][1]));
    }
    /* check the tasks which are counting have also swapped now... */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        unsigned old_counters[2];
        old_counters[0] = counters[cpu][0];
        old_counters[1] = counters[cpu][1];
        vTaskDelay(10);
        TEST_ASSERT_EQUAL(old_counters[0], counters[cpu][0]);       // demoted task stopped counting
        TEST_ASSERT_NOT_EQUAL(old_counters[1], counters[cpu][1]);   // promoted task is now counting
    }
    /* clean up */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        for (int task = 0; task < 2; task++) {
            vTaskDelete(tasks[cpu][task]);
        }
    }
}

View File

@@ -0,0 +1,394 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/* Tests for FreeRTOS task suspend & resume */
#include <stdio.h>
#include <string.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/timers.h"
#include "freertos/queue.h"
#include "unity.h"
#include "test_utils.h"
#include "driver/gptimer.h"
#ifndef CONFIG_FREERTOS_UNICORE
#include "esp_ipc.h"
#endif
#include "esp_freertos_hooks.h"
#include "esp_rom_sys.h"
#include "esp_timer.h"
/* Counter task counts a target variable forever */
/* Counter task: zeroes the target variable, then increments it once per tick forever */
static void task_count(void *vp_counter)
{
    volatile unsigned *target = (volatile unsigned *)vp_counter;
    for (*target = 0; ; vTaskDelay(1)) {
        (*target)++;
    }
}
/* Creates a counter task pinned to target_core, then repeatedly suspends and
 * resumes it, checking that the counter stops moving while the task is
 * suspended and advances again after resumption. */
static void test_suspend_resume(int target_core)
{
    volatile unsigned counter = 0;
    TaskHandle_t counter_task;
    // Counter task runs at a higher priority than this task, so it counts whenever not suspended
    xTaskCreatePinnedToCore(task_count, "Count", 2048,
                            (void *)&counter, UNITY_FREERTOS_PRIORITY + 1,
                            &counter_task, target_core);
    vTaskDelay(10);
    /* check some counting has happened */
    TEST_ASSERT_NOT_EQUAL(0, counter);
    // Do the next part a few times, just to be sure multiple suspends & resumes
    // work as expected...
    const int TEST_ITERATIONS = 5;
    for (int i = 0; i < TEST_ITERATIONS; i++) {
        vTaskSuspend(counter_task);
        unsigned suspend_count = counter;
        printf("Suspending @ %d\n", suspend_count);
        vTaskDelay(2);
        printf("Still suspended @ %d\n", counter);
        /* check the counter hasn't gone up while the task is suspended */
        TEST_ASSERT_EQUAL(suspend_count, counter);
        vTaskResume(counter_task);
        vTaskDelay(2);
        printf("Resumed @ %d\n", counter);
        /* check the counter is going up again now the task is resumed */
        TEST_ASSERT_NOT_EQUAL(suspend_count, counter);
    }
    vTaskDelete(counter_task);
}
/* Suspend/resume a counter task pinned to the same core as the unity task */
TEST_CASE("Suspend/resume task on same core", "[freertos]")
{
    test_suspend_resume(UNITY_FREERTOS_CPU);
}
#ifndef CONFIG_FREERTOS_UNICORE
/* Suspend/resume a counter task pinned to the opposite core (multicore only) */
TEST_CASE("Suspend/resume task on other core", "[freertos]")
{
    test_suspend_resume(!UNITY_FREERTOS_CPU);
}
#endif
/* Task suspends itself, then sets a flag and deletes itself */
/* Clears the flag, suspends itself; once resumed, sets the flag and self-deletes */
static void task_suspend_self(void *vp_resumed)
{
    volatile bool *resumed_flag = (volatile bool *)vp_resumed;
    *resumed_flag = false;
    vTaskSuspend(NULL); /* blocks here until another task calls vTaskResume() */
    *resumed_flag = true;
    vTaskDelete(NULL);
}
/* Creates a higher-priority task that suspends itself, checks it stays
 * suspended, then resumes it and checks it ran to completion. */
TEST_CASE("Suspend the current running task", "[freertos]")
{
    volatile bool resumed = false;
    TaskHandle_t suspend_task;
    // Created at a higher priority, so it runs (and suspends itself) immediately
    xTaskCreatePinnedToCore(task_suspend_self, "suspend_self", 2048,
                            (void *)&resumed, UNITY_FREERTOS_PRIORITY + 1,
                            &suspend_task, UNITY_FREERTOS_CPU);
    vTaskDelay(1);
    TEST_ASSERT_FALSE(resumed);
    vTaskResume(suspend_task);
    // Shouldn't need any delay here, as task should resume on this CPU immediately
    // (higher priority and pinned to the same CPU as the unity task)
    TEST_ASSERT_TRUE(resumed);
}
static volatile bool timer_isr_fired;
static gptimer_handle_t gptimer = NULL;
/* Timer ISR clears interrupt, sets flag, then resumes the task supplied in the
* callback argument.
*/
/* Timer alarm callback: stops the timer, records that the ISR fired, then
 * resumes the task supplied via user_ctx. The return value indicates whether
 * a context switch should be requested on ISR exit. */
bool on_timer_alarm_cb(gptimer_handle_t timer, const gptimer_alarm_event_data_t *edata, void *user_ctx)
{
    gptimer_stop(timer);
    timer_isr_fired = true;
    return (xTaskResumeFromISR((TaskHandle_t)user_ctx) == pdTRUE);
}
/* Task suspends itself, then sets parameter value to the current timer group counter and deletes itself */
/* Zeroes the output counter and suspends itself; when resumed (from the timer
 * ISR), samples the GP timer's raw count into the output and self-deletes. */
static IRAM_ATTR void task_suspend_self_with_timer(void *vp_resumed)
{
    volatile uint64_t *count_out = vp_resumed;
    *count_out = 0;
    vTaskSuspend(NULL); /* resumed by xTaskResumeFromISR() in the timer callback */
    gptimer_get_raw_count(gptimer, (uint64_t *)count_out);
    vTaskDelete(NULL);
}
/* Create a task which suspends itself, then resume it from a timer
* interrupt. */
/* Create a task which suspends itself, then resume it from a timer
 * interrupt. Checks the task was resumed promptly after the alarm fired. */
static void test_resume_task_from_isr(int target_core)
{
    volatile uint64_t resumed_counter = 99;
    TaskHandle_t suspend_task;
    // Higher priority than us, so the task runs (and suspends itself) immediately
    xTaskCreatePinnedToCore(task_suspend_self_with_timer, "suspend_self", 2048,
                            (void *)&resumed_counter, UNITY_FREERTOS_PRIORITY + 1,
                            &suspend_task, target_core);
    // delay to make sure the task has suspended itself
    vTaskDelay(1);
    TEST_ASSERT_EQUAL(0, resumed_counter);
    /* Configure timer ISR */
    gptimer_config_t timer_config = {
        .clk_src = GPTIMER_CLK_SRC_DEFAULT,
        .direction = GPTIMER_COUNT_UP,
        .resolution_hz = 1000000, // 1MHz, 1 tick = 1us
    };
    TEST_ESP_OK(gptimer_new_timer(&timer_config, &gptimer));
    timer_isr_fired = false;
    vTaskDelay(1); // Make sure we're at the start of a new tick
    gptimer_alarm_config_t alarm_config = {
        .alarm_count = 1000000 / configTICK_RATE_HZ / 2, // Half a FreeRTOS tick, in us
        .reload_count = 0,
    };
    gptimer_event_callbacks_t cbs = {
        .on_alarm = on_timer_alarm_cb, // Resumes suspend_task from the ISR
    };
    TEST_ESP_OK(gptimer_register_event_callbacks(gptimer, &cbs, suspend_task));
    TEST_ESP_OK(gptimer_set_alarm_action(gptimer, &alarm_config));
    TEST_ESP_OK(gptimer_enable(gptimer));
    TEST_ESP_OK(gptimer_start(gptimer));
    // wait for the timer interrupt to fire
    vTaskDelay(2);
    TEST_ASSERT_TRUE(timer_isr_fired);
    // check the task was resumed
    TEST_ASSERT_NOT_EQUAL(0, resumed_counter);
    // The task should have woken shortly after the timer interrupt event (note: task may be a flash cache miss)
    // NOTE(review): an earlier comment said "within 500us" but the assertion below allows only 100us — confirm intended margin
    printf("alarm value %llu task resumed at %u\n", alarm_config.alarm_count, (unsigned)resumed_counter);
    TEST_ASSERT_UINT32_WITHIN(100, alarm_config.alarm_count, (unsigned)resumed_counter);
    // clean up
    TEST_ESP_OK(gptimer_disable(gptimer));
    TEST_ESP_OK(gptimer_del_timer(gptimer));
}
/* Resume a suspended task from a timer ISR on the same core */
TEST_CASE("Resume task from ISR (same core)", "[freertos]")
{
    test_resume_task_from_isr(UNITY_FREERTOS_CPU);
}
#ifndef CONFIG_FREERTOS_UNICORE
/* Resume a suspended task (pinned to the other core) from a timer ISR (multicore only) */
TEST_CASE("Resume task from ISR (other core)", "[freertos]")
{
    test_resume_task_from_isr(!UNITY_FREERTOS_CPU);
}
#if !CONFIG_FREERTOS_SMP
/*
Scheduler suspension behavior has changed in SMP FreeRTOS, so these tests are disabled for SMP FreeRTOS.
See IDF-5201
*/
static volatile bool block;
static bool suspend_both_cpus;
/* IPC callback executed on the other CPU: suspends that CPU's scheduler and
 * busy-waits until the global 'block' flag is cleared, then resumes it. */
static void IRAM_ATTR suspend_scheduler_while_block_set(void *arg)
{
    vTaskSuspendAll();
    while (block) { }; // Spin until resume_scheduler_on_both_cpus() clears 'block'
    esp_rom_delay_us(1);
    xTaskResumeAll();
}
/* Suspends the scheduler on the calling CPU; if suspend_both_cpus is set, also
 * suspends the other CPU's scheduler via an IPC call that spins on 'block'. */
static void IRAM_ATTR suspend_scheduler_on_both_cpus(void)
{
    block = true; // Keeps the other CPU suspended until resume_scheduler_on_both_cpus()
    if (suspend_both_cpus) {
        TEST_ESP_OK(esp_ipc_call((xPortGetCoreID() == 0) ? 1 : 0, &suspend_scheduler_while_block_set, NULL));
    }
    vTaskSuspendAll();
}
/* Resumes the scheduler on the calling CPU and releases the other CPU's spin
 * loop (if it was suspended via suspend_scheduler_on_both_cpus()). */
static void IRAM_ATTR resume_scheduler_on_both_cpus(void)
{
    block = false; // Releases suspend_scheduler_while_block_set() on the other CPU
    xTaskResumeAll();
}
static const int waiting_ms = 2000;
static const int delta_ms = 100;
static int duration_wait_task_ms;
static int duration_ctrl_task_ms;
/* Blocks in vTaskDelay() for waiting_ms, records the wall-clock time the delay
 * actually took into duration_wait_task_ms, then self-deletes. */
static void waiting_task(void *pvParameters)
{
    const int core = xPortGetCoreID();
    const int64_t t_start = esp_timer_get_time();
    printf("Start waiting_task cpu=%d\n", core);
    vTaskDelay(waiting_ms / portTICK_PERIOD_MS);
    duration_wait_task_ms = (esp_timer_get_time() - t_start) / 1000;
    printf("Finish waiting_task cpu=%d, time=%d ms\n", core, duration_wait_task_ms);
    vTaskDelete(NULL);
}
/* Suspends the scheduler (on one or both CPUs, per suspend_both_cpus),
 * busy-waits for longer than waiting_task's delay, then resumes and records
 * its own elapsed wall-clock time into duration_ctrl_task_ms. */
static void control_task(void *pvParameters)
{
    int cpu_id = xPortGetCoreID();
    esp_rom_delay_us(2000); // busy-wait so that waiting_task starts first
    printf("Start control_task cpu=%d\n", cpu_id);
    int64_t start_time = esp_timer_get_time();
    suspend_scheduler_on_both_cpus();
    // Busy-wait past waiting_task's entire delay while the scheduler(s) are suspended
    esp_rom_delay_us(waiting_ms * 1000 + delta_ms * 1000);
    resume_scheduler_on_both_cpus();
    duration_ctrl_task_ms = (esp_timer_get_time() - start_time) / 1000;
    printf("Finish control_task cpu=%d, time=%d ms\n", cpu_id, duration_ctrl_task_ms);
    vTaskDelete(NULL);
}
/* Runs control_task on 'cpu' and waiting_task on the other CPU, then checks
 * that waiting_task's delay expired on time despite the scheduler suspension. */
static void test_scheduler_suspend1(int cpu)
{
    /* This test checks the case where both CPUs were in the suspended state and are then resumed.
     * A task whose delay time elapsed while the scheduler was suspended should in any case be ready to run.
     * (In an old implementation of the xTaskIncrementTick function, the counting for waiting_task() would
     * continue (excluding time spent suspended) after control_task() finished.)
     */
    duration_wait_task_ms = 0;
    duration_ctrl_task_ms = 0;
    printf("Test for CPU%d\n", cpu);
    int other_cpu = (cpu == 0) ? 1 : 0;
    xTaskCreatePinnedToCore(&waiting_task, "waiting_task", 8192, NULL, 5, NULL, other_cpu);
    xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
    // Wait long enough for both tasks to finish
    vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
    TEST_ASSERT_INT_WITHIN(4, waiting_ms + delta_ms + 4, duration_ctrl_task_ms);
    if (suspend_both_cpus == false && cpu == 1) {
        // CPU0 continues to increment the tick count, and waiting_task does not depend on the suspended scheduler on CPU1
        TEST_ASSERT_INT_WITHIN(2, waiting_ms, duration_wait_task_ms);
    } else {
        // waiting_task only becomes ready once control_task resumes the scheduler
        TEST_ASSERT_INT_WITHIN(4, waiting_ms + delta_ms + 4, duration_wait_task_ms);
    }
    printf("\n");
}
/* Run test_scheduler_suspend1 for each CPU with both schedulers suspended */
TEST_CASE("Test the waiting task not missed due to scheduler suspension on both CPUs", "[freertos]")
{
    printf("Suspend both CPUs:\n");
    suspend_both_cpus = true;
    test_scheduler_suspend1(0);
    test_scheduler_suspend1(1);
}
/* Run test_scheduler_suspend1 for each CPU with only one scheduler suspended */
TEST_CASE("Test the waiting task not missed due to scheduler suspension on one CPU", "[freertos]")
{
    printf("Suspend only one CPU:\n");
    suspend_both_cpus = false;
    test_scheduler_suspend1(0);
    test_scheduler_suspend1(1);
}
static uint32_t tick_hook_ms[2];
/* Tick hook: accumulates elapsed milliseconds for whichever core it runs on */
static void IRAM_ATTR tick_hook(void)
{
    const int core = xPortGetCoreID();
    tick_hook_ms[core] += portTICK_PERIOD_MS;
}
/* Registers a tick hook on both CPUs, runs control_task (which suspends and
 * resumes the scheduler), then checks both CPUs accumulated the same tick count. */
static void test_scheduler_suspend2(int cpu)
{
    esp_register_freertos_tick_hook_for_cpu(tick_hook, 0);
    esp_register_freertos_tick_hook_for_cpu(tick_hook, 1);
    memset(tick_hook_ms, 0, sizeof(tick_hook_ms));
    printf("Test for CPU%d\n", cpu);
    xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
    // Wait long enough for control_task to finish
    vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
    esp_deregister_freertos_tick_hook(tick_hook);
    // NOTE(review): tick_hook_ms[] is uint32_t but printed with %d — consider PRIu32
    printf("tick_hook_ms[cpu0] = %d, tick_hook_ms[cpu1] = %d\n", tick_hook_ms[0], tick_hook_ms[1]);
    TEST_ASSERT_INT_WITHIN(portTICK_PERIOD_MS * 2, waiting_ms * 2, tick_hook_ms[0]);
    TEST_ASSERT_INT_WITHIN(portTICK_PERIOD_MS * 2, waiting_ms * 2, tick_hook_ms[1]);
    printf("\n");
}
/* Run test_scheduler_suspend2 for each CPU, in both suspension modes */
TEST_CASE("Test suspend-resume CPU. The number of tick_hook should be the same for both CPUs", "[freertos]")
{
    printf("Suspend both CPUs:\n");
    suspend_both_cpus = true;
    test_scheduler_suspend2(0);
    test_scheduler_suspend2(1);
    printf("Suspend only one CPU:\n");
    suspend_both_cpus = false;
    test_scheduler_suspend2(0);
    test_scheduler_suspend2(1);
}
static int duration_timer_ms;
/* Callback of a 1-tick auto-reload FreeRTOS timer: accumulates elapsed ms */
static void timer_callback(TimerHandle_t arg)
{
    duration_timer_ms = duration_timer_ms + portTICK_PERIOD_MS;
}
/* Starts a 1-tick periodic FreeRTOS timer, runs control_task (which suspends
 * and resumes the scheduler), then checks the timer's accumulated time covers
 * the whole test duration. */
static void test_scheduler_suspend3(int cpu)
{
    duration_timer_ms = 0;
    duration_ctrl_task_ms = 0;
    printf("Test for CPU%d\n", cpu);
    // Auto-reloading 1-tick timer; its callback accumulates duration_timer_ms
    TimerHandle_t count_time = xTimerCreate("count_time", 1, pdTRUE, NULL, timer_callback);
    xTimerStart( count_time, portMAX_DELAY);
    xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
    vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
    xTimerDelete(count_time, portMAX_DELAY);
    printf("Finish duration_timer_ms=%d ms\n", duration_timer_ms);
    // Timer time should cover the full test duration despite the suspension
    TEST_ASSERT_INT_WITHIN(2, waiting_ms * 2, duration_timer_ms);
    TEST_ASSERT_INT_WITHIN(5, waiting_ms + delta_ms, duration_ctrl_task_ms);
    printf("\n");
}
/* Run test_scheduler_suspend3 for each CPU, in both suspension modes */
TEST_CASE("Test suspend-resume CPU works with xTimer", "[freertos]")
{
    printf("Suspend both CPUs:\n");
    suspend_both_cpus = true;
    test_scheduler_suspend3(0);
    test_scheduler_suspend3(1);
    printf("Suspend only one CPU:\n");
    suspend_both_cpus = false;
    test_scheduler_suspend3(0);
    test_scheduler_suspend3(1);
}
#endif // CONFIG_FREERTOS_UNICORE
#endif // !CONFIG_FREERTOS_SMP

View File

@@ -0,0 +1,152 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#if CONFIG_FREERTOS_ENABLE_TASK_SNAPSHOT
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/task_snapshot.h"
#include "esp_cpu.h"
#include "unity.h"
#include "sdkconfig.h"
#define TEST_MAX_TASKS_NUM 32
#define NUM_TASKS_PER_LIST 2
#define TASK_PRIORITY (configMAX_PRIORITIES - 2)
/* Task that spins forever, so it always sits on a ready list */
static void ready_task(void *arg)
{
    for (;;) {
    }
}
/* Task that parks itself on a blocked (delayed) list.
 * Delays for portMAX_DELAY - 1: a delay of exactly portMAX_DELAY would place
 * the task on the suspended list instead. */
static void blocked_task(void *arg)
{
    vTaskDelay(portMAX_DELAY - 1);
}
/* Task that immediately parks itself on the suspended list */
static void suspended_task(void *arg)
{
    vTaskSuspend(NULL);
}
/* Creates NUM_TASKS_PER_LIST sets of (ready, blocked, suspended) tasks, then
 * freezes the system (interrupts disabled on this core, other core stalled)
 * so that snapshots can be taken while task lists cannot change.
 * task_list: output array of created task handles
 * num_tasks_ret: output count of created tasks
 * old_priority_ret: output of the caller's original priority (restored by teardown()) */
static void setup(TaskHandle_t *task_list, int *num_tasks_ret, UBaseType_t *old_priority_ret)
{
    // Raise our priority so that we aren't preempted
    *old_priority_ret = uxTaskPriorityGet(NULL);
    vTaskPrioritySet(NULL, configMAX_PRIORITIES - 1);
    // Create tasks
    int num_tasks = 0;
    for (int i = 0; i < NUM_TASKS_PER_LIST; i++) {
        //Ready task (spins forever)
        xTaskCreate(ready_task, "ready", 1024, NULL, TASK_PRIORITY, &(task_list[num_tasks]));
        num_tasks++;
        //Blocked task (long vTaskDelay)
        xTaskCreate(blocked_task, "blkd", 1024, NULL, TASK_PRIORITY, &(task_list[num_tasks]));
        num_tasks++;
        //Suspended task (suspends itself)
        xTaskCreate(suspended_task, "susp", 1024, NULL, TASK_PRIORITY, &(task_list[num_tasks]));
        num_tasks++;
    }
    *num_tasks_ret = num_tasks;
    // Short delay to allow tasks to spin up
    vTaskDelay(10);
    // Stop preemption on this core, and stall the other core
    taskDISABLE_INTERRUPTS();
#if !CONFIG_FREERTOS_UNICORE
    esp_cpu_stall(!xPortGetCoreID());
#endif
}
/* Asserts that every task handle in task_list appears among the snapshots */
static void check_snapshots(TaskHandle_t *task_list, int num_tasks, TaskSnapshot_t *task_snapshots, UBaseType_t num_snapshots)
{
    for (int t = 0; t < num_tasks; t++) {
        bool present = false;
        // Scan the snapshot array for this task's TCB
        for (UBaseType_t s = 0; s < num_snapshots && !present; s++) {
            present = (task_list[t] == (TaskHandle_t)task_snapshots[s].pxTCB);
        }
        TEST_ASSERT(present);
    }
}
/* Undoes setup(): unstalls the other core, re-enables interrupts, deletes the
 * created tasks, and restores the caller's original priority. */
static void teardown(TaskHandle_t *task_list, int num_tasks, UBaseType_t old_priority)
{
    // Resume other cores and allow preemption
#if !CONFIG_FREERTOS_UNICORE
    esp_cpu_unstall(!xPortGetCoreID());
#endif
    taskENABLE_INTERRUPTS();
    for (int i = 0; i < num_tasks; i++) {
        vTaskDelete(task_list[i]);
    }
    // Restore priority
    vTaskPrioritySet(NULL, old_priority);
    // Short delay to allow tasks to clean up
    vTaskDelay(10);
}
/* Snapshot all tasks in one call via uxTaskGetSnapshotAll() and check every
 * task created by setup() is present in the result. */
TEST_CASE("Task snapshot: Get all", "[freertos]")
{
    // Short delay to allow both cores to spin up
    vTaskDelay(10);
    TaskHandle_t task_list[TEST_MAX_TASKS_NUM];
    int num_tasks;
    UBaseType_t old_priority;
    setup(task_list, &num_tasks, &old_priority);
    // Get task snapshots using uxTaskGetSnapshotAll()
    TaskSnapshot_t task_snapshots[TEST_MAX_TASKS_NUM];
    UBaseType_t tcb_size;
    UBaseType_t num_snapshots;
    num_snapshots = uxTaskGetSnapshotAll(task_snapshots, TEST_MAX_TASKS_NUM, &tcb_size);
    TEST_ASSERT_LESS_OR_EQUAL(TEST_MAX_TASKS_NUM, num_snapshots);
    check_snapshots(task_list, num_tasks, task_snapshots, num_snapshots);
    teardown(task_list, num_tasks, old_priority);
}
/* Snapshot tasks one at a time by iterating with pxTaskGetNext() and
 * vTaskGetSnapshot(), then check every task created by setup() is present. */
TEST_CASE("Task snapshot: Iterate", "[freertos]")
{
    // Short delay to allow both cores to spin up
    vTaskDelay(10);
    TaskHandle_t task_list[TEST_MAX_TASKS_NUM];
    int num_tasks;
    UBaseType_t old_priority;
    setup(task_list, &num_tasks, &old_priority);
    // Get task snapshots using pxTaskGetNext() and vTaskGetSnapshot()
    TaskSnapshot_t task_snapshots[TEST_MAX_TASKS_NUM];
    UBaseType_t num_snapshots = 0;
    TaskHandle_t cur_task_handle = pxTaskGetNext(NULL);
    while (cur_task_handle != NULL) {
        // Guard the write below: previously the bound was only asserted after
        // the loop, so more than TEST_MAX_TASKS_NUM tasks would overflow
        // task_snapshots[] before the check ran
        TEST_ASSERT_LESS_THAN(TEST_MAX_TASKS_NUM, num_snapshots);
        // Get the task's snapshot
        BaseType_t Result = vTaskGetSnapshot(cur_task_handle, &task_snapshots[num_snapshots]);
        TEST_ASSERT_EQUAL(pdTRUE, Result);
        num_snapshots++;
        cur_task_handle = pxTaskGetNext(cur_task_handle);
    }
    TEST_ASSERT_LESS_OR_EQUAL(TEST_MAX_TASKS_NUM, num_snapshots);
    check_snapshots(task_list, num_tasks, task_snapshots, num_snapshots);
    teardown(task_list, num_tasks, old_priority);
}
#endif // CONFIG_FREERTOS_ENABLE_TASK_SNAPSHOT

View File

@@ -0,0 +1,514 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <stdbool.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "driver/gptimer.h"
#include "esp_rom_sys.h"
#include "unity.h"
#include "test_utils.h"
/*
Scheduler suspension behavior differs significantly in SMP FreeRTOS, thus none of these tests apply to SMP FreeRTOS
*/
#if !CONFIG_FREERTOS_SMP
/*
GP timer is used to trigger an interrupt. Test cases will register an interrupt callback called from the timer's
interrupt callback. The functions below simplify the interrupt registration/trigger/deregistration process.
*/
static gptimer_handle_t gptimer = NULL;
static bool (*registered_intr_callback)(void *) = NULL;
/* GP timer alarm callback: dispatches to the currently registered test
 * callback (if any) and reports whether a yield should be requested. */
static bool on_timer_alarm_cb(gptimer_handle_t timer, const gptimer_alarm_event_data_t *edata, void *user_ctx)
{
    if (registered_intr_callback == NULL) {
        return false; // No test callback registered; no yield needed
    }
    return registered_intr_callback(user_ctx);
}
/* Creates and configures a GP timer with a 1ms auto-reload alarm and records
 * 'callback' to be dispatched from the alarm ISR with 'callback_arg'.
 * The timer is enabled but not started; call trigger_intr_cb() to start it. */
static void register_intr_cb(bool (*callback)(void *), void *callback_arg)
{
    gptimer_handle_t gptimer_temp;
    // Initialize a GP timer used to trigger an interrupt
    gptimer_config_t timer_config = {
        .clk_src = GPTIMER_CLK_SRC_DEFAULT,
        .direction = GPTIMER_COUNT_UP,
        .resolution_hz = 1000000, // 1MHz, 1 tick=1us
    };
    TEST_ESP_OK(gptimer_new_timer(&timer_config, &gptimer_temp));
    // Configure an alarm (of 1ms) and callback for the timer
    gptimer_alarm_config_t alarm_config = {
        .reload_count = 0,
        .alarm_count = 1000, // alarm period 1ms
        .flags.auto_reload_on_alarm = true,
    };
    gptimer_event_callbacks_t cbs = {
        .on_alarm = on_timer_alarm_cb, // Dispatches to registered_intr_callback
    };
    TEST_ESP_OK(gptimer_register_event_callbacks(gptimer_temp, &cbs, callback_arg));
    TEST_ESP_OK(gptimer_enable(gptimer_temp));
    TEST_ESP_OK(gptimer_set_alarm_action(gptimer_temp, &alarm_config));
    // Publish the handle and callback only after the timer is fully configured
    gptimer = gptimer_temp;
    registered_intr_callback = callback;
}
/* Starts the GP timer set up by register_intr_cb(); the alarm (and thus the
 * registered callback) should fire 1ms later. */
static void trigger_intr_cb(void)
{
    // Interrupt should be triggered in 1ms
    TEST_ESP_OK(gptimer_start(gptimer));
}
/* Stops and deletes the GP timer and clears the registered test callback. */
static void deregister_intr_cb(void)
{
    gptimer_handle_t gptimer_temp = gptimer;
    // Clear the globals first, so the alarm dispatcher sees no callback
    gptimer = NULL;
    registered_intr_callback = NULL;
    TEST_ESP_OK(gptimer_stop(gptimer_temp));
    TEST_ESP_OK(gptimer_disable(gptimer_temp));
    TEST_ESP_OK(gptimer_del_timer(gptimer_temp));
}
/* ---------------------------------------------------------------------------------------------------------------------
Test vTaskSuspendAll() and xTaskResumeAll() basic
Purpose:
- Test that vTaskSuspendAll() suspends the scheduler for the calling core
- Test that xTaskResumeAll() resumes scheduling for the calling core
Procedure:
- Call vTaskSuspendAll() to suspend the scheduler
- Call xTaskResumeAll() to resume the scheduler
Expected:
- xTaskGetSchedulerState() should return the correct state
--------------------------------------------------------------------------------------------------------------------- */
/* Suspend then resume the scheduler on the current core, checking the state
 * reported by xTaskGetSchedulerState() at each step. */
TEST_CASE("Test vTaskSuspendAll and xTaskResumeAll basic", "[freertos]")
{
    // Check scheduler is running on the current core
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
    vTaskSuspendAll();
    TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
    xTaskResumeAll();
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
}
/* ---------------------------------------------------------------------------------------------------------------------
Test vTaskSuspendAll() and xTaskResumeAll() multicore
Only runs on !CONFIG_FREERTOS_UNICORE
Purpose:
- Test that vTaskSuspendAll() suspends scheduling only for the calling core
- Test that xTaskResumeAll() resumes scheduling only for the calling core
Procedure:
Each core gets tested in the role of core A
- Create a taskA pinned to one core (e.g., core A) that will disable the scheduler
- Create a "taskB" pinned to another core (e.g., core B) that will not disable the scheduler
- taskA calls vTaskSuspendAll() to suspend the scheduler on core A
- taskA calls xTaskResumeAll() to resume the scheduler on core A
Expected:
- vTaskSuspendAll() should only disable the scheduler for the suspended core A
- xTaskResumeAll() should resume the scheduler for the suspended core A
- Scheduler on core B should remain enabled
--------------------------------------------------------------------------------------------------------------------- */
#if !CONFIG_FREERTOS_UNICORE
#define TEST_BASIC_BUSY_DELAY_US 10000
static volatile int taskA_sync;
static volatile int taskB_sync;
/* Task A: suspends then resumes the scheduler on its own core, advancing in
 * lock-step with task B (on the other core) via the taskA_sync/taskB_sync
 * counters. Gives the done semaphore (arg) when finished. */
static void test_multicore_taskA(void *arg)
{
    // Wait to be started
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    // Check scheduler on core A is enabled
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
    taskA_sync++;
    while (taskB_sync != 1) {
        ; // Wait for task B to complete its check
    }
    // Suspend the scheduler on core A
    vTaskSuspendAll();
    // Check scheduler is suspended on core A
    TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
    taskA_sync++;
    while (taskB_sync != 2) {
        ; // Wait for task B to complete its check
    }
    // Busy spin for a while to simulate work done while scheduler is suspended
    esp_rom_delay_us(TEST_BASIC_BUSY_DELAY_US);
    // Check scheduler is still suspended on core A
    TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
    taskA_sync++;
    while (taskB_sync != 3) {
        ; // Wait for task B to complete its check
    }
    // Resume the scheduler on core A
    xTaskResumeAll();
    // Check that the scheduler has resumed on core A
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
    taskA_sync++;
    while (taskB_sync != 4) {
        ; // Wait for task B to complete its check
    }
    // Indicate done and wait to be deleted
    SemaphoreHandle_t done_sem = (SemaphoreHandle_t)arg;
    xSemaphoreGive(done_sem);
    vTaskSuspend(NULL);
}
/* Task B: after each lock-step advance by task A, verifies that the scheduler
 * on its own core (core B) is still running. Gives the done semaphore (arg)
 * when finished. */
static void test_multicore_taskB(void *arg)
{
    // Wait to be started
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    for (int i = 1; i <= 4; i++) {
        // Wait for the next sync step from task A
        while (taskA_sync != i) {
            ;
        }
        // Check that scheduler is still running on core B
        TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
        taskB_sync++;
    }
    // Indicate done and wait to be deleted
    SemaphoreHandle_t done_sem = (SemaphoreHandle_t)arg;
    xSemaphoreGive(done_sem);
    vTaskSuspend(NULL);
}
/* Run the multicore suspend/resume test, with each core taking the role of core A */
TEST_CASE("Test vTaskSuspendAll() and xTaskResumeAll() multicore", "[freertos]")
{
    SemaphoreHandle_t done_sem = xSemaphoreCreateCounting(portNUM_PROCESSORS, 0);
    TEST_ASSERT_NOT_EQUAL(NULL, done_sem);
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        // Create tasks on core A (core i) and core B (the other core)
        TaskHandle_t taskA_hdl;
        TaskHandle_t taskB_hdl;
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_multicore_taskA, "taskA", 2048, (void *)done_sem, UNITY_FREERTOS_PRIORITY - 1, &taskA_hdl, i));
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_multicore_taskB, "taskB", 2048, (void *)done_sem, UNITY_FREERTOS_PRIORITY - 1, &taskB_hdl, !i));
        // Start the tasks and wait for their completion
        taskA_sync = 0;
        taskB_sync = 0;
        xTaskNotifyGive(taskA_hdl);
        xTaskNotifyGive(taskB_hdl);
        for (int j = 0; j < 2; j++) {
            xSemaphoreTake(done_sem, portMAX_DELAY);
        }
        // Cleanup the tasks
        vTaskDelete(taskA_hdl);
        vTaskDelete(taskB_hdl);
    }
    vSemaphoreDelete(done_sem);
    // Add a short delay to allow the idle task to free any remaining task memory
    vTaskDelay(10);
}
#endif // !CONFIG_FREERTOS_UNICORE
/* ---------------------------------------------------------------------------------------------------------------------
Test vTaskSuspendAll allows scheduling on other cores
Only runs on !CONFIG_FREERTOS_UNICORE
Purpose:
- Test that disabling a scheduler on one core (e.g., core B) does not disable scheduling on the other core (e.g., core A)
- While the scheduler on core B is disabled, test that...
- A task on Core A can be unblocked by another task also on core A
- A task on Core A can be unblocked by an interrupt on core A
Procedure:
Each core gets tested in the role of core A
- Create task B1 pinned to core B that will suspend scheduling on core B
- Create task A2 pinned to core A that will test unblocking on core A
- Create task A1 pinned to core A that will unblock task A2
- Register an interrupt on core A that will unblock task A2
- Have A2 block
- Have B1 disable scheduling on core B. A1 checks that scheduling is still enabled on core A
- Have A1 unblock A2
- Have the core A ISR unblock A2
- Cleanup the tasks
Expected:
When B1 disables scheduling on core B...
- Scheduling on core A should still be enabled
- A2 should be unblocked by A1 and run without issue
- A2 should be unblocked by core A ISR and run without issue
--------------------------------------------------------------------------------------------------------------------- */
#if !CONFIG_FREERTOS_UNICORE
static volatile int test_unblk_sync;
static SemaphoreHandle_t test_unblk_done_sem;
static bool test_unblk_coreA_isr(void *arg)
{
TaskHandle_t a2_task_hdl = (TaskHandle_t)arg;
BaseType_t task_woken = pdFALSE;
// Unblock task b2
vTaskNotifyGiveFromISR(a2_task_hdl, &task_woken);
return (task_woken == pdTRUE);
}
/* Task A2: blocks twice — unblocked first by A1's notify, then by the core A
 * ISR — bumping its run counter after each wake, then waits to be deleted. */
static void test_unblk_a2_task(void *arg)
{
    volatile int *run_count = (volatile int *)arg;
    for (int wake = 0; wake < 2; wake++) {
        // 1st iteration: woken by A1's xTaskNotifyGive()
        // 2nd iteration: woken by vTaskNotifyGiveFromISR() from the core A ISR
        ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
        (*run_count)++;
    }
    // Wait to be deleted
    vTaskSuspend(NULL);
}
/* Task A1: creates task A2 and a core A interrupt, then — while B1 keeps the
 * scheduler on core B suspended — verifies A2 can still be unblocked on core A
 * both by a task notify and by the ISR. arg is B1's task handle. */
static void test_unblk_a1_task(void *arg)
{
    volatile int a2_task_run_count = 0;
    // Create task A2 to block on this core (i.e., core A)
    TaskHandle_t a2_task_hdl;
    TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_unblk_a2_task, "A2", 8192, (void *)&a2_task_run_count, UNITY_FREERTOS_PRIORITY + 2, &a2_task_hdl, xPortGetCoreID()));
    // Install an interrupt on core A
    register_intr_cb(test_unblk_coreA_isr, (void *)a2_task_hdl);
    // Wait to be started by the main task
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    // Start B1
    xTaskNotifyGive((TaskHandle_t)arg);
    while (test_unblk_sync != 1) {
        ; // Wait for confirmation from B1 that scheduler has been suspended on Core B
    }
    // Verify that the scheduler is still enabled on core A
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
    // Unblock A2, it should preempt immediately due to its higher priority
    xTaskNotifyGive(a2_task_hdl);
    // Verify that task A2 has run
    TEST_ASSERT_EQUAL(1, a2_task_run_count);
    // Trigger an ISR on this core A to unblock task A2. A2 should preempt immediately due to its higher priority
    trigger_intr_cb();
    esp_rom_delay_us(2000); // Short busy delay to ensure interrupt has triggered
    // Verify that task A2 has run
    TEST_ASSERT_EQUAL(2, a2_task_run_count);
    // Trigger B1 to resume scheduling on core B
    test_unblk_sync = 2;
    while (test_unblk_sync != 3) {
        ; // Wait for confirmation from B1 that scheduler has been resumed
    }
    // Verify that the scheduler is still enabled on core A
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
    // Cleanup A2 and interrupt
    deregister_intr_cb();
    vTaskDelete(a2_task_hdl);
    // Indicate done and wait to be deleted
    xSemaphoreGive(test_unblk_done_sem);
    vTaskSuspend(NULL);
}
/* Task B1: suspends the scheduler on core B while A1 runs its checks on
 * core A (coordinated via test_unblk_sync), then resumes it. */
static void test_unblk_b1_task(void *arg)
{
    // Wait to be started by A1
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    // Check scheduler is running on core B
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
    // Suspend the scheduler on core B
    vTaskSuspendAll();
    TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
    // Indicate to A1 that core B scheduler has been suspended
    test_unblk_sync = 1;
    while (test_unblk_sync != 2) {
        ; // Wait for trigger from A1
    }
    // Resume the scheduler on core B
    xTaskResumeAll();
    TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
    // Indicate to A1 that core B scheduler has been resumed
    test_unblk_sync = 3;
    // Indicate done and wait to be deleted
    xSemaphoreGive(test_unblk_done_sem);
    vTaskSuspend(NULL);
}
/* Run the A1/B1 unblocking test with each core taking the role of core A */
TEST_CASE("Test vTaskSuspendAll allows scheduling on other cores", "[freertos]")
{
    test_unblk_done_sem = xSemaphoreCreateCounting(portNUM_PROCESSORS, 0);
    TEST_ASSERT_NOT_EQUAL(NULL, test_unblk_done_sem);
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        test_unblk_sync = 0;
        // Create the tasks: B1 on the other core, A1 on core i
        TaskHandle_t a1_task_hdl;
        TaskHandle_t b1_task_hdl;
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_unblk_b1_task, "B1", 8192, NULL, UNITY_FREERTOS_PRIORITY + 1, &b1_task_hdl, !i));
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_unblk_a1_task, "A1", 8192, (void *)b1_task_hdl, UNITY_FREERTOS_PRIORITY + 1, &a1_task_hdl, i));
        // Start A1 and wait for both tasks to complete
        xTaskNotifyGive(a1_task_hdl);
        for (int j = 0; j < 2; j++) {
            xSemaphoreTake(test_unblk_done_sem, portMAX_DELAY);
        }
        // Cleanup tasks
        vTaskDelete(a1_task_hdl);
        vTaskDelete(b1_task_hdl);
    }
    vSemaphoreDelete(test_unblk_done_sem);
    // Add a short delay to allow the idle task to free any remaining task memory
    vTaskDelay(10);
}
#endif // !CONFIG_FREERTOS_UNICORE
/* ---------------------------------------------------------------------------------------------------------------------
Test xTaskResumeAll() resumes pended tasks on the current core
Purpose:
- When the scheduler is suspended on a particular core, test that tasks unblocked by an ISR on that core will place
those tasks on the core's pending ready list (regardless of the task's affinity).
- When the scheduler is resumed on a particular core, test that the tasks on core's pending ready list will be
scheduled.
Procedure:
Test for each core
- Create some blocking tasks on the same core
- Register an interrupt on the same core to unblock those tasks
- Suspend the scheduler on the core
- Trigger the interrupt to unblock those tasks
- Resume the scheduler
- Cleanup
Expected:
- When the ISR unblocks the blocked tasks, the task's state should be ready
- When the scheduler is resumed, the tasks should be scheduled and run without issue.
--------------------------------------------------------------------------------------------------------------------- */
#define TEST_PENDED_NUM_BLOCKED_TASKS 4
/* ISR callback: notifies (unblocks) every task in the blocked-task array
 * passed as arg; returns whether a yield should be requested on ISR exit. */
static bool test_pended_isr(void *arg)
{
    TaskHandle_t *blocked = (TaskHandle_t *)arg;
    BaseType_t yield_required = pdFALSE;
    for (int idx = 0; idx < TEST_PENDED_NUM_BLOCKED_TASKS; idx++) {
        vTaskNotifyGiveFromISR(blocked[idx], &yield_required);
    }
    return (yield_required == pdTRUE);
}
/* Blocks until notified, then sets its "has run" flag and waits for deletion */
static void test_pended_blkd_task(void *arg)
{
    volatile bool *run_flag = (bool *)arg;
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY); // Wait to be unblocked
    *run_flag = true;                        // Prove that this task was scheduled
    vTaskSuspend(NULL);                      // Wait to be deleted
}
/* Creates blocked tasks across cores, suspends the local scheduler, unblocks
 * the tasks from an ISR (placing them on the pending ready list), then resumes
 * the scheduler and verifies all of them got to run. arg is the main task's
 * handle, notified on completion. */
static void test_pended_running_task(void *arg)
{
    TaskHandle_t main_task_hdl = (TaskHandle_t)arg;
    TaskHandle_t blkd_tsks[TEST_PENDED_NUM_BLOCKED_TASKS];
    volatile bool has_run[TEST_PENDED_NUM_BLOCKED_TASKS];
    // Create blocked tasks pinned to each core
    for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
        has_run[i] = false;
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_pended_blkd_task, "blkd", 4096, (void *)&has_run[i], UNITY_FREERTOS_PRIORITY + 2, &blkd_tsks[i], i % portNUM_PROCESSORS));
    }
    vTaskDelay(10);
    // Install an interrupt on the current core
    register_intr_cb(test_pended_isr, (void *)blkd_tsks);
    // Check that all tasks are blocked and have not run yet
    for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
        TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tsks[i])); // Should be eSuspended due to portMAX_DELAY
        TEST_ASSERT_EQUAL(false, has_run[i]);
    }
    // Suspend the scheduler on the current core
    vTaskSuspendAll();
    // Trigger the interrupt to unblock the blocked tasks
    trigger_intr_cb();
    esp_rom_delay_us(2000); // Short busy delay to ensure interrupt has triggered
    // Check that all tasks are unblocked (but should not have run since the scheduler is suspended)
    for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
        // Note: We use eBlocked instead of eReady due to a bug in eTaskGetState(). See (IDF-5543)
        TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tsks[i]));
        TEST_ASSERT_EQUAL(false, has_run[i]);
    }
    // Resume the scheduler on the current core to schedule the unblocked tasks
    xTaskResumeAll();
    esp_rom_delay_us(10000); // Busy delay to ensure each task has enough time to run
    // Check that all tasks have run
    for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
        TEST_ASSERT_EQUAL(true, has_run[i]);
    }
    // Clean up the interrupt and tasks
    deregister_intr_cb();
    for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
        vTaskDelete(blkd_tsks[i]);
    }
    // Notify completion and wait for deletion
    xTaskNotifyGive(main_task_hdl);
    vTaskSuspend(NULL);
}
/* Run the pended-task resume test with the suspending task pinned to each core */
TEST_CASE("Test xTaskResumeAll resumes pended tasks", "[freertos]")
{
    // Run the test on each core
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TaskHandle_t susp_tsk_hdl;
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_pended_running_task, "susp", 2048, (void *)xTaskGetCurrentTaskHandle(), UNITY_FREERTOS_PRIORITY + 1, &susp_tsk_hdl, i));
        // Wait to be notified of test completion
        ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
        vTaskDelete(susp_tsk_hdl);
    }
    // Add a short delay to allow the idle task to free any remaining task memory
    vTaskDelay(10);
}
#endif // !CONFIG_FREERTOS_SMP

View File

@@ -0,0 +1,658 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Unit tests for FreeRTOS task yielding
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "unity.h"
#include "test_utils.h"
#include <string.h>
// Records the order in which the test tasks got to run; each test reads it
// back afterwards to assert the expected scheduling sequence (max 3 entries)
static volatile uint32_t task_yield_sequence[3];
// Next free slot in task_yield_sequence; shared by all tasks in a test
static volatile uint32_t idx = 0;
// Spinlock protecting idx/task_yield_sequence updates across tasks and cores
static portMUX_TYPE idx_lock;
// Set by the first test task so the second task knows the test has begun,
// making the dispatch order of the test threads deterministic
static volatile bool task_sequence_ready;
// Completion counter: each test task increments it once when done so the
// unity task can wait for all test tasks to finish
static volatile uint32_t count;
// Mutex held by the unity task to force a test task into the Blocked state
static volatile SemaphoreHandle_t task_mutex;
// Appends task_id to the yield sequence atomically (critical section around
// the shared index). Note: GCC statement-expression extension.
#define STORE_TASK_ID(task_id) ({ \
    portENTER_CRITICAL(&idx_lock); \
    task_yield_sequence[idx++] = task_id; \
    portEXIT_CRITICAL(&idx_lock); \
})
/*
* Test yielding for same priority tasks on the same core.
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the same core.
* - Each task pushes its task_id on to a queue and then yields.
* - Unity task checks the sequence of the tasks run once the yield_tasks are done.
*/
static void yield_task1(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Record this task's id as the first entry in the run sequence */
    STORE_TASK_ID(my_id);
    /* Let yield_task2 proceed with the test */
    task_sequence_ready = true;
    /* Hand the CPU over to the next ready task of equal priority */
    taskYIELD();
    /* Signal completion to the unity task */
    count++;
    /* This task is done */
    vTaskDelete(NULL);
}
static void yield_task2(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Spin (yielding) until yield_task1 has recorded its id */
    while (!task_sequence_ready) {
        taskYIELD();
    }
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Hand the CPU over */
    taskYIELD();
    /* Signal completion to the unity task */
    count++;
    /* This task is done */
    vTaskDelete(NULL);
}
TEST_CASE("Task yield must run the next ready task of the same priority", "[freertos]")
{
    /* Reset all shared test state */
    idx = 0;
    count = 0;
    task_sequence_ready = false;
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));
    portMUX_INITIALIZE(&idx_lock);
    /* Spawn both tasks at the same priority, pinned to the unity core */
    xTaskCreatePinnedToCore(yield_task1, "yield_task1", 2048, (void *)1, UNITY_FREERTOS_PRIORITY - 1, NULL, UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(yield_task2, "yield_task2", 2048, (void *)2, UNITY_FREERTOS_PRIORITY - 1, NULL, UNITY_FREERTOS_CPU);
    /* Block until both tasks have run to completion */
    while (count != 2) {
        vTaskDelay(10);
    }
    /* yield_task1 must have run first; its yield must have scheduled yield_task2 */
    idx = 0;
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
/*
* Test yielding behavior when a task is blocked
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the same core.
* - One task blocks on a mutex.
* - Second task does not contest for a mutex and yields.
* - Unity task verifies that the blocked task is not scheduled unless it is ready to run.
*/
static void test_task1(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Blocks here until the unity task releases the mutex */
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(task_mutex, portMAX_DELAY));
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Signal completion to the unity task */
    count++;
    /* Hand the mutex back */
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreGive(task_mutex));
    /* This task is done */
    vTaskDelete(NULL);
}
static void test_task2(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Yield; the blocked test_task1 must NOT be scheduled by this */
    taskYIELD();
    /* Record this task's id again to prove no context switch happened */
    STORE_TASK_ID(my_id);
    /* Signal completion to the unity task */
    count++;
    /* This task is done */
    vTaskDelete(NULL);
}
TEST_CASE("Task yield must not run a blocked task", "[freertos]")
{
    /* Reset all shared test state */
    idx = 0;
    count = 0;
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));
    portMUX_INITIALIZE(&idx_lock);
    /* Create the mutex and hold it so test_task1 will block on it */
    task_mutex = xSemaphoreCreateMutex();
    TEST_ASSERT_NOT_EQUAL(NULL, task_mutex);
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(task_mutex, portMAX_DELAY));
    /* Spawn test_task1; it immediately blocks on the held mutex */
    xTaskCreatePinnedToCore(test_task1, "test_task1", 2048, (void *)1, UNITY_FREERTOS_PRIORITY - 1, NULL, UNITY_FREERTOS_CPU);
    /* Give test_task1 time to start up and block */
    vTaskDelay(10);
    /* Spawn test_task2; it records its id, yields, and records again */
    xTaskCreatePinnedToCore(test_task2, "test_task2", 2048, (void *)2, UNITY_FREERTOS_PRIORITY - 1, NULL, UNITY_FREERTOS_CPU);
    /* Block until test_task2 has completed */
    while (count != 1) {
        vTaskDelay(10);
    }
    /* Release the mutex; this unblocks test_task1 */
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreGive(task_mutex));
    /* Give test_task1 time to run to completion */
    vTaskDelay(10);
    /* Expected order: test_task2 twice in a row (its yield did not schedule
     * the blocked task), then test_task1 once it was unblocked */
    idx = 0;
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);
    /* Cleanup task mutex */
    vSemaphoreDelete(task_mutex);
}
/*
* Test yielding behavior when the scheduler is suspended
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the same core.
* - One task suspends the scheduler and then yields.
* - Unity task verifies that the yield does not happen until the scheduler is resumed.
*/
/* First task for the "yield while scheduler suspended" test. It suspends the
 * scheduler, yields (which must NOT cause a context switch), records its id a
 * second time, and only then resumes the scheduler. */
static void test_critical_task1(void *arg)
{
    uint32_t task_id = (uint32_t)arg;
    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);
    /* Suspend scheduler */
    vTaskSuspendAll();
    /* Set the task sequence flag once test_critical_task1 runs */
    task_sequence_ready = true;
    /* Yield — must be pended, not acted on, while the scheduler is suspended */
    taskYIELD();
    /* Store task_id in the sequence array.
     * No need for a lock when the scheduler is suspended.
     */
    task_yield_sequence[idx++] = task_id;
    /* Increment task count to notify unity task */
    count++;
    /* Resume scheduler; the pended yield takes effect now */
    xTaskResumeAll();
    /* Delete self */
    vTaskDelete(NULL);
}
static void test_critical_task2(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Spin (yielding) until test_critical_task1 signals the test has begun */
    while (!task_sequence_ready) {
        taskYIELD();
    }
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Signal completion to the unity task */
    count++;
    /* This task is done */
    vTaskDelete(NULL);
}
TEST_CASE("Task yield must not happen when scheduler is suspended", "[freertos]")
{
    /* Reset all shared test state */
    idx = 0;
    count = 0;
    task_sequence_ready = false;
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));
    portMUX_INITIALIZE(&idx_lock);
    /* Spawn both tasks at the same priority, pinned to the unity core */
    xTaskCreatePinnedToCore(test_critical_task1, "test_critical_task1", 2048, (void *)1, UNITY_FREERTOS_PRIORITY - 1, NULL, UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(test_critical_task2, "test_critical_task2", 2048, (void *)2, UNITY_FREERTOS_PRIORITY - 1, NULL, UNITY_FREERTOS_CPU);
    /* Block until both tasks have run to completion */
    while (count != 2) {
        vTaskDelay(10);
    }
    /* Expected order: test_critical_task1 records, yields with the scheduler
     * suspended (no switch, so it records again), then test_critical_task2
     * runs after the scheduler is resumed */
    idx = 0;
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
/*
* Test yielding behavior when a lower priority task creates a higher priority task
*
* The test performs the following actions:
* - Creates a task with a priority higher than the unity task.
* - Unity task verifies that it yields immediately to the newly created task.
*/
static void high_prio_task(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Signal completion to the unity task */
    count++;
    /* This task is done */
    vTaskDelete(NULL);
}
TEST_CASE("Task yield must happen when a task creates a higher priority task", "[freertos]")
{
    /* Reset all shared test state */
    idx = 0;
    count = 0;
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));
    portMUX_INITIALIZE(&idx_lock);
    /* Creating a higher-priority task must immediately preempt this task */
    xTaskCreatePinnedToCore(high_prio_task, "high_prio_task", 2048, (void *)1, UNITY_FREERTOS_PRIORITY + 1, NULL, UNITY_FREERTOS_CPU);
    /* By the time we run again, high_prio_task must already have recorded its id */
    uint32_t unity_task_id = 2;
    STORE_TASK_ID(unity_task_id);
    /* Block until the test task has completed */
    while (count == 0) {
        vTaskDelay(10);
    }
    /* The high-priority task's id must precede the unity task's id */
    idx = 0;
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
/*
* Test yielding behavior when a lower priority task raises the priority of another task
*
* The test performs the following actions:
* - Creates a task with a priority lower than the unity task.
* - Unity task raises the priority of the newly created task.
* - Unity task verifies that it yields once the priority is raised.
*/
static void low_prio_task(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Signal completion to the unity task */
    count++;
    /* This task is done */
    vTaskDelete(NULL);
}
/* Note: fixes typos in the test name ("must happed", "another priority task");
 * the name is purely descriptive. */
TEST_CASE("Task yield must happen when a task raises the priority of another task", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;
    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));
    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);
    /* Reset task count */
    count = 0;
    /* Create a task with a priority lower than the unity task. It must not
     * preempt the unity task on creation. */
    TaskHandle_t task_handle;
    xTaskCreatePinnedToCore(low_prio_task, "low_prio_task", 2048, (void *)1, UNITY_FREERTOS_PRIORITY - 1, &task_handle, UNITY_FREERTOS_CPU);
    uint32_t unity_task_id = 2;
    /* Store unity task_id in the sequence array */
    STORE_TASK_ID(unity_task_id);
    /* Raising the task's priority above ours must trigger an immediate yield */
    vTaskPrioritySet(task_handle, UNITY_FREERTOS_PRIORITY + 1);
    /* Store unity task_id in the sequence array again */
    STORE_TASK_ID(unity_task_id);
    /* Wait for the test task to finish up */
    while (count == 0) {
        vTaskDelay(10);
    }
    idx = 0;
    /* Verify that the unity task did not yield to the lower priority task when it was created */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
    /* Verify that the unity task yielded once the priority of the lower priority task was raised */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);
    /* Verify that the unity task_id is stored again once the test task finishes up */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
#if (portNUM_PROCESSORS > 1) && !(CONFIG_FREERTOS_UNICORE)
/*
 * Test yielding behavior when a task on one core forces a yield on the other core
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the core on which unity task is not running.
* - One task spins and does not let the other task run.
* - Force a cross-core yield from the unity task.
* - Verify that the cross-core yield happens and the second task is scheduled to run.
*/
static void other_core_task1(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Idle until the unity task deletes this task at the end of the test */
    for (;;) {
        vTaskDelay(10);
    }
}
static void other_core_task2(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Spin (yielding) until the unity task signals the test has begun */
    while (!task_sequence_ready) {
        taskYIELD();
    }
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Signal the unity task */
    count++;
    /* Idle until the unity task deletes this task at the end of the test */
    for (;;) {
        vTaskDelay(10);
    }
}
TEST_CASE("Task yield must happen when issued from another core", "[freertos]")
{
    TaskHandle_t other_core_taskhandle1;
    TaskHandle_t other_core_taskhandle2;
    /* Reset yield sequence index */
    idx = 0;
    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));
    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);
    /* Reset task count */
    count = 0;
    /* Reset task sequence flag */
    task_sequence_ready = false;
    /* Create both test tasks on the core the unity task is NOT running on */
    xTaskCreatePinnedToCore(other_core_task1, "test_task1", 2048, (void *)1, UNITY_FREERTOS_PRIORITY - 1, &other_core_taskhandle1, !UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(other_core_task2, "test_task2", 2048, (void *)2, UNITY_FREERTOS_PRIORITY - 1, &other_core_taskhandle2, !UNITY_FREERTOS_CPU);
    /* Wait for everything to be setup */
    vTaskDelay(10);
    uint32_t idx1 = 0;
    /* Verify that other_core_task1 runs first */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx1++]);
    /* Set the task sequence flag so other_core_task2 can record its id once scheduled */
    task_sequence_ready = true;
    /* Force a yield on the other core */
#if CONFIG_FREERTOS_SMP
    portYIELD_CORE(!UNITY_FREERTOS_CPU);
#else
    vPortYieldOtherCore(!UNITY_FREERTOS_CPU);
#endif
    /* Wait for other_core_task2 to record its id and bump the count */
    while (count == 0) {
        vTaskDelay(10);
    }
    /* Verify that other_core_task1 yielded and other_core_task2 ran */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx1++]);
    /* Cleanup test tasks (both idle in vTaskDelay loops until deleted) */
    vTaskDelete(other_core_taskhandle1);
    vTaskDelete(other_core_taskhandle2);
}
#if !CONFIG_FREERTOS_SMP
static volatile bool yield_triggered = false;
/*
* Test cross-core yielding behavior when the scheduler is suspended
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the other core.
* - One task suspends the scheduler.
* - Unity task forces a cross-core yield.
* - Unity task verifies that the yield does not happen until the scheduler is resumed.
*
* Note: This test case is not valid when FreeRTOS SMP is used as the scheduler suspension
* is not per core but across cores and hence the test cannot be executed.
*/
/* First task for the cross-core "yield while scheduler suspended" test.
 * Records its id, suspends the scheduler on its own core, records again, then
 * busy-waits (it cannot block with the scheduler suspended) until the unity
 * task has issued the cross-core yield, before resuming the scheduler. */
static void other_core_critical_task1(void *arg)
{
    uint32_t task_id = (uint32_t)arg;
    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);
    /* Suspend scheduler on this core */
    vTaskSuspendAll();
    /* Store task_id in the sequence array again.
     * No need for a lock when the scheduler is suspended.
     */
    task_yield_sequence[idx++] = task_id;
    /* Set the task sequence flag once other_core_critical_task1 runs */
    task_sequence_ready = true;
    /* Increment task count to notify unity task */
    count++;
    /* Busy-wait until the unity task has triggered the cross-core yield */
    while (!yield_triggered) { }
    /* Resume scheduler; the pended yield takes effect now */
    xTaskResumeAll();
    /* Delete self */
    vTaskDelete(NULL);
}
static void other_core_critical_task2(void *arg)
{
    uint32_t my_id = (uint32_t)arg;
    /* Spin (yielding) until other_core_critical_task1 signals the test has begun */
    while (!task_sequence_ready) {
        taskYIELD();
    }
    /* Record this task's id in the run sequence */
    STORE_TASK_ID(my_id);
    /* Signal completion to the unity task */
    count++;
    /* This task is done */
    vTaskDelete(NULL);
}
TEST_CASE("Task yield on other core must not happen when scheduler is suspended", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;
    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));
    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);
    /* Reset task count */
    count = 0;
    /* Reset task sequence flag */
    task_sequence_ready = false;
    /* Create both test tasks on the core the unity task is NOT running on */
    xTaskCreatePinnedToCore(other_core_critical_task1, "other_core_critical_task1", 2048, (void *)1, UNITY_FREERTOS_PRIORITY - 1, NULL, !UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(other_core_critical_task2, "other_core_critical_task2", 2048, (void *)2, UNITY_FREERTOS_PRIORITY - 1, NULL, !UNITY_FREERTOS_CPU);
    /* Wait until other_core_critical_task1 has suspended the other core's scheduler */
    while (count == 0) {
        vTaskDelay(10);
    }
    /* Force a yield on the other core; it must be pended, not acted on, while
     * that core's scheduler is suspended */
    vPortYieldOtherCore(!UNITY_FREERTOS_CPU);
    /* Set yield triggered flag so other_core_critical_task1 can resume its scheduler */
    yield_triggered = true;
    uint32_t idx1 = 0;
    /* Verify that the first task runs */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx1++]);
    /* Verify no context switch happened under the suspended scheduler: the
     * first task recorded its id twice in a row */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx1++]);
    /* Wait for the second task to finish up */
    while (count != 2) {
        vTaskDelay(10);
    }
    /* Verify that the second task is scheduled once the scheduler is resumed */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx1++]);
}
#endif // !CONFIG_FREERTOS_SMP
#endif // (portNUM_PROCESSORS > 1) && !(CONFIG_FREERTOS_UNICORE)

View File

@@ -0,0 +1,92 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/* FreeRTOS timer tests
*/
#include <stdio.h>
#include "unity.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/timers.h"
#include "test_utils.h"
static void timer_callback(TimerHandle_t timer)
{
    /* The timer ID points at the test's expiry counter; bump it each expiry */
    volatile int *hit_count = (volatile int *)pvTimerGetTimerID(timer);
    (*hit_count)++;
    printf("Callback timer %p count %p = %d\n", timer, hit_count, *hit_count);
}
TEST_CASE("Oneshot FreeRTOS timers", "[freertos]")
{
    volatile int hits = 0;
    /* One-shot timer: 100ms period, auto-reload disabled */
    TimerHandle_t timer_hdl = xTimerCreate("oneshot", 100 / portTICK_PERIOD_MS, pdFALSE,
                                           (void *)&hits, timer_callback);
    TEST_ASSERT(timer_hdl);
    /* A freshly created timer is dormant and has not fired */
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(timer_hdl));
    TEST_ASSERT_EQUAL(0, hits);
    TEST_ASSERT( xTimerStart(timer_hdl, 1) );
    /* Give the timer service task a chance to process the start command */
    vTaskDelay(2);
    TEST_ASSERT_EQUAL(pdTRUE, xTimerIsTimerActive(timer_hdl));
    TEST_ASSERT_EQUAL(0, hits);
    /* Wait 2.5 periods: a one-shot timer must fire exactly once, then go dormant */
    vTaskDelay(250 / portTICK_PERIOD_MS);
    TEST_ASSERT_EQUAL(1, hits);
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(timer_hdl));
    TEST_ASSERT( xTimerDelete(timer_hdl, 1) );
}
TEST_CASE("Recurring FreeRTOS timers", "[freertos]")
{
    volatile int count = 0;
    /* Auto-reload timer with a 100ms period.
     * Fix: the timer was named "oneshot" (copy-paste from the previous test);
     * the name is diagnostic-only, so correct it to "recurring". */
    TimerHandle_t recurring = xTimerCreate("recurring", 100 / portTICK_PERIOD_MS, pdTRUE,
                                           (void *)&count, timer_callback);
    TEST_ASSERT(recurring);
    /* A freshly created timer is dormant and has not fired */
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(recurring));
    TEST_ASSERT_EQUAL(0, count);
    TEST_ASSERT( xTimerStart(recurring, 1) );
    vTaskDelay(2); // let timer task process the queue
    TEST_ASSERT_EQUAL(pdTRUE, xTimerIsTimerActive(recurring));
    TEST_ASSERT_EQUAL(0, count);
    vTaskDelay(250 / portTICK_PERIOD_MS); // 2.5 timer periods -> 2 expiries
    TEST_ASSERT_EQUAL(2, count);
    TEST_ASSERT_EQUAL(pdTRUE, xTimerIsTimerActive(recurring));
    TEST_ASSERT( xTimerStop(recurring, 1) );
    TEST_ASSERT_EQUAL(2, count);
    vTaskDelay(100 / portTICK_PERIOD_MS); // one more period while stopped
    TEST_ASSERT_EQUAL(2, count); // hasn't gone up
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(recurring));
    TEST_ASSERT( xTimerDelete(recurring, 1) );
}
TEST_CASE("Static timer creation", "[freertos]")
{
    StaticTimer_t static_timer;
    volatile int count = 0;
    /* Create an auto-reload timer backed by caller-provided static storage.
     * The timer is never started, so no stop/delete cleanup is done here. */
    TimerHandle_t created_timer = xTimerCreateStatic("oneshot", 100 / portTICK_PERIOD_MS,
                                                     pdTRUE,
                                                     (void *)&count,
                                                     timer_callback,
                                                     &static_timer);
    /* Static creation must not return NULL */
    TEST_ASSERT_NOT_NULL(created_timer);
}