pthread: migrate unit tests to pytest test app

Marius Vikhammer
2022-09-16 15:38:27 +08:00
parent 5aeba87e51
commit 46c092c04a
40 changed files with 167 additions and 41 deletions

View File

@@ -0,0 +1,8 @@
# The following lines of boilerplate have to be in your project's
# CMakeLists in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.16)
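# pull in components from the legacy unit-test-app (e.g. test_utils) that these tests still use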
set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/tools/unit-test-app/components")
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(test_pthread)

View File

@@ -0,0 +1,2 @@
| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-S2 | ESP32-S3 |
| ----------------- | ----- | -------- | -------- | -------- | -------- |

View File

@@ -0,0 +1,11 @@
set(sources "test_app_main.c"
"test_pthread.c"
"test_pthread_cond_var.c"
"test_pthread_local_storage.c"
"test_pthread_cxx.cpp"
"test_pthread_rwlock.c")
idf_component_register(SRCS ${sources}
INCLUDE_DIRS "."
REQUIRES pthread esp_timer test_utils
WHOLE_ARCHIVE)

View File

@@ -0,0 +1,45 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "unity.h"
#include "unity_test_runner.h"
#include "esp_heap_caps.h"
// Some resources are lazily allocated (e.g. newlib locks); the threshold accounts for that
#define TEST_MEMORY_LEAK_THRESHOLD (-200)
static size_t before_free_8bit;
static size_t before_free_32bit;
static void check_leak(size_t before_free, size_t after_free, const char *type)
{
ssize_t delta = after_free - before_free;
printf("MALLOC_CAP_%s: Before %u bytes free, After %u bytes free (delta %d)\n", type, (unsigned) before_free, (unsigned) after_free, (int) delta);
TEST_ASSERT_MESSAGE(delta >= TEST_MEMORY_LEAK_THRESHOLD, "memory leak");
}
void setUp(void)
{
before_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
before_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
}
void tearDown(void)
{
size_t after_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
size_t after_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
check_leak(before_free_8bit, after_free_8bit, "8BIT");
check_leak(before_free_32bit, after_free_32bit, "32BIT");
}
void app_main(void)
{
printf("Running pthread component unity tests\n");
unity_run_menu();
}

View File

@@ -0,0 +1,103 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include <iostream>
#include <thread>
#include <condition_variable>
#include <chrono>
#include <mutex>
#include <atomic>
#include <unistd.h>
#include "freertos/FreeRTOS.h"
#include "unity.h"
#if __GTHREADS && __GTHREADS_CXX0X
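// These tests need libstdc++'s gthreads layer (backed by pthread) for C++11 thread support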
static std::condition_variable cv;
static std::mutex cv_m;
static std::atomic<int> i{0};
static void waits(int idx, int timeout_ms)
{
std::unique_lock<std::mutex> lk(cv_m);
auto now = std::chrono::system_clock::now();
if (cv.wait_until(lk, now + std::chrono::milliseconds(timeout_ms), [](){ return i == 1; }))
std::cout << "Thread " << idx << " finished waiting. i == " << i << '\n';
else
std::cout << "Thread " << idx << " timed out. i == " << i << '\n';
}
static void signals(int signal_ms)
{
std::this_thread::sleep_for(std::chrono::milliseconds(signal_ms));
std::cout << "Notifying...\n";
cv.notify_all();
std::this_thread::sleep_for(std::chrono::milliseconds(signal_ms));
i = 1;
std::cout << "Notifying again...\n";
cv.notify_all();
}
TEST_CASE("C++ condition_variable", "[std::condition_variable]")
{
i = 0;
std::thread t1(waits, 1, 100), t2(waits, 2, 800), t3(signals, 200);
t1.join();
t2.join();
t3.join();
std::cout << "All threads joined\n";
}
TEST_CASE("cxx: condition_variable can timeout", "[cxx]")
{
std::condition_variable cv;
std::mutex mtx;
std::unique_lock<std::mutex> lck(mtx);
srand(99);
for (int i = 0; i < 10; ++i) {
usleep(rand() % 1000);
auto status = cv.wait_for(lck, std::chrono::milliseconds(200));
TEST_ASSERT_EQUAL(std::cv_status::timeout, status);
}
}
TEST_CASE("cxx: condition_variable timeout never before deadline", "[cxx]")
{
using SysClock = std::chrono::system_clock;
std::mutex mutex;
std::condition_variable cond;
std::unique_lock<std::mutex> lock(mutex);
for (int i = 0; i < 25; ++i) {
auto timeout = std::chrono::milliseconds(portTICK_PERIOD_MS * (i+1));
auto deadline = SysClock::now() + timeout;
auto secs = std::chrono::time_point_cast<std::chrono::seconds>(deadline);
auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>
(deadline - secs);
struct timespec ts = {
.tv_sec = static_cast<time_t>(secs.time_since_epoch().count()),
.tv_nsec = static_cast<long>(nsecs.count())};
int rc = ::pthread_cond_timedwait(cond.native_handle(),
lock.mutex()->native_handle(), &ts);
auto status = (rc == ETIMEDOUT) ? std::cv_status::timeout :
std::cv_status::no_timeout;
auto end = SysClock::now();
auto extra = end - deadline;
auto extra_us = extra / std::chrono::microseconds(1);
printf("timeout %lldms Extra time: %lldus, status: %s\n", timeout.count(), extra_us,
(status == std::cv_status::timeout) ? "timeout" : "no timeout");
// The timed wait should always return at least 1us after the timeout deadline
TEST_ASSERT_GREATER_THAN(0, extra_us);
}
}
#endif // __GTHREADS && __GTHREADS_CXX0X

View File

@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include <iostream>
#include <future>
#include <thread>
#include "unity.h"
#if __GTHREADS && __GTHREADS_CXX0X
TEST_CASE("C++ future", "[std::future]")
{
// future from a packaged_task
std::packaged_task<int()> task([]{ return 7; }); // wrap the function
std::future<int> f1 = task.get_future(); // get a future
std::thread t(std::move(task)); // launch on a thread
// future from an async()
std::future<int> f2 = std::async(std::launch::async, []{ return 8; });
// future from a promise
std::promise<int> p;
std::future<int> f3 = p.get_future();
std::thread( [&p]{ p.set_value_at_thread_exit(9); }).detach();
std::cout << "Waiting..." << std::flush;
f1.wait();
f2.wait();
f3.wait();
std::cout << "Done!\nResults are: "
<< f1.get() << ' ' << f2.get() << ' ' << f3.get() << '\n';
t.join();
}
#endif

View File

@@ -0,0 +1,300 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include <errno.h>
#include <time.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_pthread.h"
#include <pthread.h>
#include "unity.h"
static void *compute_square(void *arg)
{
int *num = (int *) arg;
*num = (*num) * (*num);
vTaskDelay(2); // ensure the test task has time to continue execution
pthread_exit((void *) num);
return NULL;
}
TEST_CASE("pthread create join", "[pthread]")
{
int res = 0;
volatile int num = 7;
volatile bool attr_init = false;
void *thread_rval = NULL;
pthread_t new_thread = (pthread_t)NULL;
pthread_attr_t attr;
if (TEST_PROTECT()) {
res = pthread_attr_init(&attr);
TEST_ASSERT_EQUAL_INT(0, res);
attr_init = true;
res = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_create(&new_thread, &attr, compute_square, (void *) &num);
TEST_ASSERT_EQUAL_INT(0, res);
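// joining a detached thread is an error; ESP-IDF's pthread implementation returns EDEADLK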
res = pthread_join(new_thread, &thread_rval);
TEST_ASSERT_EQUAL_INT(EDEADLK, res);
vTaskDelay(100 / portTICK_PERIOD_MS);
TEST_ASSERT_EQUAL_INT(49, num);
res = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_create(&new_thread, &attr, compute_square, (void *) &num);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_join(new_thread, &thread_rval);
TEST_ASSERT_EQUAL_INT(0, res);
TEST_ASSERT_EQUAL_INT(2401, num);
TEST_ASSERT_EQUAL_PTR(&num, thread_rval);
}
if (attr_init) {
pthread_attr_destroy(&attr);
}
}
static void *waiting_thread(void *arg)
{
TaskHandle_t *task_handle = (TaskHandle_t *)arg;
TaskHandle_t parent_task = *task_handle;
*task_handle = xTaskGetCurrentTaskHandle();
xTaskNotify(parent_task, 0, eNoAction);
xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
return NULL;
}
TEST_CASE("pthread detach", "[pthread]")
{
int res = 0;
pthread_t new_thread = (pthread_t)NULL;
TaskHandle_t task_handle = NULL;
const int task_count = uxTaskGetNumberOfTasks();
bool detach_works = false;
if (TEST_PROTECT()) {
task_handle = xTaskGetCurrentTaskHandle();
res = pthread_create(&new_thread, NULL, waiting_thread, (void *)&task_handle);
TEST_ASSERT_EQUAL_INT(0, res);
res = xTaskNotifyWait(0, 0, NULL, 100 / portTICK_PERIOD_MS);
TEST_ASSERT_EQUAL_INT(pdTRUE, res);
xTaskNotify(task_handle, 0, eNoAction);
vTaskDelay(100 / portTICK_PERIOD_MS);
res = pthread_detach(new_thread);
TEST_ASSERT_EQUAL_INT(0, res);
res = uxTaskGetNumberOfTasks();
TEST_ASSERT_EQUAL_INT(task_count, res);
detach_works = true;
}
if (!detach_works) {
vTaskDelete(task_handle);
} else {
detach_works = false;
}
if (TEST_PROTECT()) {
task_handle = xTaskGetCurrentTaskHandle();
res = pthread_create(&new_thread, NULL, waiting_thread, (void *)&task_handle);
TEST_ASSERT_EQUAL_INT(0, res);
res = xTaskNotifyWait(0, 0, NULL, 100 / portTICK_PERIOD_MS);
TEST_ASSERT_EQUAL_INT(pdTRUE, res);
res = pthread_detach(new_thread);
TEST_ASSERT_EQUAL_INT(0, res);
xTaskNotify(task_handle, 0, eNoAction);
vTaskDelay(100 / portTICK_PERIOD_MS);
res = uxTaskGetNumberOfTasks();
TEST_ASSERT_EQUAL_INT(task_count, res);
detach_works = true;
}
if (!detach_works) {
vTaskDelete(task_handle);
}
}
TEST_CASE("pthread attr init destroy", "[pthread]")
{
int res = 0;
size_t stack_size_1 = 0, stack_size_2 = 0;
volatile bool attr_init = false;
pthread_attr_t attr;
if (TEST_PROTECT()) {
res = pthread_attr_init(&attr);
TEST_ASSERT_EQUAL_INT(0, res);
attr_init = true;
res = pthread_attr_getstacksize(&attr, &stack_size_1);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_attr_setstacksize(&attr, stack_size_1);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_attr_getstacksize(&attr, &stack_size_2);
TEST_ASSERT_EQUAL_INT(0, res);
TEST_ASSERT_EQUAL_INT(stack_size_2, stack_size_1);
stack_size_1 = PTHREAD_STACK_MIN - 1;
res = pthread_attr_setstacksize(&attr, stack_size_1);
TEST_ASSERT_EQUAL_INT(EINVAL, res);
}
if (attr_init) {
TEST_ASSERT_EQUAL_INT(0, pthread_attr_destroy(&attr));
}
}
static void *unlock_mutex(void *arg)
{
pthread_mutex_t *mutex = (pthread_mutex_t *) arg;
intptr_t res = (intptr_t) pthread_mutex_unlock(mutex);
pthread_exit((void *) res);
return NULL;
}
static void test_mutex_lock_unlock(int mutex_type)
{
int res = 0;
int set_type = -1;
volatile bool attr_created = false;
volatile bool mutex_created = false;
volatile intptr_t thread_rval = 0;
pthread_mutex_t mutex;
pthread_mutexattr_t attr;
pthread_t new_thread;
if (TEST_PROTECT()) {
res = pthread_mutexattr_init(&attr);
TEST_ASSERT_EQUAL_INT(0, res);
attr_created = true;
res = pthread_mutexattr_settype(&attr, mutex_type);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutexattr_gettype(&attr, &set_type);
TEST_ASSERT_EQUAL_INT(0, res);
TEST_ASSERT_EQUAL_INT(mutex_type, set_type);
res = pthread_mutex_init(&mutex, &attr);
TEST_ASSERT_EQUAL_INT(0, res);
mutex_created = true;
res = pthread_mutex_lock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_lock(&mutex);
if (mutex_type == PTHREAD_MUTEX_ERRORCHECK) {
TEST_ASSERT_EQUAL_INT(EDEADLK, res);
} else {
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
}
res = pthread_create(&new_thread, NULL, unlock_mutex, &mutex);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_join(new_thread, (void **) &thread_rval);
TEST_ASSERT_EQUAL_INT(0, res);
TEST_ASSERT_EQUAL_INT(EPERM, (int) thread_rval);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
}
if (attr_created) {
pthread_mutexattr_destroy(&attr);
}
if (mutex_created) {
pthread_mutex_destroy(&mutex);
}
}
TEST_CASE("pthread mutex lock unlock", "[pthread]")
{
int res = 0;
/* The present behavior of the static mutex initializer differs from the
* POSIX standard: calling pthread_mutex_lock on such a mutex internally
* performs a dynamic allocation. Therefore pthread_mutex_destroy must be
* called afterwards to avoid a memory leak. */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
res = pthread_mutex_lock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
/* This deviates from the POSIX static mutex behavior and can be removed
* once the standard static mutex initializer is fully supported */
pthread_mutex_destroy(&mutex);
test_mutex_lock_unlock(PTHREAD_MUTEX_ERRORCHECK);
test_mutex_lock_unlock(PTHREAD_MUTEX_RECURSIVE);
}
static void timespec_add_nano(struct timespec *out, struct timespec *in, long val)
{
out->tv_sec = in->tv_sec;
out->tv_nsec = in->tv_nsec + val;
// carry into tv_sec so that tv_nsec stays within [0, 1000000000)
if (out->tv_nsec >= 1000000000L) {
out->tv_nsec -= 1000000000L;
out->tv_sec += 1;
}
}
TEST_CASE("pthread mutex trylock timedlock", "[pthread]")
{
int res = 0;
volatile bool mutex_created = false;
pthread_mutex_t mutex;
struct timespec abs_timeout;
if (TEST_PROTECT()) {
res = pthread_mutex_init(&mutex, NULL);
TEST_ASSERT_EQUAL_INT(0, res);
mutex_created = true;
res = pthread_mutex_trylock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_trylock(&mutex);
TEST_ASSERT_EQUAL_INT(EBUSY, res);
clock_gettime(CLOCK_REALTIME, &abs_timeout);
timespec_add_nano(&abs_timeout, &abs_timeout, 100000000LL);
res = pthread_mutex_timedlock(&mutex, &abs_timeout);
TEST_ASSERT_EQUAL_INT(ETIMEDOUT, res);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
}
if (mutex_created) {
pthread_mutex_destroy(&mutex);
}
}

View File

@@ -0,0 +1,120 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include <pthread.h>
#include <time.h>
#include <unistd.h>
#include "unity.h"
typedef struct {
pthread_cond_t *cond;
pthread_mutex_t *mutex;
unsigned delay_ms;
} thread_args_t;
static void *thread_signals(void *arg)
{
const thread_args_t *targs = (thread_args_t *)arg;
int r;
r = pthread_mutex_lock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_cond_signal(targs->cond);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_mutex_unlock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
usleep(targs->delay_ms * 1000);
r = pthread_mutex_lock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_cond_broadcast(targs->cond);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_mutex_unlock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
return NULL;
}
static void *thread_waits(void *arg)
{
const thread_args_t *targs = (thread_args_t *)arg;
int r;
r = pthread_mutex_lock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_cond_wait(targs->cond, targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_mutex_unlock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
usleep(targs->delay_ms * 1000);
r = pthread_mutex_lock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
struct timespec two_seconds;
clock_gettime(CLOCK_REALTIME, &two_seconds);
two_seconds.tv_sec += 2;
r = pthread_cond_timedwait(targs->cond, targs->mutex, &two_seconds);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_mutex_unlock(targs->mutex);
TEST_ASSERT_EQUAL_INT(0, r);
return NULL;
}
#define NUM_THREADS 3
TEST_CASE("pthread cond wait", "[pthread]")
{
int r;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
struct {
thread_args_t args;
pthread_t thread;
} wait[NUM_THREADS];
struct {
thread_args_t args;
pthread_t thread;
} signal[NUM_THREADS];
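// Each waiter blocks twice (a plain wait, then a timed wait); each signaler signals once
// and broadcasts once after its delay. The longest signal delay (500 ms) ensures the
// final broadcast wakes every waiter still blocked in the timed wait.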
wait[0].args.delay_ms = 50;
wait[1].args.delay_ms = 100;
wait[2].args.delay_ms = 200;
signal[0].args.delay_ms = 30;
signal[1].args.delay_ms = 150;
signal[2].args.delay_ms = 500; // longest delay, ensures the final broadcast is received by all waiter threads
for (int i = 0; i < NUM_THREADS; i++) {
wait[i].args.cond = &cond;
wait[i].args.mutex = &mutex;
signal[i].args.cond = &cond;
signal[i].args.mutex = &mutex;
r = pthread_create(&signal[i].thread, NULL, thread_signals, &signal[i].args);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_create(&wait[i].thread, NULL, thread_waits, &wait[i].args);
TEST_ASSERT_EQUAL_INT(0, r);
}
for (int i = 0; i < NUM_THREADS; i++) {
r = pthread_join(signal[i].thread, NULL);
TEST_ASSERT_EQUAL_INT(0, r);
r = pthread_join(wait[i].thread, NULL);
TEST_ASSERT_EQUAL_INT(0, r);
}
pthread_cond_destroy(&cond);
pthread_mutex_destroy(&mutex);
}

View File

@@ -0,0 +1,147 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include <iostream>
#include <sstream>
#include <thread>
#include <mutex>
#include <memory>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
#include "test_utils.h"
#if __GTHREADS && __GTHREADS_CXX0X
#include "esp_log.h"
const static __attribute__((unused)) char *TAG = "pthread_test";
static std::mutex mtx;
static std::shared_ptr<int> global_sp_mtx; // protected by mtx
static std::recursive_mutex recur_mtx;
static std::shared_ptr<int> global_sp_recur_mtx; // protected by recur_mtx
static void thread_do_nothing() {}
static void thread_main()
{
std::cout << "thread_main CXX " << std::hex << std::this_thread::get_id() << std::endl;
std::chrono::milliseconds dur = std::chrono::milliseconds(10);
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
int old_val, new_val;
// mux test
mtx.lock();
old_val = *global_sp_mtx;
std::this_thread::yield();
(*global_sp_mtx)++;
std::this_thread::yield();
new_val = *global_sp_mtx;
mtx.unlock();
std::cout << "thread " << std::hex << std::this_thread::get_id() << ": nrec " << i << " val= " << *global_sp_mtx << std::endl;
TEST_ASSERT_EQUAL(old_val + 1, new_val);
// sleep_for test
std::this_thread::sleep_for(dur);
// recursive mux test
recur_mtx.lock();
recur_mtx.lock();
old_val = *global_sp_recur_mtx;
std::this_thread::yield();
(*global_sp_recur_mtx)++;
std::this_thread::yield();
new_val = *global_sp_recur_mtx;
recur_mtx.unlock();
recur_mtx.unlock();
std::cout << "thread " << std::hex << std::this_thread::get_id() << ": rec " << i << " val= " << *global_sp_recur_mtx << std::endl;
TEST_ASSERT_EQUAL(old_val + 1, new_val);
}
// sleep_until test
using std::chrono::system_clock;
std::time_t tt = system_clock::to_time_t(system_clock::now());
struct std::tm *ptm = std::localtime(&tt);
ptm->tm_sec++;
std::this_thread::sleep_until(system_clock::from_time_t(mktime(ptm)));
}
}
TEST_CASE("pthread C++", "[pthread]")
{
global_sp_mtx.reset(new int(1));
global_sp_recur_mtx.reset(new int(-1000));
std::thread t1(thread_do_nothing);
t1.join();
std::thread t2(thread_main);
std::cout << "Detach thread " << std::hex << t2.get_id() << std::endl;
t2.detach();
TEST_ASSERT_FALSE(t2.joinable());
std::thread t3(thread_main);
std::thread t4(thread_main);
TEST_ASSERT(t3.joinable());
TEST_ASSERT(t4.joinable());
std::cout << "Join thread " << std::hex << t3.get_id() << std::endl;
t3.join();
std::cout << "Join thread " << std::hex << t4.get_id() << std::endl;
t4.join();
// we don't know if/when t2 has finished, so delay another 2s before
// releasing the shared pointers it may still be using
std::this_thread::sleep_for(std::chrono::seconds(2));
global_sp_mtx.reset(); // avoid reported leak
global_sp_recur_mtx.reset();
}
static void task_test_sandbox()
{
std::stringstream ss;
ESP_LOGI(TAG, "About to create a string stream");
ESP_LOGI(TAG, "About to write to string stream");
ss << "Hello World!";
ESP_LOGI(TAG, "About to extract from stringstream");
ESP_LOGI(TAG, "Text: %s", ss.str().c_str());
}
static void task_test_sandbox_c(void *arg)
{
bool *running = (bool *)arg;
// wrap thread func to ensure that all C++ stack objects are cleaned up by their destructors
task_test_sandbox();
ESP_LOGI(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL));
if (running) {
*running = false;
vTaskDelete(NULL);
}
}
TEST_CASE("pthread mix C/C++", "[pthread]")
{
bool c_running = true;
std::thread t1(task_test_sandbox);
xTaskCreatePinnedToCore(task_test_sandbox_c, "task_test_sandbox", 3072, &c_running, 5, NULL, 0);
while (c_running) {
vTaskDelay(1);
}
if (t1.joinable()) {
std::cout << "Join thread " << std::hex << t1.get_id() << std::endl;
t1.join();
}
/* Short delay to allow cleanup, avoid leaks */
vTaskDelay(10);
}
#endif

View File

@@ -0,0 +1,258 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
// Test pthread_key_create, pthread_key_delete, pthread_setspecific, pthread_getspecific
#include <pthread.h>
#include <stdio.h>
#include <inttypes.h>
#include "unity.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "test_utils.h"
#include "esp_random.h"
TEST_CASE("pthread local storage basics", "[pthread]")
{
pthread_key_t key;
TEST_ASSERT_EQUAL(0, pthread_key_create(&key, NULL));
TEST_ASSERT_NULL(pthread_getspecific(key));
int val = 3;
printf("Setting to %p...\n", &val);
TEST_ASSERT_EQUAL(0, pthread_setspecific(key, &val));
printf("Reading back...\n");
TEST_ASSERT_EQUAL_PTR(&val, pthread_getspecific(key));
printf("Setting to NULL...\n");
TEST_ASSERT_EQUAL(0, pthread_setspecific(key, NULL));
printf("Reading back...\n");
TEST_ASSERT_NULL(pthread_getspecific(key));
TEST_ASSERT_EQUAL(0, pthread_key_delete(key));
}
TEST_CASE("pthread local storage unique keys", "[pthread]")
{
const int NUM_KEYS = 10;
pthread_key_t keys[NUM_KEYS];
for (int i = 0; i < NUM_KEYS; i++) {
TEST_ASSERT_EQUAL(0, pthread_key_create(&keys[i], NULL));
printf("New key %d = %"PRIu32"\n", i, keys[i]);
}
for (int i = 0; i < NUM_KEYS; i++) {
for (int j = 0; j < NUM_KEYS; j++) {
if (i != j) {
TEST_ASSERT_NOT_EQUAL(keys[i], keys[j]);
}
}
}
for (int i = 0; i < NUM_KEYS; i++) {
TEST_ASSERT_EQUAL(0, pthread_key_delete(keys[i]));
}
}
static void test_pthread_destructor(void *);
static void *expected_destructor_ptr;
static void *actual_destructor_ptr;
static void *thread_test_pthread_destructor(void *);
TEST_CASE("pthread local storage destructor", "[pthread]")
{
pthread_t thread;
pthread_key_t key = -1;
expected_destructor_ptr = NULL;
actual_destructor_ptr = NULL;
TEST_ASSERT_EQUAL(0, pthread_key_create(&key, test_pthread_destructor));
TEST_ASSERT_EQUAL(0, pthread_create(&thread, NULL, thread_test_pthread_destructor, (void *)key));
TEST_ASSERT_EQUAL(0, pthread_join(thread, NULL));
printf("Joined...\n");
TEST_ASSERT_NOT_NULL(expected_destructor_ptr);
TEST_ASSERT_NOT_NULL(actual_destructor_ptr);
TEST_ASSERT_EQUAL_PTR(expected_destructor_ptr, actual_destructor_ptr);
TEST_ASSERT_EQUAL(0, pthread_key_delete(key));
}
static void task_test_pthread_destructor(void *v_key);
TEST_CASE("pthread local storage destructor in FreeRTOS task", "[pthread]")
{
// Same as the previous test case, but doesn't use pthread APIs and therefore
// must wait for the idle task to call the destructor
pthread_key_t key = -1;
expected_destructor_ptr = NULL;
actual_destructor_ptr = NULL;
TEST_ASSERT_EQUAL(0, pthread_key_create(&key, test_pthread_destructor));
xTaskCreate(task_test_pthread_destructor,
"ptdest", 8192, (void *)key, UNITY_FREERTOS_PRIORITY+1,
NULL);
// The task above has a higher priority than us, so it should run immediately,
// but we need to wait for the idle task cleanup to run the destructor
vTaskDelay(20);
TEST_ASSERT_NOT_NULL(expected_destructor_ptr);
TEST_ASSERT_NOT_NULL(actual_destructor_ptr);
TEST_ASSERT_EQUAL_PTR(expected_destructor_ptr, actual_destructor_ptr);
TEST_ASSERT_EQUAL(0, pthread_key_delete(key));
}
static void *thread_test_pthread_destructor(void *v_key)
{
printf("Local storage thread running...\n");
pthread_key_t key = (pthread_key_t) v_key;
expected_destructor_ptr = &key; // address of stack variable in the task...
pthread_setspecific(key, expected_destructor_ptr);
printf("Local storage thread done.\n");
return NULL;
}
static void test_pthread_destructor(void *value)
{
actual_destructor_ptr = value;
}
static void task_test_pthread_destructor(void *v_key)
{
/* call the pthread main routine, then delete ourselves... */
thread_test_pthread_destructor(v_key);
vTaskDelete(NULL);
}
#define STRESS_NUMITER 2000000
#define STRESS_NUMTASKS 16
static void *thread_stress_test(void *v_key)
{
pthread_key_t key = (pthread_key_t) v_key;
void *tls_value = (void *)esp_random();
pthread_setspecific(key, tls_value);
for(int i = 0; i < STRESS_NUMITER; i++) {
TEST_ASSERT_EQUAL_HEX32(tls_value, pthread_getspecific(key));
}
return NULL;
}
// This test case was added to reproduce issues with unpinned tasks and TLS
TEST_CASE("pthread local storage stress test", "[pthread]")
{
pthread_key_t key = -1;
pthread_t threads[STRESS_NUMTASKS] = { 0 };
TEST_ASSERT_EQUAL(0, pthread_key_create(&key, test_pthread_destructor));
for (int i = 0; i < STRESS_NUMTASKS; i++) {
TEST_ASSERT_EQUAL(0, pthread_create(&threads[i], NULL, thread_stress_test, (void *)key));
}
for (int i = 0; i < STRESS_NUMTASKS; i++) {
TEST_ASSERT_EQUAL(0, pthread_join(threads[i], NULL));
}
}
#define NUM_KEYS 4 // number of keys used in repeat destructor test
#define NUM_REPEATS 17 // number of times we re-set a key to a non-NULL value to re-trigger destructor
typedef struct {
pthread_key_t keys[NUM_KEYS]; // pthread local storage keys used in test
unsigned count; // number of times the destructor has been called
int last_idx; // index of last key where destructor was called
} destr_test_state_t;
static void s_test_repeat_destructor(void *vp_state);
static void *s_test_repeat_destructor_thread(void *vp_state);
// Test the correct behaviour of a pthread destructor function that uses
// pthread_setspecific() to set a new value when it runs, which causes the
// destructor to be invoked again on a subsequent iteration.
//
// As described in https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_key_create.html
TEST_CASE("pthread local storage 'repeat' destructor test", "[pthread]")
{
int r;
destr_test_state_t state = { .last_idx = -1 };
pthread_t thread;
for (int i = 0; i < NUM_KEYS; i++) {
r = pthread_key_create(&state.keys[i], s_test_repeat_destructor);
TEST_ASSERT_EQUAL(0, r);
}
r = pthread_create(&thread, NULL, s_test_repeat_destructor_thread, &state);
TEST_ASSERT_EQUAL(0, r);
r = pthread_join(thread, NULL);
TEST_ASSERT_EQUAL(0, r);
// Read 'count' through a volatile pointer to make sure the compiler fetches the value from memory, not from a register
//
// We expect the destructor was called NUM_REPEATS times when it repeated, then NUM_KEYS times when it didn't
TEST_ASSERT_EQUAL(NUM_REPEATS + NUM_KEYS, *((volatile unsigned *)&state.count));
// cleanup
for (int i = 0; i < NUM_KEYS; i++) {
r = pthread_key_delete(state.keys[i]);
TEST_ASSERT_EQUAL(0, r);
}
}
static void s_test_repeat_destructor(void *vp_state)
{
destr_test_state_t *state = vp_state;
state->count++;
printf("Destructor! Arg %p Count %d\n", state, state->count);
if (state->count > NUM_REPEATS) {
return; // Stop replacing values after NUM_REPEATS destructor calls; the values will be NULLed out now
}
// Find the key which has a NULL value, this is the key for this destructor. We will set it back to 'state' to repeat later.
// At this point only one key should have a NULL value
int null_idx = -1;
for (int i = 0; i < NUM_KEYS; i++) {
if (pthread_getspecific(state->keys[i]) == NULL) {
TEST_ASSERT_EQUAL(-1, null_idx); // If more than one key has a NULL value, something has gone wrong
null_idx = i;
// don't break, verify the other keys have non-NULL values
}
}
TEST_ASSERT_NOT_EQUAL(-1, null_idx); // One key should have a NULL value
// The same key shouldn't be destroyed twice in a row, as new non-NULL values should be destroyed
// after existing non-NULL values (to match spec behaviour)
TEST_ASSERT_NOT_EQUAL(null_idx, state->last_idx);
printf("Re-setting index %d\n", null_idx);
pthread_setspecific(state->keys[null_idx], state);
state->last_idx = null_idx;
}
static void *s_test_repeat_destructor_thread(void *vp_state)
{
destr_test_state_t *state = vp_state;
for (int i = 0; i < NUM_KEYS; i++) {
pthread_setspecific(state->keys[i], state);
}
pthread_exit(NULL);
}

View File

@@ -0,0 +1,295 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include "sdkconfig.h"
#include <errno.h>
#include <stdatomic.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_timer.h"
#include "esp_pthread.h"
#include <pthread.h>
#include "unity.h"
TEST_CASE("pthread_rwlock_init invalid arg", "[pthread][rwlock]")
{
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(NULL, NULL), EINVAL);
}
TEST_CASE("pthread_rwlock_destroy invalid arg", "[pthread][rwlock]")
{
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(NULL), EINVAL);
pthread_rwlock_t rwlock = 0;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), EINVAL);
}
TEST_CASE("create and destroy rwlock", "[pthread][rwlock]")
{
pthread_rwlock_t rwlock;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
}
TEST_CASE("pthread_rwlock_destroy encounters static initializer", "[pthread][rwlock]")
{
pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
}
TEST_CASE("rdlock invalid param", "[pthread][rwlock]")
{
TEST_ASSERT_EQUAL_INT(pthread_rwlock_rdlock(NULL), EINVAL);
pthread_rwlock_t rwlock = 0;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_rdlock(&rwlock), EINVAL);
}
TEST_CASE("unlock invalid param", "[pthread][rwlock]")
{
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(NULL), EINVAL);
pthread_rwlock_t rwlock = 0;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(&rwlock), EINVAL);
}
TEST_CASE("wrlock lock invalid param", "[pthread][rwlock]")
{
TEST_ASSERT_EQUAL_INT(pthread_rwlock_wrlock(NULL), EINVAL);
pthread_rwlock_t rwlock = 0;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_wrlock(&rwlock), EINVAL);
}
TEST_CASE("rdlock lock statically initialized lock", "[pthread][rwlock]")
{
pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_rdlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
}
TEST_CASE("rdlock unlock", "[pthread][rwlock]")
{
pthread_rwlock_t rwlock;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_rdlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
}
TEST_CASE("multiple read locks", "[pthread][rwlock]")
{
pthread_rwlock_t rwlock;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_rdlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_rdlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
}
TEST_CASE("wrlock lock-unlock", "[pthread][rwlock]")
{
pthread_rwlock_t rwlock;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_wrlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(&rwlock), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
}
struct ReaderWriterArgs {
QueueHandle_t *wait_queue;
size_t sem_wait_release_num;
pthread_rwlock_t *rwlock;
volatile bool reading;
volatile bool writing;
};
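/* Handshake: the first writer takes the write lock and only then posts
 * sem_wait_release_num queue messages, so the reader/writer2 threads begin
 * their lock attempts while the write lock is guaranteed to be held. */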
static void *reader(void *arg)
{
uint8_t dummy_message;
struct ReaderWriterArgs *rw_args = (struct ReaderWriterArgs*) arg;
TEST_ASSERT_EQUAL(xQueueReceive(*(rw_args->wait_queue), &dummy_message, portMAX_DELAY), pdTRUE);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_rdlock(rw_args->rwlock), 0);
rw_args->reading = true;
TEST_ASSERT_FALSE(rw_args->writing);
rw_args->reading = false;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(rw_args->rwlock), 0);
return NULL;
}
static void *writer(void *arg)
{
uint8_t dummy_msg;
struct ReaderWriterArgs *rw_args = (struct ReaderWriterArgs*) arg;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_wrlock(rw_args->rwlock), 0);
rw_args->writing = true;
for (size_t i = 0; i < rw_args->sem_wait_release_num; i++) {
TEST_ASSERT_EQUAL(xQueueSendToBack(*(rw_args->wait_queue), &dummy_msg, portMAX_DELAY), pdTRUE);
}
TEST_ASSERT_FALSE(rw_args->reading);
vTaskDelay(20 / portTICK_PERIOD_MS);
TEST_ASSERT_FALSE(rw_args->reading);
rw_args->writing = false;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(rw_args->rwlock), 0);
return NULL;
}
TEST_CASE("wrlock reader waits", "[pthread][rwlock]")
{
QueueHandle_t wait_queue;
pthread_rwlock_t rwlock;
pthread_t reader_thread;
pthread_t writer_thread;
struct ReaderWriterArgs rw_args;
wait_queue = xQueueCreate(1, 1);
TEST_ASSERT(wait_queue);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
rw_args.wait_queue = &wait_queue;
rw_args.sem_wait_release_num = 1;
rw_args.rwlock = &rwlock;
rw_args.writing = false;
rw_args.reading = false;
TEST_ASSERT_EQUAL(pthread_create(&reader_thread, NULL, reader, &rw_args), 0);
TEST_ASSERT_EQUAL(pthread_create(&writer_thread, NULL, writer, &rw_args), 0);
TEST_ASSERT_EQUAL(pthread_join(writer_thread, NULL), 0);
TEST_ASSERT_EQUAL(pthread_join(reader_thread, NULL), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
vQueueDelete(wait_queue);
}
TEST_CASE("wrlock multiple readers wait", "[pthread][rwlock]")
{
static const size_t THREAD_NUM = 4;
QueueHandle_t wait_queue;
pthread_rwlock_t rwlock;
pthread_t reader_thread[THREAD_NUM];
pthread_t writer_thread;
struct ReaderWriterArgs rw_args;
wait_queue = xQueueCreate(THREAD_NUM, 1);
TEST_ASSERT(wait_queue);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
rw_args.wait_queue = &wait_queue;
rw_args.sem_wait_release_num = THREAD_NUM;
rw_args.rwlock = &rwlock;
rw_args.writing = false;
rw_args.reading = false;
for (size_t i = 0; i < THREAD_NUM; i++) {
TEST_ASSERT_EQUAL(pthread_create(&(reader_thread[i]), NULL, reader, &rw_args), 0);
}
TEST_ASSERT_EQUAL(pthread_create(&writer_thread, NULL, writer, &rw_args), 0);
TEST_ASSERT_EQUAL(pthread_join(writer_thread, NULL), 0);
for (size_t i = 0; i < THREAD_NUM; i++) {
TEST_ASSERT_EQUAL(pthread_join(reader_thread[i], NULL), 0);
}
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
vQueueDelete(wait_queue);
}
static void *writer2(void *arg)
{
uint8_t dummy_msg;
struct ReaderWriterArgs *rw_args = (struct ReaderWriterArgs*) arg;
TEST_ASSERT_EQUAL(xQueueReceive(*(rw_args->wait_queue), &dummy_msg, portMAX_DELAY), pdTRUE);
TEST_ASSERT_TRUE(rw_args->writing);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_wrlock(rw_args->rwlock), 0);
TEST_ASSERT_FALSE(rw_args->writing);
rw_args->writing = true;
vTaskDelay(10 / portTICK_PERIOD_MS);
rw_args->writing = false;
TEST_ASSERT_EQUAL_INT(pthread_rwlock_unlock(rw_args->rwlock), 0);
return NULL;
}
TEST_CASE("wrlock writer waits", "[pthread][rwlock]")
{
QueueHandle_t wait_queue;
pthread_rwlock_t rwlock;
pthread_t writer_thread;
pthread_t writer_2_thread;
struct ReaderWriterArgs rw_args;
wait_queue = xQueueCreate(1, 1);
TEST_ASSERT(wait_queue);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
rw_args.wait_queue = &wait_queue;
rw_args.sem_wait_release_num = 1;
rw_args.rwlock = &rwlock;
rw_args.writing = false;
rw_args.reading = false;
TEST_ASSERT_EQUAL(pthread_create(&writer_2_thread, NULL, writer2, &rw_args), 0);
TEST_ASSERT_EQUAL(pthread_create(&writer_thread, NULL, writer, &rw_args), 0);
TEST_ASSERT_EQUAL(pthread_join(writer_thread, NULL), 0);
TEST_ASSERT_EQUAL(pthread_join(writer_2_thread, NULL), 0);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
vQueueDelete(wait_queue);
}
TEST_CASE("wrlock multiple writers wait", "[pthread][rwlock]")
{
static const size_t THREAD_NUM = 4;
QueueHandle_t wait_queue;
pthread_rwlock_t rwlock;
pthread_t writer_thread;
pthread_t writer_2_thread[THREAD_NUM];
struct ReaderWriterArgs rw_args;
wait_queue = xQueueCreate(THREAD_NUM, 1);
TEST_ASSERT(wait_queue);
TEST_ASSERT_EQUAL_INT(pthread_rwlock_init(&rwlock, NULL), 0);
rw_args.wait_queue = &wait_queue;
rw_args.sem_wait_release_num = THREAD_NUM;
rw_args.rwlock = &rwlock;
rw_args.writing = false;
rw_args.reading = false;
for (size_t i = 0; i < THREAD_NUM; i++) {
TEST_ASSERT_EQUAL(pthread_create(&writer_2_thread[i], NULL, writer2, &rw_args), 0);
}
TEST_ASSERT_EQUAL(pthread_create(&writer_thread, NULL, writer, &rw_args), 0);
TEST_ASSERT_EQUAL(pthread_join(writer_thread, NULL), 0);
for (size_t i = 0; i < THREAD_NUM; i++) {
TEST_ASSERT_EQUAL(pthread_join(writer_2_thread[i], NULL), 0);
}
TEST_ASSERT_EQUAL_INT(pthread_rwlock_destroy(&rwlock), 0);
vQueueDelete(wait_queue);
}

View File

@@ -0,0 +1,35 @@
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded import Dut


@pytest.mark.generic
@pytest.mark.supported_targets
@pytest.mark.parametrize(
    'config',
    [
        'default',
    ],
    indirect=True,
)
def test_pthread(dut: Dut) -> None:
    dut.expect_exact('Press ENTER to see the list of tests')
    dut.write('*')
    dut.expect_unity_test_output(timeout=300)


@pytest.mark.generic
@pytest.mark.parametrize(
    'config',
    [
        pytest.param('single_core_esp32', marks=[pytest.mark.esp32]),
        pytest.param('single_core_esp32s3', marks=[pytest.mark.esp32s3]),
    ],
    indirect=True,
)
def test_pthread_single_core(dut: Dut) -> None:
    dut.expect_exact('Press ENTER to see the list of tests')
    dut.write('*')
    dut.expect_unity_test_output(timeout=300)

View File

@@ -0,0 +1,2 @@
CONFIG_IDF_TARGET="esp32"
CONFIG_FREERTOS_UNICORE=y

View File

@@ -0,0 +1,2 @@
CONFIG_IDF_TARGET="esp32s3"
CONFIG_FREERTOS_UNICORE=y

View File

@@ -0,0 +1,2 @@
# Some of the tests will starve the watchdog
CONFIG_ESP_TASK_WDT=n