mirror of
https://github.com/espressif/esp-idf.git
synced 2025-08-10 20:54:24 +00:00
tiny-test-fw: move to tools/esp_python_packages:
make `tiny_test_fw` as a package and move to root path of idf python packages
This commit is contained in:
327
tools/ci/python_packages/tiny_test_fw/Utility/CIAssignTest.py
Normal file
327
tools/ci/python_packages/tiny_test_fw/Utility/CIAssignTest.py
Normal file
@@ -0,0 +1,327 @@
|
||||
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Common logic to assign test cases to CI jobs.
|
||||
|
||||
Some background knowledge about Gitlab CI and use flow in esp-idf:
|
||||
|
||||
* Gitlab CI jobs are static in ``.gitlab-ci.yml``. We can't dynamically create test jobs
|
||||
* For test job running on DUT, we use ``tags`` to select runners with different test environment
|
||||
* We have ``assign_test`` stage, will collect cases, and then assign them to correct test jobs
|
||||
* ``assign_test`` will fail if failed to assign any cases
|
||||
* with ``assign_test``, we can:
|
||||
* dynamically filter test case we want to test
|
||||
* alert user if they forget to add CI jobs and guide how to add test jobs
|
||||
* the last step of ``assign_test`` is to output config files, then test jobs will run these cases
|
||||
|
||||
The Basic logic to assign test cases is as follow:
|
||||
|
||||
1. do search all the cases
|
||||
2. do filter case (if filter is specified by @bot)
|
||||
3. put cases to different groups according to rule of ``Group``
|
||||
* try to put them in existed groups
|
||||
* if failed then create a new group and add this case
|
||||
4. parse and filter the test jobs from CI config file
|
||||
5. try to assign all groups to jobs according to tags
|
||||
6. output config files for jobs
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
|
||||
import yaml
|
||||
try:
|
||||
from yaml import CLoader as Loader
|
||||
except ImportError:
|
||||
from yaml import Loader as Loader
|
||||
|
||||
from . import (CaseConfig, SearchCases, GitlabCIJob, console_log)
|
||||
|
||||
|
||||
class Group(object):
    """
    A group of test cases that will be executed together by one CI job.

    Cases are grouped by ``SORT_KEYS``: only cases with equal values for every
    sort key may live in the same group.  A group is additionally bounded by a
    total execution time and a case count, so a single CI job doesn't run for
    too long.

    :param case: first test case; its attribute values define the group filters
    """

    # limits for one group (i.e. for one CI job)
    MAX_EXECUTION_TIME = 30
    MAX_CASE = 15
    SORT_KEYS = ["env_tag"]
    # Matching CI job rules could be different from the way we want to group test cases.
    # For example, when assign unit test cases, different test cases need to use different test functions.
    # We need to put them into different groups.
    # But these groups can be assigned to jobs with same tags, as they use the same test environment.
    CI_JOB_MATCH_KEYS = SORT_KEYS

    def __init__(self, case):
        self.execution_time = 0
        self.case_list = [case]
        self.filters = dict(zip(self.SORT_KEYS, [self._get_case_attr(case, x) for x in self.SORT_KEYS]))
        # we use ci_job_match_keys to match CI job tags. It's a set of required tags.
        self.ci_job_match_keys = set([self._get_case_attr(case, x) for x in self.CI_JOB_MATCH_KEYS])

    @staticmethod
    def _get_case_attr(case, attr):
        # we might use different type for case (dict or test_func)
        # this method will do get attribute from cases
        return case.case_info[attr]

    def accept_new_case(self):
        """
        check if allowed to add any case to this group

        :return: True or False
        """
        max_time = (sum([self._get_case_attr(x, "execution_time") for x in self.case_list])
                    < self.MAX_EXECUTION_TIME)
        max_case = (len(self.case_list) < self.MAX_CASE)
        return max_time and max_case

    def add_case(self, case):
        """
        add case to current group

        :param case: test case
        :return: True if add succeed, else False
        """
        added = False
        if self.accept_new_case():
            for key in self.filters:
                if self._get_case_attr(case, key) != self.filters[key]:
                    break
            else:
                # all filter values matched: the case belongs to this group
                self.case_list.append(case)
                added = True
        return added

    def add_extra_case(self, case):
        """
        By default (``add_case`` method), cases will only be added when have equal values of all filters with group.
        But in some cases, we also want to add cases which are not best fit.
        For example, one group has can run cases require (A, B). It can also accept cases require (A, ) and (B, ).
        When assign failed by best fit, we will use this method to try if we can assign all failed cases.

        If subclass want to retry, they need to overwrite this method.
        Logic can be applied to handle such scenario could be different for different cases.

        :param case: test case which couldn't be assigned by best fit
        :return: True if accepted else False
        """
        # the base class never accepts extra cases; return False explicitly so
        # the result matches the documented contract (``pass`` returned None)
        return False

    def output(self):
        """
        output data for job configs

        :return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
        """
        output_data = {
            "Filter": self.filters,
            "CaseConfig": [{"name": self._get_case_attr(x, "name")} for x in self.case_list],
        }
        return output_data
|
||||
|
||||
|
||||
class AssignTest(object):
    """
    Auto assign tests to CI jobs.

    :param test_case_path: path of test case file(s)
    :param ci_config_file: path of ``.gitlab-ci.yml``
    :param case_group: ``Group`` (or subclass) used to bucket test cases
    """
    # subclass need to rewrite CI test job pattern, to filter all test jobs
    CI_TEST_JOB_PATTERN = re.compile(r"^test_.+")
    # by default we only run function in CI, as other tests could take long time
    DEFAULT_FILTER = {
        "category": "function",
        "ignore": False,
    }

    def __init__(self, test_case_path, ci_config_file, case_group=Group):
        self.test_case_path = test_case_path
        self.test_cases = []
        self.jobs = self._parse_gitlab_ci_config(ci_config_file)
        self.case_group = case_group

    @staticmethod
    def _handle_parallel_attribute(job_name, job):
        """
        Expand one CI job entry into one ``Job`` per parallel instance.

        :param job_name: job name as it appears in the CI config
        :param job: job data dict loaded from the CI config
        :return: list of ``GitlabCIJob.Job`` objects
        """
        jobs_out = []
        try:
            for i in range(job["parallel"]):
                jobs_out.append(GitlabCIJob.Job(job, job_name + "_{}".format(i + 1)))
        except KeyError:
            # Gitlab don't allow to set parallel to 1.
            # to make test job name same ($CI_JOB_NAME_$CI_NODE_INDEX),
            # we append "_" to jobs don't have parallel attribute
            jobs_out.append(GitlabCIJob.Job(job, job_name + "_"))
        return jobs_out

    def _parse_gitlab_ci_config(self, ci_config_file):
        """
        Load the CI config and collect all test jobs (names matching
        ``CI_TEST_JOB_PATTERN``), sorted by job name.

        :param ci_config_file: path of ``.gitlab-ci.yml``
        :return: sorted list of ``GitlabCIJob.Job`` objects
        """

        with open(ci_config_file, "r") as f:
            ci_config = yaml.load(f, Loader=Loader)

        job_list = list()
        for job_name in ci_config:
            if self.CI_TEST_JOB_PATTERN.search(job_name) is not None:
                job_list.extend(self._handle_parallel_attribute(job_name, ci_config[job_name]))
        # sort by name so assignment order is deterministic between runs
        job_list.sort(key=lambda x: x["name"])
        return job_list

    def _search_cases(self, test_case_path, case_filter=None):
        """
        :param test_case_path: path contains test case folder
        :param case_filter: filter for test cases. the filter to use is default filter updated with case_filter param.
        :return: filtered test case list
        """
        _case_filter = self.DEFAULT_FILTER.copy()
        if case_filter:
            _case_filter.update(case_filter)
        test_methods = SearchCases.Search.search_test_cases(test_case_path)
        return CaseConfig.filter_test_cases(test_methods, _case_filter)

    def _group_cases(self):
        """
        separate all cases into groups according group rules. each group will be executed by one CI job.

        :return: test case groups.
        """
        groups = []
        for case in self.test_cases:
            for group in groups:
                # add to current group
                if group.add_case(case):
                    break
            else:
                # create new group
                groups.append(self.case_group(case))
        return groups

    def _assign_failed_cases(self, assigned_groups, failed_groups):
        """ try to assign failed cases to already assigned test groups """
        still_failed_groups = []
        failed_cases = []
        for group in failed_groups:
            failed_cases.extend(group.case_list)
        for case in failed_cases:
            # first try to assign to already assigned groups
            for group in assigned_groups:
                if group.add_extra_case(case):
                    break
            else:
                # if failed, group the failed cases
                for group in still_failed_groups:
                    if group.add_case(case):
                        break
                else:
                    still_failed_groups.append(self.case_group(case))
        return still_failed_groups

    @staticmethod
    def _apply_bot_filter():
        """
        we support customize CI test with bot.
        here we process from and return the filter which ``_search_cases`` accepts.

        :return: filter for search test cases
        """
        # BOT_CASE_FILTER is expected to hold a JSON dict; empty/unset means no
        # extra filtering on top of DEFAULT_FILTER
        bot_filter = os.getenv("BOT_CASE_FILTER")
        if bot_filter:
            bot_filter = json.loads(bot_filter)
        else:
            bot_filter = dict()
        return bot_filter

    def _apply_bot_test_count(self):
        """
        Bot could also pass test count.
        If filtered cases need to be tested for several times, then we do duplicate them here.
        """
        test_count = os.getenv("BOT_TEST_COUNT")
        if test_count:
            test_count = int(test_count)
            # list repetition duplicates the case references N times
            self.test_cases *= test_count

    @staticmethod
    def _count_groups_by_keys(test_groups):
        """
        Count the number of test groups by job match keys.
        It's an important information to update CI config file.
        """
        group_count = dict()
        for group in test_groups:
            key = ",".join(group.ci_job_match_keys)
            try:
                group_count[key] += 1
            except KeyError:
                group_count[key] = 1
        return group_count

    def assign_cases(self):
        """
        separate test cases to groups and assign test cases to CI jobs.

        :raise AssertError: if failed to assign any case to CI job.
        :return: None
        """
        failed_to_assign = []
        assigned_groups = []
        case_filter = self._apply_bot_filter()
        self.test_cases = self._search_cases(self.test_case_path, case_filter)
        self._apply_bot_test_count()
        test_groups = self._group_cases()

        # first pass: best-fit assignment, one group per matching job
        for group in test_groups:
            for job in self.jobs:
                if job.match_group(group):
                    job.assign_group(group)
                    assigned_groups.append(group)
                    break
            else:
                failed_to_assign.append(group)

        # second pass: try to squeeze failed cases into already assigned groups
        if failed_to_assign:
            failed_to_assign = self._assign_failed_cases(assigned_groups, failed_to_assign)

        # print debug info
        # total requirement of current pipeline
        required_group_count = self._count_groups_by_keys(test_groups)
        console_log("Required job count by tags:")
        for tags in required_group_count:
            console_log("\t{}: {}".format(tags, required_group_count[tags]))

        # number of unused jobs
        not_used_jobs = [job for job in self.jobs if "case group" not in job]
        if not_used_jobs:
            console_log("{} jobs not used. Please check if you define too much jobs".format(len(not_used_jobs)), "O")
            for job in not_used_jobs:
                console_log("\t{}".format(job["name"]), "O")

        # failures
        if failed_to_assign:
            console_log("Too many test cases vs jobs to run. "
                        "Please increase parallel count in tools/ci/config/target-test.yml "
                        "for jobs with specific tags:", "R")
            failed_group_count = self._count_groups_by_keys(failed_to_assign)
            for tags in failed_group_count:
                console_log("\t{}: {}".format(tags, failed_group_count[tags]), "R")
            raise RuntimeError("Failed to assign test case to CI jobs")

    def output_configs(self, output_path):
        """
        :param output_path: path to output config files for each CI job
        :return: None
        """
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        for job in self.jobs:
            job.output_config(output_path)
|
225
tools/ci/python_packages/tiny_test_fw/Utility/CaseConfig.py
Normal file
225
tools/ci/python_packages/tiny_test_fw/Utility/CaseConfig.py
Normal file
@@ -0,0 +1,225 @@
|
||||
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Processing case config files.
|
||||
This is mainly designed for CI, we need to auto create and assign test jobs.
|
||||
|
||||
Template Config File::
|
||||
|
||||
TestConfig:
|
||||
app:
|
||||
package: ttfw_idf
|
||||
class: Example
|
||||
dut:
|
||||
path:
|
||||
class:
|
||||
config_file: /somewhere/config_file_for_runner
|
||||
test_name: CI_test_job_1
|
||||
|
||||
Filter:
|
||||
chip: ESP32
|
||||
env_tag: default
|
||||
|
||||
CaseConfig:
|
||||
- name: test_examples_protocol_https_request
|
||||
# optional
|
||||
extra_data: some extra data passed to case with kwarg extra_data
|
||||
overwrite: # overwrite test configs
|
||||
app:
|
||||
package: ttfw_idf
|
||||
class: Example
|
||||
- name: xxx
|
||||
"""
|
||||
import importlib
|
||||
|
||||
import yaml
|
||||
try:
|
||||
from yaml import CLoader as Loader
|
||||
except ImportError:
|
||||
from yaml import Loader as Loader
|
||||
|
||||
from . import TestCase
|
||||
|
||||
|
||||
def _convert_to_lower_case_bytes(item):
    """
    bot filter is always lower case string.
    this function will convert to all string to lower case.
    Note: Unicode strings are converted to bytes.
    """
    if isinstance(item, (tuple, list)):
        output = [_convert_to_lower_case_bytes(v) for v in item]
    elif isinstance(item, type(b'')):
        output = item.lower()
    elif isinstance(item, type(u'')):
        output = item.encode().lower()
    else:
        # non-string values (int, bool, None, ...) pass through unchanged
        output = item
    return output


def _filter_one_case(test_method, case_filter):
    """ Apply filter for one case (the filter logic is the same as described in ``filter_test_cases``) """
    filter_result = True
    # filter keys are lower case. Do map lower case keys with original keys.
    key_mapping = {x.lower(): x for x in test_method.case_info.keys()}

    for orig_key in case_filter:
        try:
            key = key_mapping[orig_key]
        except KeyError:
            # key exists in filter only: the case supports all values for this
            # filter key, so this key matches.
            # (bug fix: ``key_mapping[orig_key]`` previously raised an uncaught
            # KeyError here, making the documented "filter-only key" rule
            # unreachable)
            continue
        # the filter key is both in case and filter
        # we need to check if they match
        filter_item = _convert_to_lower_case_bytes(case_filter[orig_key])
        accepted_item = _convert_to_lower_case_bytes(test_method.case_info[key])

        if isinstance(filter_item, (tuple, list)) \
                and isinstance(accepted_item, (tuple, list)):
            # both list/tuple, check if they have common item
            filter_result = True if set(filter_item) & set(accepted_item) else False
        elif isinstance(filter_item, (tuple, list)):
            # filter item list/tuple, check if case accepted value in filter item list/tuple
            filter_result = True if accepted_item in filter_item else False
        elif isinstance(accepted_item, (tuple, list)):
            # accepted item list/tuple, check if case filter value is in accept item list/tuple
            filter_result = True if filter_item in accepted_item else False
        else:
            if type(filter_item) != type(accepted_item):
                # This will catch silent ignores of test cases when Unicode and bytes are compared
                raise AssertionError(filter_item, '!=', accepted_item)
            # both string/int, just do string compare
            filter_result = (filter_item == accepted_item)
        if not filter_result:
            # match failed
            break
    return filter_result


def filter_test_cases(test_methods, case_filter):
    """
    filter test case. filter logic:

    1. if filter key both in case attribute and filter:
        * if both value is string/int, then directly compare
        * if one is list/tuple, the other one is string/int, then check if string/int is in list/tuple
        * if both are list/tuple, then check if they have common item
    2. if only case attribute or filter have the key, filter succeed
    3. will do case insensitive compare for string

    for example, the following are match succeed scenarios
    (the rule is symmetric, result is same if exchange values for user filter and case attribute):

    * user case filter is ``chip: ["esp32", "esp32c"]``, case doesn't have ``chip`` attribute
    * user case filter is ``chip: ["esp32", "esp32c"]``, case attribute is ``chip: "esp32"``
    * user case filter is ``chip: "esp32"``, case attribute is ``chip: "esp32"``


    :param test_methods: a list of test methods functions
    :param case_filter: case filter
    :return: filtered test methods
    """
    filtered_test_methods = []
    for test_method in test_methods:
        if _filter_one_case(test_method, case_filter):
            filtered_test_methods.append(test_method)
    return filtered_test_methods
|
||||
|
||||
|
||||
class Parser(object):
    """Parse case config files and apply them to searched test methods."""

    # template for configs; treated as read-only — ``apply_config`` copies
    # per-case dicts before mutating them
    DEFAULT_CONFIG = {
        "TestConfig": dict(),
        "Filter": dict(),
        "CaseConfig": [{"extra_data": None}],
    }

    @classmethod
    def parse_config_file(cls, config_file):
        """
        parse from config file and then update to default config.

        :param config_file: config file path
        :return: configs
        """
        configs = cls.DEFAULT_CONFIG.copy()
        if config_file:
            with open(config_file, "r") as f:
                configs.update(yaml.load(f, Loader=Loader))
        return configs

    @classmethod
    def handle_overwrite_args(cls, overwrite):
        """
        handle overwrite configs. import module from path and then get the required class.

        :param overwrite: overwrite args
        :return: dict of (original key: class)
        """
        output = dict()
        for key in overwrite:
            module = importlib.import_module(overwrite[key]["package"])
            output[key] = getattr(module, overwrite[key]["class"])
        return output

    @classmethod
    def apply_config(cls, test_methods, config_file):
        """
        apply config for test methods

        :param test_methods: a list of test methods functions
        :param config_file: case filter file
        :return: filtered cases
        """
        configs = cls.parse_config_file(config_file)
        test_case_list = []
        for _config in configs["CaseConfig"]:
            # copy before popping keys: when no config file is given,
            # ``_config`` aliases the dict inside the shared class-level
            # ``DEFAULT_CONFIG``, and popping from it would corrupt the
            # default for every later call (bug fix)
            _config = dict(_config)
            _filter = configs["Filter"].copy()
            _overwrite = cls.handle_overwrite_args(_config.pop("overwrite", dict()))
            _extra_data = _config.pop("extra_data", None)
            _filter.update(_config)
            for test_method in test_methods:
                if _filter_one_case(test_method, _filter):
                    test_case_list.append(TestCase.TestCase(test_method, _extra_data, **_overwrite))
        return test_case_list
|
||||
|
||||
|
||||
class Generator(object):
    """Generate case config files that test jobs later consume."""

    def __init__(self):
        # start with empty sections; callers may replace them with
        # ``set_default_configs`` before generating files
        self.default_config = {"TestConfig": {}, "Filter": {}}

    def set_default_configs(self, test_config, case_filter):
        """
        Replace the default "TestConfig" and "Filter" sections.

        :param test_config: "TestConfig" value
        :param case_filter: "Filter" value
        :return: None
        """
        self.default_config = {
            "TestConfig": test_config,
            "Filter": case_filter,
        }

    def generate_config(self, case_configs, output_file):
        """
        Dump the default sections plus the given "CaseConfig" as YAML.

        :param case_configs: "CaseConfig" value
        :param output_file: output file path
        :return: None
        """
        config = dict(self.default_config)
        config["CaseConfig"] = case_configs
        with open(output_file, "w") as f:
            yaml.dump(config, f)
|
65
tools/ci/python_packages/tiny_test_fw/Utility/GitlabCIJob.py
Normal file
65
tools/ci/python_packages/tiny_test_fw/Utility/GitlabCIJob.py
Normal file
@@ -0,0 +1,65 @@
|
||||
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
class Job(dict):
    """
    Gitlab CI job

    :param job: job data loaded from .gitlab-ci.yml
    :param job_name: job name
    """
    def __init__(self, job, job_name):
        super(Job, self).__init__(job)
        self["name"] = job_name
        # keep tags as a set for order-insensitive matching against groups
        self.tags = set(self["tags"])

    def match_group(self, group):
        """
        Match group by tags of job.
        All filters values of group should be included in tags.

        :param group: case group to match
        :return: True or False
        """
        # a job accepts at most one group, and only when the group's required
        # tag set equals the job's tag set
        return "case group" not in self and group.ci_job_match_keys == self.tags

    def assign_group(self, group):
        """
        assign a case group to a test job.

        :param group: the case group to assign
        """
        self["case group"] = group

    def output_config(self, file_path):
        """
        output test config to the given path.
        file name will be job_name.yml

        :param file_path: output file path
        :return: None
        """
        if "case group" not in self:
            # nothing assigned to this job: no config file to write
            return
        target = os.path.join(file_path, self["name"] + ".yml")
        with open(target, "w") as f:
            yaml.dump(self["case group"].output(), f, default_flow_style=False)
|
111
tools/ci/python_packages/tiny_test_fw/Utility/SearchCases.py
Normal file
111
tools/ci/python_packages/tiny_test_fw/Utility/SearchCases.py
Normal file
@@ -0,0 +1,111 @@
|
||||
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" search test cases from a given file or path """
|
||||
import os
|
||||
import fnmatch
|
||||
import types
|
||||
import copy
|
||||
|
||||
from . import load_source
|
||||
|
||||
|
||||
class Search(object):
    """Collect test case functions from ``*_test.py`` files."""

    TEST_CASE_FILE_PATTERN = "*_test.py"

    @classmethod
    def _search_cases_from_file(cls, file_name):
        """ get test cases from test case .py file """

        print("Try to get cases from: " + file_name)
        test_functions = []
        try:
            mod = load_source(file_name)
            # keep only functions that the test-method decorator has marked
            # with a truthy ``test_method`` attribute
            for name in dir(mod):
                candidate = getattr(mod, name)
                if not isinstance(candidate, types.FunctionType):
                    continue
                try:
                    if candidate.test_method:
                        test_functions.append(candidate)
                except AttributeError:
                    continue
        except ImportError as e:
            print("ImportError: \r\n\tFile:" + file_name + "\r\n\tError:" + str(e))
        for i, test_function in enumerate(test_functions):
            print("\t{}. ".format(i + 1) + test_function.case_info["name"])
        return test_functions

    @classmethod
    def _search_test_case_files(cls, test_case, file_pattern):
        """ search all test case files recursively of a path """

        if not os.path.exists(test_case):
            raise OSError("test case path not exist")
        if not os.path.isdir(test_case):
            # a single file was given
            return [test_case]
        matches = []
        for root, _, file_names in os.walk(test_case):
            matches.extend(os.path.join(root, name)
                           for name in fnmatch.filter(file_names, file_pattern))
        return matches

    @classmethod
    def replicate_case(cls, case):
        """
        Replicate cases according to its filter values.
        If one case has specified filter chip=(ESP32, ESP32C),
        it will create 2 cases, one for ESP32 and on for ESP32C.
        Once the cases are replicated, it's easy to filter those we want to execute.

        :param case: the original case
        :return: a list of replicated cases
        """
        # attributes whose values are list/tuple fan the case out
        multi_value_keys = [key for key in case.case_info
                            if isinstance(case.case_info[key], (list, tuple))]

        replicated = [case]
        for key in multi_value_keys:
            expanded = []
            for base in replicated:
                for value in case.case_info[key]:
                    clone = copy.deepcopy(base)
                    clone.case_info[key] = value
                    expanded.append(clone)
            replicated = expanded
        return replicated

    @classmethod
    def search_test_cases(cls, test_case):
        """
        search all test cases from a folder or file, and then do case replicate.

        :param test_case: test case file(s) path
        :return: a list of replicated test methods
        """
        found = []
        for case_file in cls._search_test_case_files(test_case, cls.TEST_CASE_FILE_PATTERN):
            found.extend(cls._search_cases_from_file(case_file))
        # handle replicate cases
        replicated = []
        for one_case in found:
            replicated.extend(cls.replicate_case(one_case))
        return replicated
|
58
tools/ci/python_packages/tiny_test_fw/Utility/TestCase.py
Normal file
58
tools/ci/python_packages/tiny_test_fw/Utility/TestCase.py
Normal file
@@ -0,0 +1,58 @@
|
||||
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import yaml
|
||||
|
||||
try:
|
||||
from yaml import CLoader as Loader
|
||||
except ImportError:
|
||||
from yaml import Loader as Loader
|
||||
|
||||
|
||||
class TestCase(object):
    """
    Test Case Object, mainly used with runner.
    runner can parse all test cases from a given path, set data and config for test case in prepare stage.
    TestCase instance will record these data, provide run method to let runner execute test case.

    :param test_method: test function
    :param extra_data: data passed to test function
    :param overwrite_args: kwargs that overwrite original test case configs
    """
    # fallback doc when the case docstring can't be parsed; must be treated as
    # read-only (``document`` copies it before updating)
    DEFAULT_CASE_DOC = dict()

    def __init__(self, test_method, extra_data, **overwrite_args):
        self.test_method = test_method
        self.extra_data = extra_data
        self.overwrite_args = overwrite_args

    def run(self):
        """ execute the test case """
        return self.test_method(self.extra_data, **self.overwrite_args)

    def document(self):
        """
        generate test case document.
        parse the case doc with yaml parser and update to original case attributes.

        :return: case document, dict of case attributes and values
        """
        doc_string = self.test_method.__doc__
        try:
            doc = yaml.load(doc_string, Loader=Loader)
        except (AttributeError, OSError, UnicodeDecodeError):
            doc = self.DEFAULT_CASE_DOC
        # copy before updating: ``doc`` may alias the shared class-level
        # ``DEFAULT_CASE_DOC``, which must not be mutated across instances
        # (bug fix: previously the shared dict was updated in place)
        doc = dict(doc)
        doc.update(self.test_method.env_args)
        doc.update(self.test_method.accepted_filter)
        return doc
|
71
tools/ci/python_packages/tiny_test_fw/Utility/__init__.py
Normal file
71
tools/ci/python_packages/tiny_test_fw/Utility/__init__.py
Normal file
@@ -0,0 +1,71 @@
|
||||
from __future__ import print_function
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
|
||||
# ANSI escape sequences, addressable by full color name or one-letter code
_COLOR_CODES = {
    "white": u'\033[0m',
    "red": u'\033[31m',
    "green": u'\033[32m',
    "orange": u'\033[33m',
    "blue": u'\033[34m',
    "purple": u'\033[35m',
    "W": u'\033[0m',
    "R": u'\033[31m',
    "G": u'\033[32m',
    "O": u'\033[33m',
    "B": u'\033[34m',
    "P": u'\033[35m'
}


def console_log(data, color="white", end="\n"):
    """
    log data to console.
    (if not flush console log, Gitlab-CI won't update logs during job execution)

    :param data: data content
    :param color: color name or one-letter code; unknown values fall back to white
    :param end: line terminator, as in ``print``
    """
    if color not in _COLOR_CODES:
        color = "white"
    prefix = _COLOR_CODES[color]
    if isinstance(data, bytes):
        data = data.decode('utf-8', 'replace')
    print(prefix + data, end=end)
    if color not in ("white", "W"):
        # reset color to white for later logs
        print(_COLOR_CODES["white"] + u"\r")
    sys.stdout.flush()
|
||||
|
||||
|
||||
__LOADED_MODULES = dict()
# we should only load one module once.
# if we load one module twice,
# python will regard the same object loaded in the first time and second time as different objects.
# it will lead to strange errors like `isinstance(object, type_of_this_object)` return False


def load_source(path):
    """
    Dynamic loading python file. Note that this function SHOULD NOT be used to replace ``import``.
    It should only be used when the package path is only available in runtime.

    :param path: The path of python file
    :return: Loaded object
    """
    path = os.path.realpath(path)
    # load name need to be unique, otherwise it will update the already loaded module
    load_name = str(len(__LOADED_MODULES))
    try:
        return __LOADED_MODULES[path]
    except KeyError:
        try:
            # ``Loader.load_module()`` is deprecated since Python 3.4 and was
            # removed in 3.12, so build and execute the module explicitly
            from importlib.machinery import SourceFileLoader
            from importlib.util import module_from_spec, spec_from_loader
            loader = SourceFileLoader(load_name, path)
            spec = spec_from_loader(load_name, loader)
            ret = module_from_spec(spec)
            # mimic ``load_module()``: register in sys.modules before executing
            # so the module is discoverable during its own import time
            sys.modules[load_name] = ret
            loader.exec_module(ret)
        except ImportError:
            # importlib.machinery doesn't exists in Python 2 so we will use imp (deprecated in Python 3)
            import imp
            ret = imp.load_source(load_name, path)
        __LOADED_MODULES[path] = ret
        return ret
|
Reference in New Issue
Block a user