style: format python files with isort and double-quote-string-fixer

Author: Fu Hanxi
Date:   2021-01-26 10:49:01 +08:00
parent dc8402ea61
commit 0146f258d7
276 changed files with 8241 additions and 8162 deletions
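
For context, here is a minimal sketch of the two rewrites applied throughout this commit, assuming the stock behaviour of the isort and double-quote-string-fixer pre-commit hooks: imports are grouped and sorted alphabetically, and double-quoted string literals are rewritten to single quotes unless the value itself contains a single quote. The names in the snippet are illustrative only; it is not part of the diff below.

    # Illustrative sketch only -- not part of this commit.
    import copy          # isort: standard-library imports grouped and sorted alphabetically
    import functools
    import time

    GREETING = 'hello'        # double-quote-string-fixer: "hello" -> 'hello'
    CONTRACTION = "doesn't"   # left untouched: the value contains a single quote

    words = copy.copy([GREETING, CONTRACTION])
    sentence = functools.reduce(lambda a, b: a + ' ' + b, words)
    print('[{}] {}'.format(time.strftime('%H:%M:%S'), sentence))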


@@ -80,9 +80,9 @@ class BaseApp(object):
if not test_suite_name:
test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]
sdk_path = cls.get_sdk_path()
log_folder = os.path.join(sdk_path, "TEST_LOGS",
log_folder = os.path.join(sdk_path, 'TEST_LOGS',
test_suite_name +
time.strftime("_%m%d_%H_%M_%S", time.localtime(LOG_FOLDER_TIMESTAMP)))
time.strftime('_%m%d_%H_%M_%S', time.localtime(LOG_FOLDER_TIMESTAMP)))
if not os.path.exists(log_folder):
os.makedirs(log_folder)
return log_folder


@@ -38,12 +38,13 @@ If they using different port then need to implement their DUTPort class as well.
"""
from __future__ import print_function
import time
import copy
import functools
import re
import sys
import threading
import copy
import functools
import time
# python2 and python3 queue package name is different
try:
@@ -82,15 +83,15 @@ def _decode_data(data):
# convert bytes to string. This is a bit of a hack, we know that we want to log this
# later so encode to the stdout encoding with backslash escapes for anything non-encodable
try:
return data.decode(sys.stdout.encoding, "backslashreplace")
return data.decode(sys.stdout.encoding, 'backslashreplace')
except UnicodeDecodeError: # Python <3.5 doesn't support backslashreplace
return data.decode(sys.stdout.encoding, "replace")
return data.decode(sys.stdout.encoding, 'replace')
return data
def _pattern_to_string(pattern):
try:
ret = "RegEx: " + pattern.pattern
ret = 'RegEx: ' + pattern.pattern
except AttributeError:
ret = pattern
return ret
@@ -167,7 +168,7 @@ class _LogThread(threading.Thread, _queue.Queue):
Then data will be passed to ``expect`` as soon as received.
"""
def __init__(self):
threading.Thread.__init__(self, name="LogThread")
threading.Thread.__init__(self, name='LogThread')
_queue.Queue.__init__(self, maxsize=0)
self.setDaemon(True)
self.flush_lock = threading.Lock()
@@ -177,7 +178,7 @@ class _LogThread(threading.Thread, _queue.Queue):
:param filename: log file name
:param data: log data. Must be ``bytes``.
"""
self.put({"filename": filename, "data": data})
self.put({'filename': filename, 'data': data})
def flush_data(self):
with self.flush_lock:
@@ -187,14 +188,14 @@ class _LogThread(threading.Thread, _queue.Queue):
try:
log = self.get_nowait()
try:
data_cache[log["filename"]] += log["data"]
data_cache[log['filename']] += log['data']
except KeyError:
data_cache[log["filename"]] = log["data"]
data_cache[log['filename']] = log['data']
except _queue.Empty:
break
# flush data
for filename in data_cache:
with open(filename, "ab+") as f:
with open(filename, 'ab+') as f:
f.write(data_cache[filename])
def run(self):
@@ -231,7 +232,7 @@ class RecvThread(threading.Thread):
lines = decoded_data.splitlines(True)
last_line = lines[-1]
if last_line[-1] != "\n":
if last_line[-1] != '\n':
if len(lines) == 1:
# only one line and the line is not finished, then append this to cache
self._line_cache += lines[-1]
@@ -239,7 +240,7 @@ class RecvThread(threading.Thread):
else:
# more than one line and not finished, replace line cache
self._line_cache = lines[-1]
ret += "".join(lines[:-1])
ret += ''.join(lines[:-1])
else:
# line finishes, flush cache
self._line_cache = str()
@@ -302,7 +303,7 @@ class BaseDUT(object):
self.start_receive()
def __str__(self):
return "DUT({}: {})".format(self.name, str(self.port))
return 'DUT({}: {})'.format(self.name, str(self.port))
def _save_expect_failure(self, pattern, data, start_time):
"""
@@ -311,8 +312,8 @@ class BaseDUT(object):
The expect failures could be false alarm, and test case might generate a lot of such failures.
Therefore, we don't print the failure immediately and limit the max size of failure list.
"""
self.expect_failures.insert(0, {"pattern": pattern, "data": data,
"start": start_time, "end": time.time()})
self.expect_failures.insert(0, {'pattern': pattern, 'data': data,
'start': start_time, 'end': time.time()})
self.expect_failures = self.expect_failures[:self.MAX_EXPECT_FAILURES_TO_SAVED]
def _save_dut_log(self, data):
@@ -444,7 +445,7 @@ class BaseDUT(object):
raise e
return data
def write(self, data, eol="\r\n", flush=True):
def write(self, data, eol='\r\n', flush=True):
"""
:param data: data
:param eol: end of line pattern.
@@ -474,7 +475,7 @@ class BaseDUT(object):
self.data_cache.flush(size)
return data
def start_capture_raw_data(self, capture_id="default"):
def start_capture_raw_data(self, capture_id='default'):
"""
Sometime application want to get DUT raw data and use ``expect`` method at the same time.
Capture methods provides a way to get raw data without affecting ``expect`` or ``read`` method.
@@ -491,7 +492,7 @@ class BaseDUT(object):
# otherwise, create new data cache
self.recorded_data[capture_id] = _DataCache()
def stop_capture_raw_data(self, capture_id="default"):
def stop_capture_raw_data(self, capture_id='default'):
"""
Stop capture and get raw data.
This method should be used after ``start_capture_raw_data`` on the same capture ID.
@@ -504,9 +505,9 @@ class BaseDUT(object):
ret = self.recorded_data[capture_id].get_data()
self.recorded_data.pop(capture_id)
except KeyError as e:
e.message = "capture_id does not exist. " \
"You should call start_capture_raw_data with same ID " \
"before calling stop_capture_raw_data"
e.message = 'capture_id does not exist. ' \
'You should call start_capture_raw_data with same ID ' \
'before calling stop_capture_raw_data'
raise e
return ret
@@ -552,9 +553,9 @@ class BaseDUT(object):
return ret, index
EXPECT_METHOD = [
[type(re.compile("")), "_expect_re"],
[type(b''), "_expect_str"], # Python 2 & 3 hook to work without 'from builtins import str' from future
[type(u''), "_expect_str"],
[type(re.compile('')), '_expect_re'],
[type(b''), '_expect_str'], # Python 2 & 3 hook to work without 'from builtins import str' from future
[type(u''), '_expect_str'],
]
def _get_expect_method(self, pattern):
@@ -607,7 +608,7 @@ class BaseDUT(object):
if ret is None:
pattern = _pattern_to_string(pattern)
self._save_expect_failure(pattern, data, start_time)
raise ExpectTimeout(self.name + ": " + pattern)
raise ExpectTimeout(self.name + ': ' + pattern)
return stdout if full_stdout else ret
def _expect_multi(self, expect_all, expect_item_list, timeout):
@@ -622,12 +623,12 @@ class BaseDUT(object):
def process_expected_item(item_raw):
# convert item raw data to standard dict
item = {
"pattern": item_raw[0] if isinstance(item_raw, tuple) else item_raw,
"method": self._get_expect_method(item_raw[0] if isinstance(item_raw, tuple)
'pattern': item_raw[0] if isinstance(item_raw, tuple) else item_raw,
'method': self._get_expect_method(item_raw[0] if isinstance(item_raw, tuple)
else item_raw),
"callback": item_raw[1] if isinstance(item_raw, tuple) else None,
"index": -1,
"ret": None,
'callback': item_raw[1] if isinstance(item_raw, tuple) else None,
'index': -1,
'ret': None,
}
return item
@@ -642,9 +643,9 @@ class BaseDUT(object):
for expect_item in expect_items:
if expect_item not in matched_expect_items:
# exclude those already matched
expect_item["ret"], expect_item["index"] = \
expect_item["method"](data, expect_item["pattern"])
if expect_item["ret"] is not None:
expect_item['ret'], expect_item['index'] = \
expect_item['method'](data, expect_item['pattern'])
if expect_item['ret'] is not None:
# match succeed for one item
matched_expect_items.append(expect_item)
@@ -664,20 +665,20 @@ class BaseDUT(object):
if match_succeed:
# sort matched items according to order of appearance in the input data,
# so that the callbacks are invoked in correct order
matched_expect_items = sorted(matched_expect_items, key=lambda it: it["index"])
matched_expect_items = sorted(matched_expect_items, key=lambda it: it['index'])
# invoke callbacks and flush matched data cache
slice_index = -1
for expect_item in matched_expect_items:
# trigger callback
if expect_item["callback"]:
expect_item["callback"](expect_item["ret"])
slice_index = max(slice_index, expect_item["index"])
if expect_item['callback']:
expect_item['callback'](expect_item['ret'])
slice_index = max(slice_index, expect_item['index'])
# flush already matched data
self.data_cache.flush(slice_index)
else:
pattern = str([_pattern_to_string(x["pattern"]) for x in expect_items])
pattern = str([_pattern_to_string(x['pattern']) for x in expect_items])
self._save_expect_failure(pattern, data, start_time)
raise ExpectTimeout(self.name + ": " + pattern)
raise ExpectTimeout(self.name + ': ' + pattern)
@_expect_lock
def expect_any(self, *expect_items, **timeout):
@@ -697,8 +698,8 @@ class BaseDUT(object):
"""
# to be compatible with python2
# in python3 we can write f(self, *expect_items, timeout=DEFAULT_TIMEOUT)
if "timeout" not in timeout:
timeout["timeout"] = self.DEFAULT_EXPECT_TIMEOUT
if 'timeout' not in timeout:
timeout['timeout'] = self.DEFAULT_EXPECT_TIMEOUT
return self._expect_multi(False, expect_items, **timeout)
@_expect_lock
@@ -719,38 +720,38 @@ class BaseDUT(object):
"""
# to be compatible with python2
# in python3 we can write f(self, *expect_items, timeout=DEFAULT_TIMEOUT)
if "timeout" not in timeout:
timeout["timeout"] = self.DEFAULT_EXPECT_TIMEOUT
if 'timeout' not in timeout:
timeout['timeout'] = self.DEFAULT_EXPECT_TIMEOUT
return self._expect_multi(True, expect_items, **timeout)
@staticmethod
def _format_ts(ts):
return "{}:{}".format(time.strftime("%m-%d %H:%M:%S", time.localtime(ts)), str(ts % 1)[2:5])
return '{}:{}'.format(time.strftime('%m-%d %H:%M:%S', time.localtime(ts)), str(ts % 1)[2:5])
def print_debug_info(self):
"""
Print debug info of current DUT. Currently we will print debug info for expect failures.
"""
Utility.console_log("DUT debug info for DUT: {}:".format(self.name), color="orange")
Utility.console_log('DUT debug info for DUT: {}:'.format(self.name), color='orange')
for failure in self.expect_failures:
Utility.console_log(u"\t[pattern]: {}\r\n\t[data]: {}\r\n\t[time]: {} - {}\r\n"
.format(failure["pattern"], failure["data"],
self._format_ts(failure["start"]), self._format_ts(failure["end"])),
color="orange")
Utility.console_log(u'\t[pattern]: {}\r\n\t[data]: {}\r\n\t[time]: {} - {}\r\n'
.format(failure['pattern'], failure['data'],
self._format_ts(failure['start']), self._format_ts(failure['end'])),
color='orange')
class SerialDUT(BaseDUT):
""" serial with logging received data feature """
DEFAULT_UART_CONFIG = {
"baudrate": 115200,
"bytesize": serial.EIGHTBITS,
"parity": serial.PARITY_NONE,
"stopbits": serial.STOPBITS_ONE,
"timeout": 0.05,
"xonxoff": False,
"rtscts": False,
'baudrate': 115200,
'bytesize': serial.EIGHTBITS,
'parity': serial.PARITY_NONE,
'stopbits': serial.STOPBITS_ONE,
'timeout': 0.05,
'xonxoff': False,
'rtscts': False,
}
def __init__(self, name, port, log_file, app, **kwargs):
@@ -768,8 +769,8 @@ class SerialDUT(BaseDUT):
:param data: raw data from read
:return: formatted data (str)
"""
timestamp = "[{}]".format(self._format_ts(time.time()))
formatted_data = timestamp.encode() + b"\r\n" + data + b"\r\n"
timestamp = '[{}]'.format(self._format_ts(time.time()))
formatted_data = timestamp.encode() + b'\r\n' + data + b'\r\n'
return formatted_data
def _port_open(self):


@@ -13,12 +13,12 @@
# limitations under the License.
""" Test Env, manages DUT, App and EnvConfig, interface for test cases to access these components """
import functools
import os
import threading
import functools
import traceback
import netifaces
import traceback
from . import EnvConfig
@@ -44,7 +44,7 @@ class Env(object):
:keyword env_config_file: test env config file path
:keyword test_name: test suite name, used when generate log folder name
"""
CURRENT_LOG_FOLDER = ""
CURRENT_LOG_FOLDER = ''
def __init__(self,
app=None,
@@ -79,7 +79,7 @@ class Env(object):
:return: dut instance
"""
if dut_name in self.allocated_duts:
dut = self.allocated_duts[dut_name]["dut"]
dut = self.allocated_duts[dut_name]['dut']
else:
if dut_class is None:
dut_class = self.default_dut_cls
@@ -95,7 +95,7 @@ class Env(object):
result, detected_target = dut_class.confirm_dut(port)
except ValueError:
# try to auto detect ports
allocated_ports = [self.allocated_duts[x]["port"] for x in self.allocated_duts]
allocated_ports = [self.allocated_duts[x]['port'] for x in self.allocated_duts]
available_ports = dut_class.list_available_ports()
for port in available_ports:
if port not in allocated_ports:
@@ -113,17 +113,17 @@ class Env(object):
if port:
try:
dut_config = self.get_variable(dut_name + "_port_config")
dut_config = self.get_variable(dut_name + '_port_config')
except ValueError:
dut_config = dict()
dut_config.update(dut_init_args)
dut = dut_class(dut_name, port,
os.path.join(self.log_path, dut_name + ".log"),
os.path.join(self.log_path, dut_name + '.log'),
app_inst,
**dut_config)
self.allocated_duts[dut_name] = {"port": port, "dut": dut}
self.allocated_duts[dut_name] = {'port': port, 'dut': dut}
else:
raise ValueError("Failed to get DUT")
raise ValueError('Failed to get DUT')
return dut
@_synced
@@ -136,7 +136,7 @@ class Env(object):
:return: None
"""
try:
dut = self.allocated_duts.pop(dut_name)["dut"]
dut = self.allocated_duts.pop(dut_name)['dut']
dut.close()
except KeyError:
pass
@@ -153,13 +153,13 @@ class Env(object):
return self.config.get_variable(variable_name)
PROTO_MAP = {
"ipv4": netifaces.AF_INET,
"ipv6": netifaces.AF_INET6,
"mac": netifaces.AF_LINK,
'ipv4': netifaces.AF_INET,
'ipv6': netifaces.AF_INET6,
'mac': netifaces.AF_LINK,
}
@_synced
def get_pc_nic_info(self, nic_name="pc_nic", proto="ipv4"):
def get_pc_nic_info(self, nic_name='pc_nic', proto='ipv4'):
"""
get_pc_nic_info(nic_name="pc_nic")
try to get info of a specified NIC and protocol.
@@ -192,7 +192,7 @@ class Env(object):
"""
dut_close_errors = []
for dut_name in self.allocated_duts:
dut = self.allocated_duts[dut_name]["dut"]
dut = self.allocated_duts[dut_name]['dut']
try:
if dut_debug:
dut.print_debug_info()


@@ -79,5 +79,5 @@ class Config(object):
# TODO: to support auto get variable here
value = None
if value is None:
raise ValueError("Failed to get variable")
raise ValueError('Failed to get variable')
return value


@@ -13,18 +13,15 @@
# limitations under the License.
""" Interface for test cases. """
import os
import time
import functools
import os
import socket
import time
from datetime import datetime
import junit_xml
from . import Env
from . import DUT
from . import App
from . import Utility
from . import DUT, App, Env, Utility
class TestCaseFailed(AssertionError):
@@ -37,7 +34,7 @@ class TestCaseFailed(AssertionError):
'cases' argument is the names of one or more test cases
"""
message = "Test case{} failed: {}".format("s" if len(cases) > 1 else "", ", ".join(str(c) for c in cases))
message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '', ', '.join(str(c) for c in cases))
super(TestCaseFailed, self).__init__(self, message)
@@ -50,11 +47,11 @@ class DefaultEnvConfig(object):
3. default env config get from this class
"""
DEFAULT_CONFIG = {
"app": App.BaseApp,
"dut": DUT.BaseDUT,
"env_tag": "default",
"env_config_file": None,
"test_suite_name": None,
'app': App.BaseApp,
'dut': DUT.BaseDUT,
'env_tag': 'default',
'env_config_file': None,
'test_suite_name': None,
}
@classmethod
@@ -78,10 +75,10 @@ get_default_config = DefaultEnvConfig.get_default_config
MANDATORY_INFO = {
"execution_time": 1,
"env_tag": "default",
"category": "function",
"ignore": False,
'execution_time': 1,
'env_tag': 'default',
'category': 'function',
'ignore': False,
}
@@ -89,8 +86,8 @@ class JunitReport(object):
# wrapper for junit test report
# TODO: JunitReport methods are not thread safe (although not likely to be used this way).
JUNIT_FILE_NAME = "XUNIT_RESULT.xml"
JUNIT_DEFAULT_TEST_SUITE = "test-suite"
JUNIT_FILE_NAME = 'XUNIT_RESULT.xml'
JUNIT_DEFAULT_TEST_SUITE = 'test-suite'
JUNIT_TEST_SUITE = junit_xml.TestSuite(JUNIT_DEFAULT_TEST_SUITE,
hostname=socket.gethostname(),
timestamp=datetime.utcnow().isoformat())
@@ -100,7 +97,7 @@ class JunitReport(object):
@classmethod
def output_report(cls, junit_file_path):
""" Output current test result to file. """
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), "w") as f:
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
@classmethod
@@ -136,7 +133,7 @@ class JunitReport(object):
"""
# set stdout to empty string, so we can always append string to stdout.
# It won't affect output logic. If stdout is empty, it won't be put to report.
test_case = junit_xml.TestCase(name, stdout="")
test_case = junit_xml.TestCase(name, stdout='')
cls.JUNIT_CURRENT_TEST_CASE = test_case
cls._TEST_CASE_CREATED_TS = time.time()
return test_case
@@ -151,7 +148,7 @@ class JunitReport(object):
assert cls.JUNIT_CURRENT_TEST_CASE
for item in performance_items:
cls.JUNIT_CURRENT_TEST_CASE.stdout += "[{}]: {}\n".format(item[0], item[1])
cls.JUNIT_CURRENT_TEST_CASE.stdout += '[{}]: {}\n'.format(item[0], item[1])
def test_method(**kwargs):
@@ -174,8 +171,8 @@ def test_method(**kwargs):
def test(test_func):
case_info = MANDATORY_INFO.copy()
case_info["name"] = case_info["ID"] = test_func.__name__
case_info["junit_report_by_case"] = False
case_info['name'] = case_info['ID'] = test_func.__name__
case_info['junit_report_by_case'] = False
case_info.update(kwargs)
@functools.wraps(test_func)
@@ -197,12 +194,12 @@ def test_method(**kwargs):
env_inst = Env.Env(**env_config)
# prepare for xunit test results
junit_file_path = env_inst.app_cls.get_log_folder(env_config["test_suite_name"])
junit_test_case = JunitReport.create_test_case(case_info["ID"])
junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
junit_test_case = JunitReport.create_test_case(case_info['ID'])
result = False
try:
Utility.console_log("starting running test: " + test_func.__name__, color="green")
Utility.console_log('starting running test: ' + test_func.__name__, color='green')
# execute test function
test_func(env_inst, extra_data)
# if finish without exception, test result is True
@@ -224,16 +221,16 @@ def test_method(**kwargs):
for error in close_errors:
junit_test_case.add_failure_info(str(error))
result = False
if not case_info["junit_report_by_case"]:
if not case_info['junit_report_by_case']:
JunitReport.test_case_finish(junit_test_case)
# end case and output result
JunitReport.output_report(junit_file_path)
if result:
Utility.console_log("Test Succeed: " + test_func.__name__, color="green")
Utility.console_log('Test Succeed: ' + test_func.__name__, color='green')
else:
Utility.console_log(("Test Fail: " + test_func.__name__), color="red")
Utility.console_log(('Test Fail: ' + test_func.__name__), color='red')
return result
handle_test.case_info = case_info


@@ -39,9 +39,9 @@ The Basic logic to assign test cases is as follow:
"""
import json
import os
import re
import json
import yaml
@@ -50,13 +50,13 @@ try:
except ImportError:
from yaml import Loader as Loader
from . import (CaseConfig, SearchCases, GitlabCIJob, console_log)
from . import CaseConfig, GitlabCIJob, SearchCases, console_log
class Group(object):
MAX_EXECUTION_TIME = 30
MAX_CASE = 15
SORT_KEYS = ["env_tag"]
SORT_KEYS = ['env_tag']
# Matching CI job rules could be different from the way we want to group test cases.
# For example, when assign unit test cases, different test cases need to use different test functions.
# We need to put them into different groups.
@@ -92,7 +92,7 @@ class Group(object):
:return: True or False
"""
max_time = (sum([self._get_case_attr(x, "execution_time") for x in self.case_list])
max_time = (sum([self._get_case_attr(x, 'execution_time') for x in self.case_list])
< self.MAX_EXECUTION_TIME)
max_case = (len(self.case_list) < self.MAX_CASE)
return max_time and max_case
@@ -135,8 +135,8 @@ class Group(object):
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
output_data = {
"Filter": self.filters,
"CaseConfig": [{"name": self._get_case_attr(x, "name")} for x in self.case_list],
'Filter': self.filters,
'CaseConfig': [{'name': self._get_case_attr(x, 'name')} for x in self.case_list],
}
return output_data
@@ -149,12 +149,12 @@ class AssignTest(object):
:param ci_config_file: path of ``.gitlab-ci.yml``
"""
# subclass need to rewrite CI test job pattern, to filter all test jobs
CI_TEST_JOB_PATTERN = re.compile(r"^test_.+")
CI_TEST_JOB_PATTERN = re.compile(r'^test_.+')
# by default we only run function in CI, as other tests could take long time
DEFAULT_FILTER = {
"category": "function",
"ignore": False,
"supported_in_ci": True,
'category': 'function',
'ignore': False,
'supported_in_ci': True,
}
def __init__(self, test_case_paths, ci_config_file, case_group=Group):
@@ -168,25 +168,25 @@ class AssignTest(object):
def _handle_parallel_attribute(job_name, job):
jobs_out = []
try:
for i in range(job["parallel"]):
jobs_out.append(GitlabCIJob.Job(job, job_name + "_{}".format(i + 1)))
for i in range(job['parallel']):
jobs_out.append(GitlabCIJob.Job(job, job_name + '_{}'.format(i + 1)))
except KeyError:
# Gitlab don't allow to set parallel to 1.
# to make test job name same ($CI_JOB_NAME_$CI_NODE_INDEX),
# we append "_" to jobs don't have parallel attribute
jobs_out.append(GitlabCIJob.Job(job, job_name + "_"))
jobs_out.append(GitlabCIJob.Job(job, job_name + '_'))
return jobs_out
def _parse_gitlab_ci_config(self, ci_config_file):
with open(ci_config_file, "r") as f:
with open(ci_config_file, 'r') as f:
ci_config = yaml.load(f, Loader=Loader)
job_list = list()
for job_name in ci_config:
if self.CI_TEST_JOB_PATTERN.search(job_name) is not None:
job_list.extend(self._handle_parallel_attribute(job_name, ci_config[job_name]))
job_list.sort(key=lambda x: x["name"])
job_list.sort(key=lambda x: x['name'])
return job_list
def search_cases(self, case_filter=None):
@@ -256,7 +256,7 @@ class AssignTest(object):
Bot could also pass test count.
If filtered cases need to be tested for several times, then we do duplicate them here.
"""
test_count = os.getenv("BOT_TEST_COUNT")
test_count = os.getenv('BOT_TEST_COUNT')
if test_count:
test_count = int(test_count)
self.test_cases *= test_count
@@ -269,7 +269,7 @@ class AssignTest(object):
"""
group_count = dict()
for group in test_groups:
key = ",".join(group.ci_job_match_keys)
key = ','.join(group.ci_job_match_keys)
try:
group_count[key] += 1
except KeyError:
@@ -305,26 +305,26 @@ class AssignTest(object):
# print debug info
# total requirement of current pipeline
required_group_count = self._count_groups_by_keys(test_groups)
console_log("Required job count by tags:")
console_log('Required job count by tags:')
for tags in required_group_count:
console_log("\t{}: {}".format(tags, required_group_count[tags]))
console_log('\t{}: {}'.format(tags, required_group_count[tags]))
# number of unused jobs
not_used_jobs = [job for job in self.jobs if "case group" not in job]
not_used_jobs = [job for job in self.jobs if 'case group' not in job]
if not_used_jobs:
console_log("{} jobs not used. Please check if you define too much jobs".format(len(not_used_jobs)), "O")
console_log('{} jobs not used. Please check if you define too much jobs'.format(len(not_used_jobs)), 'O')
for job in not_used_jobs:
console_log("\t{}".format(job["name"]), "O")
console_log('\t{}'.format(job['name']), 'O')
# failures
if failed_to_assign:
console_log("Too many test cases vs jobs to run. "
"Please increase parallel count in tools/ci/config/target-test.yml "
"for jobs with specific tags:", "R")
console_log('Too many test cases vs jobs to run. '
'Please increase parallel count in tools/ci/config/target-test.yml '
'for jobs with specific tags:', 'R')
failed_group_count = self._count_groups_by_keys(failed_to_assign)
for tags in failed_group_count:
console_log("\t{}: {}".format(tags, failed_group_count[tags]), "R")
raise RuntimeError("Failed to assign test case to CI jobs")
console_log('\t{}: {}'.format(tags, failed_group_count[tags]), 'R')
raise RuntimeError('Failed to assign test case to CI jobs')
def output_configs(self, output_path):
"""


@@ -141,9 +141,9 @@ def filter_test_cases(test_methods, case_filter):
class Parser(object):
DEFAULT_CONFIG = {
"TestConfig": dict(),
"Filter": dict(),
"CaseConfig": [{"extra_data": None}],
'TestConfig': dict(),
'Filter': dict(),
'CaseConfig': [{'extra_data': None}],
}
@classmethod
@@ -156,7 +156,7 @@ class Parser(object):
"""
configs = cls.DEFAULT_CONFIG.copy()
if config_file:
with open(config_file, "r") as f:
with open(config_file, 'r') as f:
configs.update(yaml.load(f, Loader=Loader))
return configs
@@ -170,8 +170,8 @@ class Parser(object):
"""
output = dict()
for key in overwrite:
module = importlib.import_module(overwrite[key]["package"])
output[key] = module.__getattribute__(overwrite[key]["class"])
module = importlib.import_module(overwrite[key]['package'])
output[key] = module.__getattribute__(overwrite[key]['class'])
return output
@classmethod
@@ -185,10 +185,10 @@ class Parser(object):
"""
configs = cls.parse_config_file(config_file)
test_case_list = []
for _config in configs["CaseConfig"]:
_filter = configs["Filter"].copy()
_overwrite = cls.handle_overwrite_args(_config.pop("overwrite", dict()))
_extra_data = _config.pop("extra_data", None)
for _config in configs['CaseConfig']:
_filter = configs['Filter'].copy()
_overwrite = cls.handle_overwrite_args(_config.pop('overwrite', dict()))
_extra_data = _config.pop('extra_data', None)
_filter.update(_config)
# Try get target from yml
@@ -222,8 +222,8 @@ class Generator(object):
def __init__(self):
self.default_config = {
"TestConfig": dict(),
"Filter": dict(),
'TestConfig': dict(),
'Filter': dict(),
}
def set_default_configs(self, test_config, case_filter):
@@ -232,7 +232,7 @@ class Generator(object):
:param case_filter: "Filter" value
:return: None
"""
self.default_config = {"TestConfig": test_config, "Filter": case_filter}
self.default_config = {'TestConfig': test_config, 'Filter': case_filter}
def generate_config(self, case_configs, output_file):
"""
@@ -241,6 +241,6 @@ class Generator(object):
:return: None
"""
config = self.default_config.copy()
config.update({"CaseConfig": case_configs})
with open(output_file, "w") as f:
config.update({'CaseConfig': case_configs})
with open(output_file, 'w') as f:
yaml.dump(config, f)


@@ -26,8 +26,8 @@ class Job(dict):
"""
def __init__(self, job, job_name):
super(Job, self).__init__(job)
self["name"] = job_name
self.tags = set(self["tags"])
self['name'] = job_name
self.tags = set(self['tags'])
def match_group(self, group):
"""
@@ -38,7 +38,7 @@ class Job(dict):
:return: True or False
"""
match_result = False
if "case group" not in self and group.ci_job_match_keys == self.tags:
if 'case group' not in self and group.ci_job_match_keys == self.tags:
# group not assigned and all tags match
match_result = True
return match_result
@@ -49,7 +49,7 @@ class Job(dict):
:param group: the case group to assign
"""
self["case group"] = group
self['case group'] = group
def output_config(self, file_path):
"""
@@ -59,7 +59,7 @@ class Job(dict):
:param file_path: output file path
:return: None
"""
file_name = os.path.join(file_path, self["name"] + ".yml")
if "case group" in self:
with open(file_name, "w") as f:
yaml.safe_dump(self["case group"].output(), f, encoding='utf-8', default_flow_style=False)
file_name = os.path.join(file_path, self['name'] + '.yml')
if 'case group' in self:
with open(file_name, 'w') as f:
yaml.safe_dump(self['case group'].output(), f, encoding='utf-8', default_flow_style=False)


@@ -13,23 +13,23 @@
# limitations under the License.
""" search test cases from a given file or path """
import os
import fnmatch
import types
import copy
import fnmatch
import os
import types
from . import load_source
class Search(object):
TEST_CASE_FILE_PATTERN = "*_test.py"
TEST_CASE_FILE_PATTERN = '*_test.py'
SUPPORT_REPLICATE_CASES_KEY = ['target']
@classmethod
def _search_cases_from_file(cls, file_name):
""" get test cases from test case .py file """
print("Try to get cases from: " + file_name)
print('Try to get cases from: ' + file_name)
test_functions = []
try:
mod = load_source(file_name)
@@ -42,14 +42,14 @@ class Search(object):
except AttributeError:
continue
except ImportError as e:
print("ImportError: \r\n\tFile:" + file_name + "\r\n\tError:" + str(e))
print('ImportError: \r\n\tFile:' + file_name + '\r\n\tError:' + str(e))
test_functions_out = []
for case in test_functions:
test_functions_out += cls.replicate_case(case)
for i, test_function in enumerate(test_functions_out):
print("\t{}. {} <{}>".format(i + 1, test_function.case_info["name"], test_function.case_info["target"]))
print('\t{}. {} <{}>'.format(i + 1, test_function.case_info['name'], test_function.case_info['target']))
test_function.case_info['app_dir'] = os.path.dirname(file_name)
return test_functions_out
@@ -58,7 +58,7 @@ class Search(object):
""" search all test case files recursively of a path """
if not os.path.exists(test_case):
raise OSError("test case path not exist")
raise OSError('test case path not exist')
if os.path.isdir(test_case):
test_case_files = []
for root, _, file_names in os.walk(test_case):


@@ -1,4 +1,5 @@
from __future__ import print_function
import os.path
import sys
import time
@@ -7,35 +8,35 @@ import traceback
from .. import Env
_COLOR_CODES = {
"white": u'\033[0m',
"red": u'\033[31m',
"green": u'\033[32m',
"orange": u'\033[33m',
"blue": u'\033[34m',
"purple": u'\033[35m',
"W": u'\033[0m',
"R": u'\033[31m',
"G": u'\033[32m',
"O": u'\033[33m',
"B": u'\033[34m',
"P": u'\033[35m'
'white': u'\033[0m',
'red': u'\033[31m',
'green': u'\033[32m',
'orange': u'\033[33m',
'blue': u'\033[34m',
'purple': u'\033[35m',
'W': u'\033[0m',
'R': u'\033[31m',
'G': u'\033[32m',
'O': u'\033[33m',
'B': u'\033[34m',
'P': u'\033[35m'
}
def _get_log_file_name():
if Env.Env.CURRENT_LOG_FOLDER:
file_name = os.path.join(Env.Env.CURRENT_LOG_FOLDER, "console.log")
file_name = os.path.join(Env.Env.CURRENT_LOG_FOLDER, 'console.log')
else:
raise OSError("env log folder does not exist, will not save to log file")
raise OSError('env log folder does not exist, will not save to log file')
return file_name
def format_timestamp():
ts = time.time()
return "{}:{}".format(time.strftime("%m-%d %H:%M:%S", time.localtime(ts)), str(ts % 1)[2:5])
return '{}:{}'.format(time.strftime('%m-%d %H:%M:%S', time.localtime(ts)), str(ts % 1)[2:5])
def console_log(data, color="white", end="\n"):
def console_log(data, color='white', end='\n'):
"""
log data to console.
(if not flush console log, Gitlab-CI won't update logs during job execution)
@@ -44,19 +45,19 @@ def console_log(data, color="white", end="\n"):
:param color: color
"""
if color not in _COLOR_CODES:
color = "white"
color = 'white'
color_codes = _COLOR_CODES[color]
if isinstance(data, type(b'')):
data = data.decode('utf-8', 'replace')
print(color_codes + data, end=end)
if color not in ["white", "W"]:
if color not in ['white', 'W']:
# reset color to white for later logs
print(_COLOR_CODES["white"] + u"\r")
print(_COLOR_CODES['white'] + u'\r')
sys.stdout.flush()
log_data = "[{}] ".format(format_timestamp()) + data
log_data = '[{}] '.format(format_timestamp()) + data
try:
log_file = _get_log_file_name()
with open(log_file, "a+") as f:
with open(log_file, 'a+') as f:
f.write(log_data + end)
except OSError:
pass
@@ -108,4 +109,4 @@ def handle_unexpected_exception(junit_test_case, exception):
traceback.print_exc()
# AssertionError caused by an 'assert' statement has an empty string as its 'str' form
e_str = str(exception) if str(exception) else repr(exception)
junit_test_case.add_failure_info("Unexpected exception: {}\n{}".format(e_str, traceback.format_exc()))
junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))


@@ -21,13 +21,13 @@ Command line interface to run test cases from a given path.
Use ``python Runner.py test_case_path -c config_file -e env_config_file`` to run test cases.
"""
import argparse
import os
import sys
import argparse
import threading
from tiny_test_fw import TinyFW
from tiny_test_fw.Utility import SearchCases, CaseConfig
from tiny_test_fw.Utility import CaseConfig, SearchCases
class Runner(threading.Thread):
@@ -43,7 +43,7 @@ class Runner(threading.Thread):
if case_config:
test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
else:
test_suite_name = "TestRunner"
test_suite_name = 'TestRunner'
TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
test_methods = SearchCases.Search.search_test_cases(test_case_paths)
self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
@@ -60,12 +60,12 @@ class Runner(threading.Thread):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("test_cases", nargs='+',
help="test case folders or files")
parser.add_argument("--case_config", "-c", default=None,
help="case filter/config file")
parser.add_argument("--env_config_file", "-e", default=None,
help="test env config file")
parser.add_argument('test_cases', nargs='+',
help='test case folders or files')
parser.add_argument('--case_config', '-c', default=None,
help='case filter/config file')
parser.add_argument('--env_config_file', '-e', default=None,
help='test env config file')
args = parser.parse_args()
test_cases = [os.path.join(os.getenv('IDF_PATH'), path) if not os.path.isabs(path) else path for path in args.test_cases]
@@ -78,7 +78,7 @@ if __name__ == '__main__':
if not runner.is_alive():
break
except KeyboardInterrupt:
print("exit by Ctrl-C")
print('exit by Ctrl-C')
break
if not runner.get_test_result():
sys.exit(1)


@@ -19,7 +19,7 @@ import ttfw_idf
from tiny_test_fw import TinyFW
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_https_request(env, extra_data):
"""
steps: |
@@ -27,17 +27,17 @@ def test_examples_protocol_https_request(env, extra_data):
2. connect to www.howsmyssl.com:443
3. send http request
"""
dut1 = env.get_dut("https_request", "examples/protocols/https_request", dut_class=ttfw_idf.ESP32DUT)
dut1 = env.get_dut('https_request', 'examples/protocols/https_request', dut_class=ttfw_idf.ESP32DUT)
dut1.start_app()
dut1.expect(re.compile(r"Connecting to www.howsmyssl.com:443"), timeout=30)
dut1.expect("Performing the SSL/TLS handshake")
dut1.expect("Certificate verified.", timeout=15)
dut1.expect_all(re.compile(r"Cipher suite is TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256"),
"Reading HTTP response",
dut1.expect(re.compile(r'Connecting to www.howsmyssl.com:443'), timeout=30)
dut1.expect('Performing the SSL/TLS handshake')
dut1.expect('Certificate verified.', timeout=15)
dut1.expect_all(re.compile(r'Cipher suite is TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256'),
'Reading HTTP response',
timeout=20)
dut1.expect(re.compile(r"Completed (\d) requests"))
dut1.expect(re.compile(r'Completed (\d) requests'))
if __name__ == '__main__':
TinyFW.set_default_config(env_config_file="EnvConfigTemplate.yml", dut=ttfw_idf.IDFDUT)
TinyFW.set_default_config(env_config_file='EnvConfigTemplate.yml', dut=ttfw_idf.IDFDUT)
test_examples_protocol_https_request()


@@ -18,6 +18,7 @@
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# import sphinx_rtd_theme