ci-test-run.py

#!/usr/bin/env python3 -u
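"""Decide Klever testing/validation jobs and check their results for regressions.

The script reads the jobs to decide from a YAML file (--job_file), decides each of
them via the Klever CLI against the local instance hardcoded below, compares the
obtained verdicts and mark associations with previously recorded regression testing
results and reports all mismatches.

Example invocation (the job file name here is illustrative only):
    ./ci-test-run.py --job_file jobs.yml [--job_name "job name"]
"""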
import argparse
import json
import logging
import os
import subprocess
import sys
import time

import yaml

from klever.cli import Cli


def get_logger(name):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s (script:%(lineno)03d) %(levelname)s> %(message)s',
                                  "%Y-%m-%d %H:%M:%S")
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    return logger


def _execute_cmd(logger, *args, stdin=None, cwd=None, get_output=False):
    logger.info('Execute command "{0}"'.format(' '.join(args)))

    kwargs = {
        'stdin': stdin,
        'cwd': cwd
    }

    if get_output:
        return subprocess.check_output(args, **kwargs).decode('utf8')
    else:
        subprocess.check_call(args, **kwargs)


def execute_cmd(logger, *args, stdin=None, cwd=None, get_output=False):
    try:
        return _execute_cmd(logger, *args, stdin=stdin, cwd=cwd, get_output=get_output)
    except subprocess.CalledProcessError:
        error(logger, 'Could not execute command')


def error(logger, msg):
    logger.error(msg)
    sys.exit(1)


def run_job(job, logger):
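    """Create a job from the given preset, decide it and return the decision results.

    The decision uses the validation or testing run configuration from ci-config
    depending on whether the job has the 'validation' flag set.
    """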
    logger.info('Start decision of job {0} ({1})'.format(job['name'], job['id']))

    run_data = 'ci-config/validation job decision configuration.json' if job.get(
        'validation') else 'ci-config/job decision configuration.json'

    cli = Cli('localhost:8998', 'manager', 'manager')
    _, job_uuid = cli.create_job(job['id'])
    _, decision_uuid = cli.start_job_decision(job_uuid, rundata=run_data)

    # Wait until the decision finishes: statuses greater than 2 mean it is not being solved anymore.
    while True:
        time.sleep(5)
        job_version_solution_progress = cli.decision_progress(decision_uuid)
        if int(job_version_solution_progress['status']) > 2:
            break

    results = cli.decision_results(decision_uuid)

    if not results:
        error(logger, 'No results found for {}'.format(job['name']))

    return results


def compare_results(job, regr_test_results, job_version_solution_results):
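    """Compare decision results of one job with the previously recorded regression testing results.

    Returns a list of human-readable mismatch messages (empty when everything matches):
    a failed decision, reports with an unexpected number of mark associations,
    new or unmatched marks, and marks with more or fewer associations than recorded before.
    """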
    def mark_in(mark_id, verdict_type, verdict):
        return verdict_type in regr_test_results and \
               mark_id in regr_test_results[verdict_type].get(verdict, [])

    error_msgs = []

    status = int(job_version_solution_results['status'])
    if status != 3:
        return ['Testing/validation job failed: ' + job]

    for verdict in ('safes', 'unsafes', 'unknowns'):
        new_marks = []
        matched_marks = []
        job_verdict_result = job_version_solution_results[verdict]

        for i, report in enumerate(job_verdict_result['reports']):
            # Several associated marks are acceptable for unknowns since this happens quite naturally.
            if len(report['marks']) != 1 and (verdict != 'unknowns' or len(report['marks']) < 1):
                error_msgs.append(
                    'There are {0} associations for report "{1}" of verdict "{2}", job "{3}" than expected'.format(
                        'more' if len(report['marks']) > 1 else 'fewer', i, verdict, job))

        for mark_id in job_verdict_result['marks'].keys():
            if all(not mark_in(mark_id, verdict_type, verdict) for verdict_type in
                   ('ideal verdicts', 'current verdicts')):
                new_marks.append(mark_id)
            else:
                matched_marks.append(mark_id)
                for report in job_verdict_result['reports']:
                    if mark_id in report['marks']:
                        # For unsafe marks there is a similarity value for each pair of unsafe and mark.
                        dif = report['marks'][mark_id] if verdict == 'unsafes' else 1
                        if mark_in(mark_id, 'ideal verdicts', verdict) and \
                                (regr_test_results['ideal verdicts'][verdict][mark_id] > 0 or
                                 not mark_in(mark_id, 'current verdicts', verdict)):
                            target_verdict = 'ideal verdicts'
                        else:
                            target_verdict = 'current verdicts'

                        regr_test_results[target_verdict][verdict][mark_id] -= dif

        # Combine marks from both types of verdicts to get unmatched marks if there are any.
        regr_test_marks = []
        for verdict_type in ('ideal verdicts', 'current verdicts'):
            if verdict in regr_test_results.get(verdict_type, []):
                regr_test_marks.extend(list(regr_test_results[verdict_type][verdict]))

        if regr_test_marks:
            unmatched_marks = set(regr_test_marks) - set(matched_marks)
            if unmatched_marks:
                error_msgs.append(
                    'There are unmatched marks for verdict "{0}", job "{1}": '.format(verdict, job) +
                    ', '.join(unmatched_marks))

        for verdict_type in ('ideal verdicts', 'current verdicts'):
            if verdict in regr_test_results.get(verdict_type, []):
                for mark_id in regr_test_results[verdict_type][verdict]:
                    if regr_test_results[verdict_type][verdict][mark_id]:
                        error_msgs.append(
                            'There are {0} associations for mark "{1}" of verdict "{2}", '
                            'job "{3}" than expected'
                            .format('more' if regr_test_results[verdict_type][verdict][mark_id] < 0
                                    else 'fewer', mark_id, verdict, job))

        if new_marks:
            error_msgs.append(
                'There are new marks for verdict "{0}", job "{1}": '
                .format(verdict, job) + ', '.join(new_marks))

    return error_msgs


def main(args=sys.argv[1:]):
    logger = get_logger(None)

    parser = argparse.ArgumentParser(description="Run CI script")
    parser.add_argument('--job_file', type=str, help='Job file to run')
    parser.add_argument('--job_name', type=str, help='Job name to run')
    args = parser.parse_args(args)

    if not args.job_file:
        error(logger, 'No configuration file specified')

    error_msgs = []

    # Decide testing/validation jobs and obtain their results.
    job_versions_solution_results = {}
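    # The job file is expected to be a YAML document with a top-level 'jobs' list whose
    # entries provide 'id', 'name' and an optional 'validation' flag, roughly like this
    # (the values are purely illustrative):
    #   jobs:
    #     - id: <preset job identifier>
    #       name: <job name>
    #       validation: true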
    with open(args.job_file) as fp:
        jobs = yaml.safe_load(fp)

    # Compare results with previous ones. Gather all mismatches.
    # with open('presets/marks/previous_regression_testing_results.yaml') as fp:
    #     regr_test_results = yaml.safe_load(fp)
    with open('presets/marks/previous regression testing results.json') as fp:
        regr_test_results = json.load(fp)

    for job in jobs['jobs']:
        if job['id'] not in regr_test_results:
            error_msgs.append('There is a new testing/validation job with unknown previous results: ' + job['id'])
        else:
            if args.job_name and args.job_name != job['name']:
                continue

            results = run_job(job, logger)
            job_versions_solution_results[job['id']] = results

            logger.info('Compare results for job "{0}"'.format(job['id']))
            error_msgs.extend(compare_results(job['id'], regr_test_results[job['id']], results))

    if error_msgs:
        error(logger, '\n'.join(error_msgs))
    if args.job_name and not job_versions_solution_results:
        error(logger, "Could not find job {}".format(args.job_name))

    # This file will be attached to the e-mail.
    with open('job-versions-solution-results.yaml', 'w') as fp:
        yaml.dump(job_versions_solution_results, fp)

    # Execute unit tests for OpenStack deployment (currently disabled by the guard below).
    if False:
        try:
            _execute_cmd(logger, 'pytest', '-x', '-s', os.path.join('src', 'tests', 'test_openstack.py'))
            logger.info('Unit tests for OpenStack deployment passed')
        except subprocess.CalledProcessError:
            error_msgs.append('Unit tests for OpenStack deployment failed')
    else:
        logger.info('Unit tests for OpenStack deployment were skipped')

    if error_msgs:
        error(logger, '\n'.join(error_msgs))
if __name__ == "__main__":
main(sys.argv[1:])