opentf-toolkit-nightly 0.60.0.dev1236__py3-none-any.whl → 0.60.0.dev1246__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opentf/schemas/opentestfactory.org/v1alpha1/InsightCollector.json +25 -0
- opentf/schemas/opentestfactory.org/v1alpha1/Notification.json +23 -0
- {opentf_toolkit_nightly-0.60.0.dev1236.dist-info → opentf_toolkit_nightly-0.60.0.dev1246.dist-info}/METADATA +1 -1
- {opentf_toolkit_nightly-0.60.0.dev1236.dist-info → opentf_toolkit_nightly-0.60.0.dev1246.dist-info}/RECORD +7 -8
- opentf/commons/datasources.py +0 -834
- {opentf_toolkit_nightly-0.60.0.dev1236.dist-info → opentf_toolkit_nightly-0.60.0.dev1246.dist-info}/LICENSE +0 -0
- {opentf_toolkit_nightly-0.60.0.dev1236.dist-info → opentf_toolkit_nightly-0.60.0.dev1246.dist-info}/WHEEL +0 -0
- {opentf_toolkit_nightly-0.60.0.dev1236.dist-info → opentf_toolkit_nightly-0.60.0.dev1246.dist-info}/top_level.txt +0 -0
|
@@ -102,6 +102,31 @@
|
|
|
102
102
|
}
|
|
103
103
|
}
|
|
104
104
|
}
|
|
105
|
+
},
|
|
106
|
+
{
|
|
107
|
+
"if": {
|
|
108
|
+
"properties": {
|
|
109
|
+
"kind": {
|
|
110
|
+
"const": "SurefireXmlReport"
|
|
111
|
+
}
|
|
112
|
+
},
|
|
113
|
+
"required": [
|
|
114
|
+
"kind"
|
|
115
|
+
]
|
|
116
|
+
},
|
|
117
|
+
"then": {
|
|
118
|
+
"properties": {
|
|
119
|
+
"spec": {
|
|
120
|
+
"type": "object",
|
|
121
|
+
"properties": {
|
|
122
|
+
"scope": {
|
|
123
|
+
"type": "string"
|
|
124
|
+
}
|
|
125
|
+
},
|
|
126
|
+
"additionalProperties": false
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
}
|
|
105
130
|
}
|
|
106
131
|
]
|
|
107
132
|
}
|
|
@@ -91,6 +91,29 @@
|
|
|
91
91
|
}
|
|
92
92
|
}
|
|
93
93
|
},
|
|
94
|
+
"managedTestResult": {
|
|
95
|
+
"type": "object",
|
|
96
|
+
"properties": {
|
|
97
|
+
"reportStatus": {
|
|
98
|
+
"type": "string"
|
|
99
|
+
},
|
|
100
|
+
"stepStatuses": {
|
|
101
|
+
"type": "object"
|
|
102
|
+
},
|
|
103
|
+
"failureDetails": {
|
|
104
|
+
"type": "array",
|
|
105
|
+
"items": {
|
|
106
|
+
"type": "string"
|
|
107
|
+
}
|
|
108
|
+
},
|
|
109
|
+
"duration": {
|
|
110
|
+
"type": "integer"
|
|
111
|
+
}
|
|
112
|
+
},
|
|
113
|
+
"required": [
|
|
114
|
+
"reportStatus"
|
|
115
|
+
]
|
|
116
|
+
},
|
|
94
117
|
"logs": {
|
|
95
118
|
"type": "array",
|
|
96
119
|
"items": {
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: opentf-toolkit-nightly
|
|
3
|
-
Version: 0.60.0.
|
|
3
|
+
Version: 0.60.0.dev1246
|
|
4
4
|
Summary: OpenTestFactory Orchestrator Toolkit
|
|
5
5
|
Home-page: https://gitlab.com/henixdevelopment/open-source/opentestfactory/python-toolkit
|
|
6
6
|
Author: Martin Lafaix
|
|
@@ -1,7 +1,6 @@
|
|
|
1
1
|
opentf/commons/__init__.py,sha256=xbDeHfYHshJtopyxljdTWw5vUXcUKosU6ILDNcEBZrY,22598
|
|
2
2
|
opentf/commons/auth.py,sha256=bM2Z3kxm2Wku1lKXaRAIg37LHvXWAXIZIqjplDfN2P8,15899
|
|
3
3
|
opentf/commons/config.py,sha256=_8WzSaeB0yjGRa8mTQ69OASyUSbKZbwNOiKpMp2jFwI,7842
|
|
4
|
-
opentf/commons/datasources.py,sha256=QFlomWOFStM6CBK3tYMQY_PyY8HgQQN1coMlCWtIGhs,25971
|
|
5
4
|
opentf/commons/exceptions.py,sha256=7dhUXO8iyAbqVwlUKxZhgRzGqVcb7LkG39hFlm-VxIA,2407
|
|
6
5
|
opentf/commons/expressions.py,sha256=jM_YKXVOFhvOE2aE2IuacuvxhIsOYTFs2oQkpcbWR6g,19645
|
|
7
6
|
opentf/commons/pubsub.py,sha256=M0bvajR9raUP-xe5mfRjdrweZyHQw1_Qsy56gS-Sck4,7676
|
|
@@ -28,8 +27,8 @@ opentf/schemas/opentestfactory.org/v1alpha1/ExecutionError.json,sha256=tz8ZggvjS
|
|
|
28
27
|
opentf/schemas/opentestfactory.org/v1alpha1/ExecutionResult.json,sha256=UeWc4TfRY3G1CnMapFxXWRunaXzZdxOIle3DxURSf-A,3287
|
|
29
28
|
opentf/schemas/opentestfactory.org/v1alpha1/GeneratorCommand.json,sha256=uxbqDhP4newgz-85TnGKbchx448QEQ8WB5PXpcJy2ME,1754
|
|
30
29
|
opentf/schemas/opentestfactory.org/v1alpha1/GeneratorResult.json,sha256=LkHLGt2uam1Q5Ux0zP_O9oFgxBMCjD3Th3BsfsXxd1g,6633
|
|
31
|
-
opentf/schemas/opentestfactory.org/v1alpha1/InsightCollector.json,sha256=
|
|
32
|
-
opentf/schemas/opentestfactory.org/v1alpha1/Notification.json,sha256=
|
|
30
|
+
opentf/schemas/opentestfactory.org/v1alpha1/InsightCollector.json,sha256=lQZ6g1lAohihZ-WW4nPBOBvF9bC0MaAillKGSm8m4oc,17777
|
|
31
|
+
opentf/schemas/opentestfactory.org/v1alpha1/Notification.json,sha256=MFPIS3l9nKHsvcpNl3yRwrsQJJ99OsfAFMFXxXJcaRs,4612
|
|
33
32
|
opentf/schemas/opentestfactory.org/v1alpha1/PluginMetadata.json,sha256=BLklO7CObT4OpAEsQT60WJ1ttOcG71hIYzgN-e7Ch9k,2803
|
|
34
33
|
opentf/schemas/opentestfactory.org/v1alpha1/ProviderCommand.json,sha256=soe0imdnnq1mfGEpcLJvF3JVUIrF-7FFECc7CzNzobI,2875
|
|
35
34
|
opentf/schemas/opentestfactory.org/v1alpha1/ProviderConfig.json,sha256=HT0bgPJ5fNytQJr-wxU21oApp4RrjogurgRP-zj_eDs,3878
|
|
@@ -59,8 +58,8 @@ opentf/scripts/startup.py,sha256=sggwEpMx7PTaSgYzs-2uCF5YZzpsncMyTlfF_G60CrE,215
|
|
|
59
58
|
opentf/toolkit/__init__.py,sha256=mYeJPZ92ulbTBItqEsZgF4nnuRh6G19QPY3Jxc92ifc,23028
|
|
60
59
|
opentf/toolkit/channels.py,sha256=h5QLrr4vNLKjt8K524ZcJMqmHhE9kV5lxiW-MN6zMvQ,23622
|
|
61
60
|
opentf/toolkit/core.py,sha256=fqnGgaYnuVcd4fyeNIwpc0QtyUo7jsKeVgdkBfY3iqo,9443
|
|
62
|
-
opentf_toolkit_nightly-0.60.0.
|
|
63
|
-
opentf_toolkit_nightly-0.60.0.
|
|
64
|
-
opentf_toolkit_nightly-0.60.0.
|
|
65
|
-
opentf_toolkit_nightly-0.60.0.
|
|
66
|
-
opentf_toolkit_nightly-0.60.0.
|
|
61
|
+
opentf_toolkit_nightly-0.60.0.dev1246.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
|
62
|
+
opentf_toolkit_nightly-0.60.0.dev1246.dist-info/METADATA,sha256=SU-WtPMFOWbatc_aLvy9WFWOmhOl8gfslZqFt1r7Zuk,1932
|
|
63
|
+
opentf_toolkit_nightly-0.60.0.dev1246.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
|
|
64
|
+
opentf_toolkit_nightly-0.60.0.dev1246.dist-info/top_level.txt,sha256=_gPuE6GTT6UNXy1DjtmQSfCcZb_qYA2vWmjg7a30AGk,7
|
|
65
|
+
opentf_toolkit_nightly-0.60.0.dev1246.dist-info/RECORD,,
|
opentf/commons/datasources.py
DELETED
|
@@ -1,834 +0,0 @@
|
|
|
1
|
-
# Copyright (c) 2024 Henix, Henix.fr
|
|
2
|
-
#
|
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
-
# you may not use this file except in compliance with the License.
|
|
5
|
-
# You may obtain a copy of the License at
|
|
6
|
-
#
|
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
-
#
|
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
-
# See the License for the specific language governing permissions and
|
|
13
|
-
# limitations under the License.
|
|
14
|
-
|
|
15
|
-
"""Datasources (testcases, tags and jobs) retrieval helpers"""
|
|
16
|
-
|
|
17
|
-
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
|
|
18
|
-
|
|
19
|
-
from datetime import datetime
|
|
20
|
-
|
|
21
|
-
from flask import current_app
|
|
22
|
-
|
|
23
|
-
from opentf.commons.exceptions import ServiceError
|
|
24
|
-
from opentf.commons.expressions import evaluate_bool
|
|
25
|
-
from opentf.commons.selectors import match_selectors
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
########################################################################
|
|
29
|
-
# Constants
|
|
30
|
-
|
|
31
|
-
SUCCESS = 'SUCCESS'
|
|
32
|
-
FAILURE = 'FAILURE'
|
|
33
|
-
ERROR = 'ERROR'
|
|
34
|
-
SKIPPED = 'SKIPPED'
|
|
35
|
-
TOTAL = 'total count'
|
|
36
|
-
|
|
37
|
-
DETAILS_KEYS = ('failureDetails', 'errorDetails', 'warningDetails')
|
|
38
|
-
STATUSES_ORDER = (SUCCESS, FAILURE, ERROR, SKIPPED)
|
|
39
|
-
FAILURE_STATUSES = (FAILURE, ERROR)
|
|
40
|
-
|
|
41
|
-
PROVIDERCOMMAND = 'ProviderCommand'
|
|
42
|
-
EXECUTIONCOMMAND = 'ExecutionCommand'
|
|
43
|
-
EXECUTIONRESULT = 'ExecutionResult'
|
|
44
|
-
WORKFLOW = 'Workflow'
|
|
45
|
-
GENERATORRESULT = 'GeneratorResult'
|
|
46
|
-
CREATION_TIMESTAMP = 'creationTimestamp'
|
|
47
|
-
|
|
48
|
-
########################################################################
|
|
49
|
-
## Helpers
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
class DataSourceScopeError(ServiceError):
|
|
53
|
-
"""DataSourceScopeError class"""
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
class DataSourceDataError(ServiceError):
|
|
57
|
-
"""DataSourceDataError class"""
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
def _merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> Dict[str, Any]:
|
|
61
|
-
for k, v in dict1.items():
|
|
62
|
-
if k in dict2:
|
|
63
|
-
dict2[k] = _merge_dicts(v.copy(), dict2[k])
|
|
64
|
-
dict3 = dict1.copy()
|
|
65
|
-
dict3.update(dict2)
|
|
66
|
-
return dict3
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
def _as_list(what) -> List[str]:
|
|
70
|
-
return [what] if isinstance(what, str) else what
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
def _get_metadata(
|
|
74
|
-
filter_: Callable, events: Iterable[Dict[str, Any]], kind_: str
|
|
75
|
-
) -> Dict[str, Any]:
|
|
76
|
-
"""Get metadata of the first workflow event that satisfies filter.
|
|
77
|
-
|
|
78
|
-
# Required parameters
|
|
79
|
-
|
|
80
|
-
- filter_: a callable, filtering fuction
|
|
81
|
-
- events: a list of events or iterator
|
|
82
|
-
- kind_: a string, considered events kind
|
|
83
|
-
|
|
84
|
-
# Returned value
|
|
85
|
-
|
|
86
|
-
A possibly empty dictionary, the `.metadata` part of the
|
|
87
|
-
first event that satisfies kind and filter conditions.
|
|
88
|
-
"""
|
|
89
|
-
src = (event for event in events if event['kind'] == kind_)
|
|
90
|
-
return next(filter(filter_, src), {}).get('metadata', {})
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
def parse_testcase_name(full_name: str) -> Tuple[str, str]:
|
|
94
|
-
"""Parse test case name from testResults notification.
|
|
95
|
-
|
|
96
|
-
full_name is a string: classname#testcase name
|
|
97
|
-
|
|
98
|
-
# Returned value
|
|
99
|
-
|
|
100
|
-
A tuple of two strings: suite and test case name. If one
|
|
101
|
-
of strings is empty, returns not empty element value instead.
|
|
102
|
-
"""
|
|
103
|
-
suite, _, name = full_name.partition('#')
|
|
104
|
-
return suite or name, name or suite
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
########################################################################
|
|
108
|
-
## Datasource: Testcases
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
def in_scope(expr: Union[str, bool], contexts: Dict[str, Any]) -> bool:
|
|
112
|
-
"""Safely evaluate datasource scope."""
|
|
113
|
-
try:
|
|
114
|
-
if isinstance(expr, bool):
|
|
115
|
-
return expr
|
|
116
|
-
return evaluate_bool(expr, contexts)
|
|
117
|
-
except ValueError as err:
|
|
118
|
-
raise ValueError(f'Invalid conditional {expr}: {err}.')
|
|
119
|
-
except KeyError as err:
|
|
120
|
-
raise ValueError(f'Nonexisting context entry in expression {expr}: {err}.')
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
def get_testresults(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
124
|
-
"""Return a possibly empty list of Notifications.
|
|
125
|
-
|
|
126
|
-
Each notification in the list is guaranteed to have a
|
|
127
|
-
`spec.testResults` entry.
|
|
128
|
-
"""
|
|
129
|
-
return [item for item in events if _has_testresult(item)]
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
def _has_testresult(item: Dict[str, Any]) -> bool:
|
|
133
|
-
"""Determine if a workflow notification has a testResults element."""
|
|
134
|
-
return item.get('kind') == 'Notification' and item.get('spec', {}).get(
|
|
135
|
-
'testResults', False
|
|
136
|
-
)
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
def _get_workflow_jobs(events: List[Dict[str, Any]]) -> Dict[str, Any]:
|
|
140
|
-
"""Get workflow jobs that have steps.
|
|
141
|
-
|
|
142
|
-
TOTO Will have to be reviewed when adding nested generators.
|
|
143
|
-
|
|
144
|
-
# Required parameters
|
|
145
|
-
|
|
146
|
-
- events: a list of events
|
|
147
|
-
|
|
148
|
-
# Returned value
|
|
149
|
-
|
|
150
|
-
A dictionary. Keys are job names, values are a (dict, event) pair.
|
|
151
|
-
|
|
152
|
-
- name: a string, the job's name and the generator's job_id, if any
|
|
153
|
-
- job: a dictionary (its `runs-on` entry is a list of strings)
|
|
154
|
-
- event: either a workflow or a generatorresult event.
|
|
155
|
-
"""
|
|
156
|
-
|
|
157
|
-
def _clean(j):
|
|
158
|
-
j['runs-on'] = _as_list(j.get('runs-on', []))
|
|
159
|
-
return j
|
|
160
|
-
|
|
161
|
-
jobs = {
|
|
162
|
-
job_name + ' ' + event['metadata'].get('job_id', ''): (_clean(job), event)
|
|
163
|
-
for event in filter(lambda x: x['kind'] in (WORKFLOW, GENERATORRESULT), events)
|
|
164
|
-
for job_name, job in event.get('jobs', {}).items()
|
|
165
|
-
}
|
|
166
|
-
for job_name, (job, event) in jobs.items():
|
|
167
|
-
if ' ' not in job_name.strip():
|
|
168
|
-
# we do not have to patch top-level jobs
|
|
169
|
-
continue
|
|
170
|
-
if not event['metadata']['job_origin']:
|
|
171
|
-
job['runs-on'] = list(
|
|
172
|
-
set(
|
|
173
|
-
job['runs-on'] + jobs[event['metadata']['name'] + ' '][0]['runs-on']
|
|
174
|
-
)
|
|
175
|
-
)
|
|
176
|
-
|
|
177
|
-
return {name: (job, event) for name, (job, event) in jobs.items() if 'steps' in job}
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
def _uses_inception(events: List[Dict[str, Any]]) -> bool:
|
|
181
|
-
"""Determine if a workflow is the inception workflow."""
|
|
182
|
-
workflow_event = next(
|
|
183
|
-
(event for event in events if event['kind'] == WORKFLOW), None
|
|
184
|
-
)
|
|
185
|
-
if not workflow_event:
|
|
186
|
-
raise ValueError('No Workflow event in workflow events...')
|
|
187
|
-
return any(
|
|
188
|
-
'inception' in _as_list(job.get('runs-on', []))
|
|
189
|
-
for job in workflow_event['jobs'].values()
|
|
190
|
-
)
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
def _get_inception_testresults(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
194
|
-
"""Get unique testResults notifications for inception workflow.
|
|
195
|
-
|
|
196
|
-
Note: This is a kludge until we find a reliable way to map such results
|
|
197
|
-
to the executed tests list.
|
|
198
|
-
"""
|
|
199
|
-
unique_results = set()
|
|
200
|
-
unique_events = []
|
|
201
|
-
for event in get_testresults(events):
|
|
202
|
-
event_results = []
|
|
203
|
-
for result in event['spec']['testResults']:
|
|
204
|
-
event_results.append(
|
|
205
|
-
(
|
|
206
|
-
result['attachment_origin'],
|
|
207
|
-
result['name'],
|
|
208
|
-
result['duration'],
|
|
209
|
-
result['status'],
|
|
210
|
-
)
|
|
211
|
-
)
|
|
212
|
-
if tuple(event_results) not in unique_results:
|
|
213
|
-
unique_results.add(tuple(event_results))
|
|
214
|
-
unique_events.append(event)
|
|
215
|
-
return unique_events
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
def _get_testresult_params(param_step_id: str, job: Dict[str, Any]) -> Dict[str, Any]:
|
|
219
|
-
"""Get .with.data field of param_step_id.
|
|
220
|
-
|
|
221
|
-
# Required parameters
|
|
222
|
-
|
|
223
|
-
- param_step_id: a string
|
|
224
|
-
- job: a dictionary
|
|
225
|
-
|
|
226
|
-
# Returned value
|
|
227
|
-
|
|
228
|
-
A dictionary, the `.with.data` part of the params step.
|
|
229
|
-
|
|
230
|
-
# Raised exceptions
|
|
231
|
-
|
|
232
|
-
An _IndexError_ exception is raised if no params step is found.
|
|
233
|
-
"""
|
|
234
|
-
return [
|
|
235
|
-
step['with']['data'] for step in job['steps'] if step.get('id') == param_step_id
|
|
236
|
-
].pop()
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
def _get_testcase_timestamps_and_job_id(step_origin: str, events: List[Dict[str, Any]]):
|
|
240
|
-
def _is_origin_provider(event: Dict[str, Any]) -> bool:
|
|
241
|
-
return event['metadata']['step_id'] == step_origin
|
|
242
|
-
|
|
243
|
-
def _is_origin_execution(event: Dict[str, Any]) -> bool:
|
|
244
|
-
return step_origin in event['metadata']['step_origin']
|
|
245
|
-
|
|
246
|
-
creation = _get_metadata(_is_origin_provider, events, PROVIDERCOMMAND)
|
|
247
|
-
start = _get_metadata(_is_origin_execution, events, EXECUTIONCOMMAND)
|
|
248
|
-
end = _get_metadata(_is_origin_execution, reversed(events), EXECUTIONRESULT)
|
|
249
|
-
|
|
250
|
-
return {
|
|
251
|
-
CREATION_TIMESTAMP: creation.get(CREATION_TIMESTAMP, None),
|
|
252
|
-
'startTime': start.get(CREATION_TIMESTAMP, None),
|
|
253
|
-
'endTime': end.get(CREATION_TIMESTAMP, None),
|
|
254
|
-
'job_id': creation.get('job_id', None),
|
|
255
|
-
}
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
def _complete_labels(
|
|
259
|
-
labels: Dict[str, Any],
|
|
260
|
-
exec_step_id: str,
|
|
261
|
-
managedtests: Dict[str, Any],
|
|
262
|
-
job: Dict[str, Any],
|
|
263
|
-
) -> Dict[str, Any]:
|
|
264
|
-
testcases = managedtests.get('testCases')
|
|
265
|
-
if not testcases or exec_step_id not in testcases:
|
|
266
|
-
if not testcases:
|
|
267
|
-
current_app.logger.warning(
|
|
268
|
-
f'Was expecting a "testCases" part in parent of step {exec_step_id}, ignoring.'
|
|
269
|
-
)
|
|
270
|
-
return labels
|
|
271
|
-
|
|
272
|
-
labels['test']['managed'] = True
|
|
273
|
-
testcase_metadata = testcases[exec_step_id]
|
|
274
|
-
labels['test']['technology-name'] = testcase_metadata['technology']
|
|
275
|
-
labels['test']['collection'] = managedtests.get('testPlan', {})
|
|
276
|
-
labels['test'].update(
|
|
277
|
-
{
|
|
278
|
-
key: value
|
|
279
|
-
for key, value in testcase_metadata.items()
|
|
280
|
-
if key
|
|
281
|
-
in (
|
|
282
|
-
'name',
|
|
283
|
-
'reference',
|
|
284
|
-
'importance',
|
|
285
|
-
'nature',
|
|
286
|
-
'path',
|
|
287
|
-
'type',
|
|
288
|
-
'uuid',
|
|
289
|
-
)
|
|
290
|
-
}
|
|
291
|
-
)
|
|
292
|
-
try:
|
|
293
|
-
params = _get_testresult_params(testcase_metadata['param_step_id'], job)
|
|
294
|
-
labels['test']['global'] = params.get('global', {})
|
|
295
|
-
labels['test']['data'] = params.get('test', {})
|
|
296
|
-
except IndexError:
|
|
297
|
-
current_app.logger.warning(
|
|
298
|
-
f'Could not find "params" step associated to "execute" step {exec_step_id}, ignoring.'
|
|
299
|
-
)
|
|
300
|
-
return labels
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
def _create_testresult_labels(
|
|
304
|
-
events: List[Dict[str, Any]],
|
|
305
|
-
step_origin: str,
|
|
306
|
-
exec_step: Dict[str, Any],
|
|
307
|
-
job_name: str,
|
|
308
|
-
job: Dict[str, Any],
|
|
309
|
-
parent: Dict[str, Any],
|
|
310
|
-
) -> Dict[str, Any]:
|
|
311
|
-
"""Create labels for test result.
|
|
312
|
-
|
|
313
|
-
# Required parameters
|
|
314
|
-
|
|
315
|
-
- events: a list, workflow events
|
|
316
|
-
- step_origin: a string, the 'execute' step uuid
|
|
317
|
-
- exec_step: a dictionary, the 'execute' step
|
|
318
|
-
- job_name: a string (the name of the job containing exec_step)
|
|
319
|
-
- job: a dictionary, the job containing exec_step
|
|
320
|
-
- parent: a dictionary, the event defining the job
|
|
321
|
-
|
|
322
|
-
# Returned value
|
|
323
|
-
|
|
324
|
-
A labels dictionary.
|
|
325
|
-
"""
|
|
326
|
-
exec_step_id = exec_step['id']
|
|
327
|
-
times_jobid = _get_testcase_timestamps_and_job_id(step_origin, events)
|
|
328
|
-
labels = {
|
|
329
|
-
'apiVersion': 'testing.opentestfactory.org/v1alpha1',
|
|
330
|
-
'kind': 'TestCase',
|
|
331
|
-
'metadata': {
|
|
332
|
-
CREATION_TIMESTAMP: times_jobid[CREATION_TIMESTAMP],
|
|
333
|
-
'execution_id': exec_step_id,
|
|
334
|
-
'job_id': times_jobid['job_id'],
|
|
335
|
-
'namespace': parent['metadata']['namespace'],
|
|
336
|
-
'workflow_id': parent['metadata']['workflow_id'],
|
|
337
|
-
},
|
|
338
|
-
'test': {
|
|
339
|
-
'job': job_name.split()[0],
|
|
340
|
-
'managed': False,
|
|
341
|
-
'runs-on': job['runs-on'],
|
|
342
|
-
'technology': exec_step['uses'].partition('/')[0],
|
|
343
|
-
'test': exec_step.get('with', {}).get('test'),
|
|
344
|
-
'uses': exec_step['uses'],
|
|
345
|
-
},
|
|
346
|
-
'execution': {
|
|
347
|
-
'startTime': times_jobid['startTime'],
|
|
348
|
-
'endTime': times_jobid['endTime'],
|
|
349
|
-
},
|
|
350
|
-
}
|
|
351
|
-
if not (managedtests := parent['metadata'].get('managedTests')):
|
|
352
|
-
return labels
|
|
353
|
-
return _complete_labels(labels, exec_step_id, managedtests, job)
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
def _get_testresult_steporigin(
|
|
357
|
-
attachment_origin: str, events: List[Dict[str, Any]]
|
|
358
|
-
) -> Optional[str]:
|
|
359
|
-
"""Find the step that produced the attachment.
|
|
360
|
-
|
|
361
|
-
# Required parameters
|
|
362
|
-
|
|
363
|
-
- attachment_origin: a string (the attachment uuid)
|
|
364
|
-
- events: a list of events
|
|
365
|
-
|
|
366
|
-
# Returned value
|
|
367
|
-
|
|
368
|
-
A step ID (a string) or None.
|
|
369
|
-
"""
|
|
370
|
-
for event in events:
|
|
371
|
-
if not (
|
|
372
|
-
event['kind'] == EXECUTIONRESULT and event['metadata'].get('attachments')
|
|
373
|
-
):
|
|
374
|
-
continue
|
|
375
|
-
metadata = event['metadata']
|
|
376
|
-
for value in metadata.get('attachments', {}).values():
|
|
377
|
-
if value['uuid'] != attachment_origin:
|
|
378
|
-
continue
|
|
379
|
-
return (
|
|
380
|
-
metadata['step_origin'][0]
|
|
381
|
-
if metadata['step_origin']
|
|
382
|
-
else metadata['step_id']
|
|
383
|
-
)
|
|
384
|
-
return None
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
def _get_testresult_labels(
|
|
388
|
-
attachment_origin: str, events: List[Dict[str, Any]]
|
|
389
|
-
) -> Optional[Dict[str, Any]]:
|
|
390
|
-
"""Get labels for test result.
|
|
391
|
-
|
|
392
|
-
# Required parameters
|
|
393
|
-
|
|
394
|
-
- attachment_origin: a string (the attachment uuid)
|
|
395
|
-
- events: a list of events
|
|
396
|
-
|
|
397
|
-
# Returned value
|
|
398
|
-
|
|
399
|
-
A _labels_ dictionary or None.
|
|
400
|
-
"""
|
|
401
|
-
if step_origin := _get_testresult_steporigin(attachment_origin, events):
|
|
402
|
-
jobs_with_steps = _get_workflow_jobs(events)
|
|
403
|
-
for job_name, (job, parent) in jobs_with_steps.items():
|
|
404
|
-
for exec_step in job['steps']:
|
|
405
|
-
if exec_step.get('id') == step_origin:
|
|
406
|
-
return _create_testresult_labels(
|
|
407
|
-
events, step_origin, exec_step, job_name, job, parent
|
|
408
|
-
)
|
|
409
|
-
return None
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
def _make_testcase_from_testresult(
|
|
413
|
-
item: Dict[str, Any], labels: Dict[str, Any], scope: Union[str, bool]
|
|
414
|
-
) -> Dict[str, Any]:
|
|
415
|
-
suite_name, testcase_name = parse_testcase_name(item['name'])
|
|
416
|
-
item_data = {
|
|
417
|
-
'metadata': {
|
|
418
|
-
'name': item['name'],
|
|
419
|
-
'id': item['id'],
|
|
420
|
-
},
|
|
421
|
-
'test': {
|
|
422
|
-
'outcome': item['status'].lower(),
|
|
423
|
-
'suiteName': suite_name,
|
|
424
|
-
'testCaseName': testcase_name,
|
|
425
|
-
},
|
|
426
|
-
'status': item['status'],
|
|
427
|
-
'execution': {
|
|
428
|
-
'duration': item.get('duration', 0),
|
|
429
|
-
},
|
|
430
|
-
}
|
|
431
|
-
if item['status'] in FAILURE_STATUSES:
|
|
432
|
-
for key in DETAILS_KEYS:
|
|
433
|
-
if item.get(key):
|
|
434
|
-
item_data['execution'][key] = item[key]
|
|
435
|
-
if item.get('errorsList'):
|
|
436
|
-
item_data['execution']['errorsList'] = item['errorsList']
|
|
437
|
-
testcase = _merge_dicts(labels, item_data)
|
|
438
|
-
try:
|
|
439
|
-
if not in_scope(scope, testcase):
|
|
440
|
-
return {}
|
|
441
|
-
except ValueError as err:
|
|
442
|
-
raise DataSourceScopeError(f'[SCOPE ERROR] {err}')
|
|
443
|
-
return testcase
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
def _get_max_count(state: Dict[str, Any]) -> int:
|
|
447
|
-
if state['reset']:
|
|
448
|
-
return state['per_page'] * state['page']
|
|
449
|
-
return state['per_page']
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
def _extract_testcases(
|
|
453
|
-
testresults: List[Dict[str, Any]],
|
|
454
|
-
state: Dict[str, Any],
|
|
455
|
-
scope: Union[str, bool],
|
|
456
|
-
events: List[Dict[str, Any]],
|
|
457
|
-
) -> Dict[str, Dict[str, Any]]:
|
|
458
|
-
testcases = {}
|
|
459
|
-
items = 0
|
|
460
|
-
testresults_part = testresults[state['last_notification_used'] :]
|
|
461
|
-
if not testresults_part:
|
|
462
|
-
return {}
|
|
463
|
-
for i, testresult in enumerate(
|
|
464
|
-
testresults_part,
|
|
465
|
-
start=state['last_notification_used'],
|
|
466
|
-
):
|
|
467
|
-
if i == state['last_notification_used']:
|
|
468
|
-
last_testresult_used = state['last_testresult_used']
|
|
469
|
-
else:
|
|
470
|
-
last_testresult_used = 0
|
|
471
|
-
execution_id = testresult['metadata']['attachment_origin'][0]
|
|
472
|
-
labels = _get_testresult_labels(execution_id, events)
|
|
473
|
-
if not labels:
|
|
474
|
-
continue
|
|
475
|
-
for j, item in enumerate(
|
|
476
|
-
testresult['spec']['testResults'][last_testresult_used:],
|
|
477
|
-
start=last_testresult_used,
|
|
478
|
-
):
|
|
479
|
-
testcase = _make_testcase_from_testresult(item, labels, scope)
|
|
480
|
-
if not testcase:
|
|
481
|
-
continue
|
|
482
|
-
if not match_selectors(testcase, state['fieldselector']):
|
|
483
|
-
continue
|
|
484
|
-
testcases[item['id']] = testcase
|
|
485
|
-
items += 1
|
|
486
|
-
if items > _get_max_count(state):
|
|
487
|
-
state['last_notification_used'] = i
|
|
488
|
-
state['last_testresult_used'] = j
|
|
489
|
-
return testcases
|
|
490
|
-
|
|
491
|
-
state['last_notification_used'] = i + 1
|
|
492
|
-
state['last_testresult_used'] = 0
|
|
493
|
-
return testcases
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
def get_testcases(
|
|
497
|
-
events: List[Dict[str, Any]], scope: Union[str, bool] = True, state=None
|
|
498
|
-
) -> Dict[str, Dict[str, Any]]:
|
|
499
|
-
"""Extract metadata for each test result.
|
|
500
|
-
|
|
501
|
-
Test results are Notification events with a `.spec.testResults`
|
|
502
|
-
entry.
|
|
503
|
-
|
|
504
|
-
# Required parameters
|
|
505
|
-
|
|
506
|
-
- events: a list of events
|
|
507
|
-
|
|
508
|
-
# Returned value
|
|
509
|
-
|
|
510
|
-
A possibly empty dictionary. Keys are the test result IDs, values
|
|
511
|
-
are dictionaries with test case metadata, labels, status, and
|
|
512
|
-
execution info.
|
|
513
|
-
|
|
514
|
-
`testcases` is a dictionary of entries like:
|
|
515
|
-
|
|
516
|
-
```
|
|
517
|
-
apiVersion: testing.opentestfactory.org/v1alpha1
|
|
518
|
-
kind: TestCase
|
|
519
|
-
metadata:
|
|
520
|
-
name: <<<Test case full name>>>
|
|
521
|
-
id: <<<Test case uuid>>>
|
|
522
|
-
job_id: <<<Test case job uuid>>>
|
|
523
|
-
execution_id: <<<Test case attachment origin uuid>>>
|
|
524
|
-
workflow_id: <<<Test case workflow uuid>>>
|
|
525
|
-
namespace: <<<Test case namespace>>>
|
|
526
|
-
creationTimestamp: <<<Test case provider creation timestamp>>>
|
|
527
|
-
test:
|
|
528
|
-
runs-on: <<<Test case execution environment tags>>>
|
|
529
|
-
uses: <<<Test case provider>>>
|
|
530
|
-
technology: <<<Test case technology>>>
|
|
531
|
-
managed: bool <<<True for test referential managed test cases>>>
|
|
532
|
-
job: <<<Test case job name>>>
|
|
533
|
-
test: <<<Test case test reference>>>
|
|
534
|
-
suiteName: <<<Test case suite>>>
|
|
535
|
-
testCaseName: <<<Test case short name>>>
|
|
536
|
-
outcome: <<<success|failure|skipped|error>>>
|
|
537
|
-
status: <<<SUCCESS|FAILURE|SKIPPED|ERROR>>>
|
|
538
|
-
execution:
|
|
539
|
-
startTime: <<<Test case execution start time>>>
|
|
540
|
-
endTime: <<<Test case execution end time>>>
|
|
541
|
-
duration: <<<Test case execution duration (from result notification)>>>
|
|
542
|
-
errorsList: [<<<Test case general execution errors>>>]
|
|
543
|
-
(failure|warning|error)Details: {<<<Test case failure details>>>}
|
|
544
|
-
```
|
|
545
|
-
|
|
546
|
-
# Raised exceptions
|
|
547
|
-
|
|
548
|
-
A _ValueError_ exception is raised if there were no test results in
|
|
549
|
-
`events` or some scope errors occured retrieving test results.
|
|
550
|
-
"""
|
|
551
|
-
if not state:
|
|
552
|
-
raise ValueError('No workflow cache state received from observer.')
|
|
553
|
-
|
|
554
|
-
if _uses_inception(events):
|
|
555
|
-
testresults = _get_inception_testresults(events)
|
|
556
|
-
else:
|
|
557
|
-
testresults = get_testresults(events)
|
|
558
|
-
|
|
559
|
-
if not testresults:
|
|
560
|
-
return {}
|
|
561
|
-
|
|
562
|
-
if testcases := _extract_testcases(testresults, state, scope, events):
|
|
563
|
-
return testcases
|
|
564
|
-
|
|
565
|
-
raise DataSourceScopeError(f'No test cases matching scope `{scope}`.')
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
########################################################################
|
|
569
|
-
## Datasource: Tags
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
def _make_tag_datasource(tag: str, parent: Dict[str, Any]) -> Dict[str, Any]:
|
|
573
|
-
return {
|
|
574
|
-
'apiVersion': 'opentestfactory.org/v1alpha1',
|
|
575
|
-
'kind': 'Tag',
|
|
576
|
-
'metadata': {
|
|
577
|
-
'name': tag,
|
|
578
|
-
'workflow_id': parent['metadata']['workflow_id'],
|
|
579
|
-
'namespace': parent['metadata']['namespace'],
|
|
580
|
-
},
|
|
581
|
-
'status': {
|
|
582
|
-
'jobCount': 0,
|
|
583
|
-
'testCaseCount': 0,
|
|
584
|
-
'testCaseStatusSummary': {
|
|
585
|
-
'success': 0,
|
|
586
|
-
'failure': 0,
|
|
587
|
-
'error': 0,
|
|
588
|
-
'skipped': 0,
|
|
589
|
-
'cancelled': 0,
|
|
590
|
-
},
|
|
591
|
-
},
|
|
592
|
-
}
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
def get_tags(events: List[Dict[str, Any]]) -> Dict[str, Any]:
|
|
596
|
-
"""Extract metadata for each execution environment tag.
|
|
597
|
-
|
|
598
|
-
# Required parameters:
|
|
599
|
-
|
|
600
|
-
- events: a list of events
|
|
601
|
-
|
|
602
|
-
# Returned value:
|
|
603
|
-
|
|
604
|
-
A dictionary. Keys are tags names, values are dictionaries with tag metadata and status.
|
|
605
|
-
|
|
606
|
-
`tags` is a dictionary of entries like:
|
|
607
|
-
|
|
608
|
-
```
|
|
609
|
-
apiVersion: opentestfactory.org/v1alpha1
|
|
610
|
-
kind: Tag
|
|
611
|
-
metadata:
|
|
612
|
-
name: <<<Tag name>>>
|
|
613
|
-
workflow_id: <<<Tag workflow id>>>
|
|
614
|
-
namespace: <<<Tag namespace>>>
|
|
615
|
-
status:
|
|
616
|
-
jobCount: <<<Tag related jobs count>>>
|
|
617
|
-
testCaseCount: <<<Tag related test cases count>>>
|
|
618
|
-
testCaseStatusSummary: <<<Tag test case count by status>>>
|
|
619
|
-
success: N
|
|
620
|
-
failure: N
|
|
621
|
-
error: N
|
|
622
|
-
skipped: N
|
|
623
|
-
cancelled: N
|
|
624
|
-
```
|
|
625
|
-
"""
|
|
626
|
-
if not (jobs := _get_workflow_jobs(events)):
|
|
627
|
-
raise DataSourceDataError(
|
|
628
|
-
'No job events found in workflow. Cannot extract data for tags.'
|
|
629
|
-
)
|
|
630
|
-
tags = {}
|
|
631
|
-
for job, parent in jobs.values():
|
|
632
|
-
for tag in job['runs-on']:
|
|
633
|
-
tags.setdefault(tag, _make_tag_datasource(tag, parent))
|
|
634
|
-
tags[tag]['status']['jobCount'] += 1
|
|
635
|
-
|
|
636
|
-
return tags
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
########################################################################
|
|
640
|
-
## Datasource: Jobs
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
def _collect_job_times_and_id(
|
|
644
|
-
events: List[Dict[str, Any]], request_metadata: Dict[str, Any]
|
|
645
|
-
) -> Dict[str, Any]:
|
|
646
|
-
"""Collect job start and end time, if available.
|
|
647
|
-
|
|
648
|
-
# Required parameters
|
|
649
|
-
|
|
650
|
-
- events: a list of events
|
|
651
|
-
- request_metadata: the channel request metadata for the job
|
|
652
|
-
|
|
653
|
-
# Returned object
|
|
654
|
-
|
|
655
|
-
A dictionary with the following entries:
|
|
656
|
-
|
|
657
|
-
- job_id
|
|
658
|
-
- requestTime
|
|
659
|
-
|
|
660
|
-
If the job started, it contains the additional entries:
|
|
661
|
-
|
|
662
|
-
- startTime
|
|
663
|
-
- endTime
|
|
664
|
-
- duration
|
|
665
|
-
"""
|
|
666
|
-
job_id = request_metadata['job_id']
|
|
667
|
-
request_time = request_metadata[CREATION_TIMESTAMP]
|
|
668
|
-
|
|
669
|
-
start = end = None
|
|
670
|
-
for event in events:
|
|
671
|
-
metadata = event['metadata']
|
|
672
|
-
kind_step_id = (event['kind'], metadata['step_sequence_id'], metadata['job_id'])
|
|
673
|
-
if kind_step_id == (EXECUTIONCOMMAND, 0, job_id):
|
|
674
|
-
start = metadata[CREATION_TIMESTAMP]
|
|
675
|
-
elif kind_step_id == (EXECUTIONRESULT, -2, job_id):
|
|
676
|
-
end = metadata[CREATION_TIMESTAMP]
|
|
677
|
-
if start and end:
|
|
678
|
-
break
|
|
679
|
-
else:
|
|
680
|
-
return {'job_id': job_id, 'requestTime': request_time}
|
|
681
|
-
|
|
682
|
-
return {
|
|
683
|
-
'requestTime': request_time,
|
|
684
|
-
'startTime': start,
|
|
685
|
-
'endTime': end,
|
|
686
|
-
'duration': (
|
|
687
|
-
datetime.fromisoformat(end) - datetime.fromisoformat(start)
|
|
688
|
-
).total_seconds()
|
|
689
|
-
* 1000,
|
|
690
|
-
'job_id': job_id,
|
|
691
|
-
}
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
def _make_job_datasource(
    job_name: str,
    request_metadata: Dict[str, Any],
    job: Dict[str, Any],
    parent: Dict[str, Any],
    events: List[Dict[str, Any]],
) -> Dict[str, Any]:
    """Make datasource object for job.

    # Required parameters

    - job_name: a string, the 'short' job name
    - request_metadata: the channel request metadata for the job or {}
    - job: a dictionary, the job definition
    - parent: a workflow or a generatorresult event
    - events: a list of events

    # Returned value

    A 'Job' datasource object.
    """
    # Timing data is only derivable when a channel request was seen.
    times = (
        _collect_job_times_and_id(events, request_metadata)
        if request_metadata
        else {}
    )

    parent_metadata = parent['metadata']
    metadata = {
        'name': job_name,
        'id': times.get('job_id'),
        'namespace': parent_metadata['namespace'],
        'workflow_id': parent_metadata['workflow_id'],
        CREATION_TIMESTAMP: parent_metadata.get(CREATION_TIMESTAMP),
    }

    # Job-level variables override the parent's.
    variables = dict(parent.get('variables', {}))
    variables.update(job.get('variables', {}))

    # Test case counters start at zero; presumably filled in later by
    # the caller from test result events.
    status = {
        'phase': 'SUCCEEDED',
        'requestTime': times.get('requestTime'),
        'startTime': times.get('startTime'),
        'endTime': times.get('endTime'),
        'duration': times.get('duration'),
        'testCaseCount': 0,
        'testCaseStatusSummary': {
            outcome: 0
            for outcome in ('success', 'failure', 'error', 'skipped', 'cancelled')
        },
    }

    return {
        'apiVersion': 'opentestfactory.org/v1alpha1',
        'kind': 'Job',
        'metadata': metadata,
        'spec': {
            'runs-on': job['runs-on'],
            'variables': variables,
        },
        'status': status,
    }
def get_jobs(events: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Extract metadata for each job.

    # Required parameters:

    - events: a list of events

    # Returned value:

    A dictionary. Keys are job names, values are dictionaries with
    job metadata, spec, and status.

    `jobs_testcases` is a dictionary of entries like:

    ```
    apiVersion: opentestfactory.org/v1alpha1
    kind: Job
    metadata:
      name: <<<Job name>>
      id: <<<Job uuid>>>
      namespace: <<<Job namespace>>>
      workflow_id: <<<Job workflow id>>>
      creationTimestamp: <<<Job creation timestamp>>>
    spec:
      runs-on: <<<Job execution environment tags>>>
      variables: <<<Workflow and job specific environment variables>>>
    status:
      phase: <<<Job phase>>>
      requestTime: <<<Job execution environment request time>>>
      startTime: <<<Job start time>>>
      endTime: <<<Job end time>>>
      duration: <<<Job duration (endTime - startTime)>>>
      testCaseCount: <<<Job test case count>>>
      testCaseStatusSummary: <<<Job test case count by status>>>
        success: N
        failure: N
        error: N
        skipped: N
        cancelled: N
    ```
    """

    def _matches(item, items):
        # When both are present, the item must be the most recent
        # origin entry; otherwise they match only when both are empty.
        if item and items:
            return items[-1] == item
        return not item and not items

    workflow_jobs = _get_workflow_jobs(events)
    if not workflow_jobs:
        raise DataSourceDataError(
            'No job events found in workflow. Cannot extract data for jobs.'
        )

    # Keep only job-level command/result events: first step (0),
    # channel request (-1), and job result (-2).
    jobs_events = [
        event
        for event in events
        if event['kind'] in (EXECUTIONCOMMAND, EXECUTIONRESULT)
        and event['metadata']['step_sequence_id'] in (0, -1, -2)
    ]

    jobs = {}
    for job_name, (job, parent) in workflow_jobs.items():
        name, _, uuid = job_name.partition(' ')

        # Locate the channel request event for this job, if any.
        channel_request_metadata: Dict[str, Any] = {}
        for event in jobs_events:
            metadata = event['metadata']
            if (
                event['kind'] == EXECUTIONCOMMAND
                and metadata['step_sequence_id'] == -1
                and metadata['name'] == name
                and _matches(uuid, metadata['job_origin'])
            ):
                channel_request_metadata = metadata
                break

        data = _make_job_datasource(
            name, channel_request_metadata, job, parent, jobs_events
        )
        # NOTE(review): when no channel request was found the id is
        # None, so multiple such jobs would share the None key — verify
        # this cannot occur in practice.
        jobs[data['metadata']['id']] = data

    return jobs
|
|
File without changes
|
|
File without changes
|
|
File without changes
|