opentf-toolkit-nightly 0.55.0.dev941__py3-none-any.whl → 0.55.0.dev962__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
opentf/commons/__init__.py

@@ -585,7 +585,10 @@ def is_uuid(uuid: str) -> bool:
 
 
 def make_status_response(
-    reason: str, message: str, details: Optional[Dict[str, Any]] = None
+    reason: str,
+    message: str,
+    details: Optional[Dict[str, Any]] = None,
+    silent: bool = False,
 ) -> Response:
     """Return a new status response object.
 
@@ -597,6 +600,7 @@ def make_status_response(
     # Optional parameters:
 
     - details: a dictionary or None (None by default)
+    - silent: a boolean (False by default)
 
     # Returned value
 
@@ -613,10 +617,11 @@ def make_status_response(
     - code: an integer (derived from `reason`)
     """
    code = REASON_STATUS[reason]
-    if code // 100 == 4:
-        logging.warning(message)
-    elif code // 100 == 5:
-        logging.error(message)
+    if not silent:
+        if code // 100 == 4:
+            logging.warning(message)
+        elif code // 100 == 5:
+            logging.error(message)
     return make_response(
         {
             'kind': 'Status',
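
For context, a minimal sketch (not part of the package) of how a caller might use the new `silent` flag to suppress the 4xx/5xx log line while still returning the status response. It assumes `'OK'` and `'NotFound'` are valid `REASON_STATUS` reasons, that the call happens inside a Flask request context, and the handler and `STORE` dictionary are purely illustrative:

```python
from opentf.commons import make_status_response

STORE = {}  # hypothetical in-memory store, for illustration only


def delete_item(item_id: str):
    # A 404 here is expected and routine, so silent=True skips the
    # logging.warning() call make_status_response would otherwise emit
    # for 4xx reasons.
    if item_id not in STORE:
        return make_status_response(
            'NotFound', f'Item {item_id} not found.', silent=True
        )
    del STORE[item_id]
    return make_status_response('OK', f'Item {item_id} deleted.')
```
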
opentf/commons/datasources.py

@@ -12,13 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Test case metadata retrieval helpers"""
+"""Datasources (testcases, tags and jobs) retrieval helpers"""
 
-from typing import Any, Dict, Generator, List, Optional, Set
-from collections import defaultdict
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
+
+from datetime import datetime
+
+
+from flask import current_app
 
 from opentf.commons.expressions import evaluate_bool
-from opentf.toolkit.core import warning
 
 
 ########################################################################
@@ -34,51 +37,76 @@ DETAILS_KEYS = ('failureDetails', 'errorDetails', 'warningDetails')
 STATUSES_ORDER = (SUCCESS, FAILURE, ERROR, SKIPPED)
 FAILURE_STATUSES = (FAILURE, ERROR)
 
+PROVIDERCOMMAND = 'ProviderCommand'
+EXECUTIONCOMMAND = 'ExecutionCommand'
+EXECUTIONRESULT = 'ExecutionResult'
+WORKFLOW = 'Workflow'
+GENERATORRESULT = 'GeneratorResult'
+CREATION_TIMESTAMP = 'creationTimestamp'
+
 ########################################################################
 ## Helpers
 
 
-def _get_path(src: Dict[str, Any], path: List[str]) -> Any:
-    if not path:
-        return src
-    try:
-        return _get_path(src[path[0]], path[1:])
-    except KeyError:
-        return 'KeyError'
+def _merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> Dict[str, Any]:
+    for k, v in dict1.items():
+        if k in dict2:
+            dict2[k] = _merge_dicts(v.copy(), dict2[k])
+    dict3 = dict1.copy()
+    dict3.update(dict2)
+    return dict3
 
 
-def _get_sorted_testcases(
-    testcase_metadata: Dict[str, Any], path: List[str]
+def _as_list(what) -> List[str]:
+    return [what] if isinstance(what, str) else what
+
+
+def _get_metadata(
+    filter_: Callable, events: Iterable[Dict[str, Any]], kind_: str
 ) -> Dict[str, Any]:
-    sorted_testcases = {}
-    for testcase, data in testcase_metadata.items():
-        sorted_testcases.setdefault(_get_path(data, path), {})[testcase] = data
-    return sorted_testcases
+    """Get metadata of the first workflow event that satisfies filter.
+
+    # Required parameters
 
+    - filter_: a callable, the filtering function
+    - events: a list of events or an iterator
+    - kind_: a string, the considered event kind
 
-def _get_sum_for_status(testcases: Dict[str, Any], status: str) -> int:
-    return sum(1 for testcase in testcases.values() if testcase['status'] == status)
+    # Returned value
 
+    A possibly empty dictionary, the `.metadata` part of the
+    first event that satisfies kind and filter conditions.
+    """
+    src = (event for event in events if event['kind'] == kind_)
+    return next(filter(filter_, src), {}).get('metadata', {})
 
-def _as_list(what) -> List[str]:
-    return [what] if isinstance(what, str) else what
+
+def parse_testcase_name(full_name: str) -> Tuple[str, str]:
+    """Parse test case name from testResults notification.
+
+    full_name is a string: classname#testcase name
+
+    # Returned value
+
+    A tuple of two strings: suite and test case name.  If one of
+    the strings is empty, the non-empty value is used for both.
+    """
+    suite, _, name = full_name.partition('#')
+    return suite or name, name or suite
 
 
 ########################################################################
 ## Datasource: Testcases
 
 
-def in_scope(expr: str, contexts: Dict[str, Any], scopes_errors: Set[str]) -> bool:
-    """Safely evaluate quality gate scope."""
+def in_scope(expr: str, contexts: Dict[str, Any]) -> bool:
+    """Safely evaluate datasource scope."""
     try:
         return evaluate_bool(expr, contexts)
     except ValueError as err:
-        msg = f'Invalid conditional {expr}: {err}.'
-        scopes_errors.add(msg)
+        raise ValueError(f'Invalid conditional {expr}: {err}.')
     except KeyError as err:
-        msg = f'Nonexisting context entry in expression {expr}: {err}.'
-        scopes_errors.add(msg)
-    return False
+        raise ValueError(f'Nonexisting context entry in expression {expr}: {err}.')
 
 
 def get_testresults(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
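
For context, a minimal sketch (not part of the diff) of what the two new helpers above do. `_merge_dicts` recursively overlays the second dictionary on the first (the second wins on leaf conflicts), and `parse_testcase_name` splits a `classname#testcase` string, falling back to the non-empty part when one side is missing. The input values are made up:

```python
# _merge_dicts is an internal helper; parse_testcase_name is public.
from opentf.commons.datasources import _merge_dicts, parse_testcase_name

labels = {'test': {'managed': False, 'uses': 'junit/junit'}}
item_data = {'test': {'outcome': 'failure'}, 'status': 'FAILURE'}

# Nested 'test' dicts are merged key by key; top-level keys are unioned.
merged = _merge_dicts(labels, item_data)
assert merged == {
    'test': {'managed': False, 'uses': 'junit/junit', 'outcome': 'failure'},
    'status': 'FAILURE',
}

# 'classname#testcase' splits into (suite, name); a bare name is used for both.
assert parse_testcase_name('com.example.Suite#test_login') == (
    'com.example.Suite',
    'test_login',
)
assert parse_testcase_name('test_login') == ('test_login', 'test_login')
```
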
@@ -97,10 +125,31 @@ def _has_testresult(item: Dict[str, Any]) -> bool:
     )
 
 
+def _get_workflow_jobs(events: List[Dict[str, Any]]) -> Dict[str, Any]:
+    """Get the jobs with steps defined in Workflow or GeneratorResult events.
+
+    # Required parameters
+
+    - events: a list of events
+
+    # Returned value
+
+    A dictionary. Keys are job names, values are a (dict, event) pair.
+
+    _event_ is either a workflow or a generatorresult event.
+    """
+    return {
+        job_name + ' ' + event['metadata'].get('job_id', ''): (job, event)
+        for event in filter(lambda x: x['kind'] in (WORKFLOW, GENERATORRESULT), events)
+        for job_name, job in event.get('jobs', {}).items()
+        if job.get('steps')
+    }
+
+
 def _uses_inception(events: List[Dict[str, Any]]) -> bool:
     """Determine if a workflow is the inception workflow."""
     workflow_event = next(
-        (event for event in events if event['kind'] == 'Workflow'), None
+        (event for event in events if event['kind'] == WORKFLOW), None
     )
     if not workflow_event:
         raise ValueError('No Workflow event in workflow events...')
@@ -156,49 +205,44 @@ def _get_testresult_params(param_step_id: str, job: Dict[str, Any]) -> Dict[str,
     ].pop()
 
 
-def _create_testresult_labels(
-    exec_step: Dict[str, Any],
-    job_name: str,
-    job: Dict[str, Any],
-    parent: Dict[str, Any],
-) -> Dict[str, Any]:
-    """Create labels for test result.
+def _get_testcase_timestamps_and_job_id(step_origin: str, events: List[Dict[str, Any]]):
+    def _is_origin_provider(event: Dict[str, Any]) -> bool:
+        return event['metadata']['step_id'] == step_origin
 
-    # Required parameters
+    def _is_origin_execution(event: Dict[str, Any]) -> bool:
+        return step_origin in event['metadata']['step_origin']
 
-    - exec_step: a dictionary, the 'execute' step
-    - job_name: a string (the name of the job containing exec_step)
-    - job: a dictionary, the job containing exec_step
-    - parent: a dictionary, the event defining the job
-
-    # Returned value
+    creation = _get_metadata(_is_origin_provider, events, PROVIDERCOMMAND)
+    start = _get_metadata(_is_origin_execution, events, EXECUTIONCOMMAND)
+    end = _get_metadata(_is_origin_execution, reversed(events), EXECUTIONRESULT)
 
-    A labels dictionary.
-    """
-    exec_step_id = exec_step['id']
-    labels = {
-        'job': job_name.split()[0],
-        'uses': exec_step['uses'],
-        'technology': exec_step['uses'].partition('/')[0],
-        'runs-on': _as_list(job['runs-on']),
-        'managed': False,
+    return {
+        'creationTimestamp': creation.get(CREATION_TIMESTAMP, None),
+        'startTime': start.get(CREATION_TIMESTAMP, None),
+        'endTime': end.get(CREATION_TIMESTAMP, None),
+        'job_id': creation.get('job_id', None),
     }
 
-    if not (managedtests := parent['metadata'].get('managedTests')):
-        return labels
+
+def _complete_labels(
+    labels: Dict[str, Any],
+    exec_step_id: str,
+    managedtests: Dict[str, Any],
+    job: Dict[str, Any],
+) -> Dict[str, Any]:
     testcases = managedtests.get('testCases')
     if not testcases or exec_step_id not in testcases:
         if not testcases:
-            warning(
+            current_app.logger.warning(
                 f'Was expecting a "testCases" part in parent of step {exec_step_id}, ignoring.'
             )
         return labels
 
-    labels['managed'] = True
+    labels['test']['managed'] = True
     testcase_metadata = testcases[exec_step_id]
-    labels['technology-name'] = testcase_metadata['technology']
-    labels['collection'] = managedtests.get('testPlan', {})
-    labels.update(
+    labels['test']['technology-name'] = testcase_metadata['technology']
+    labels['test']['collection'] = managedtests.get('testPlan', {})
+    labels['test'].update(
         {
             key: value
             for key, value in testcase_metadata.items()
@@ -216,15 +260,68 @@ def _create_testresult_labels(
     )
     try:
         params = _get_testresult_params(testcase_metadata['param_step_id'], job)
-        labels['global'] = params.get('global', {})
-        labels['data'] = params.get('test', {})
+        labels['test']['global'] = params.get('global', {})
+        labels['test']['data'] = params.get('test', {})
     except IndexError:
-        warning(
+        current_app.logger.warning(
            f'Could not find "params" step associated to "execute" step {exec_step_id}, ignoring.'
        )
     return labels
 
 
+def _create_testresult_labels(
+    events: List[Dict[str, Any]],
+    step_origin: str,
+    exec_step: Dict[str, Any],
+    job_name: str,
+    job: Dict[str, Any],
+    parent: Dict[str, Any],
+) -> Dict[str, Any]:
+    """Create labels for test result.
+
+    # Required parameters
+
+    - events: a list, workflow events
+    - step_origin: a string, the 'execute' step uuid
+    - exec_step: a dictionary, the 'execute' step
+    - job_name: a string (the name of the job containing exec_step)
+    - job: a dictionary, the job containing exec_step
+    - parent: a dictionary, the event defining the job
+
+    # Returned value
+
+    A labels dictionary.
+    """
+    exec_step_id = exec_step['id']
+    times_jobid = _get_testcase_timestamps_and_job_id(step_origin, events)
+    labels = {
+        'apiVersion': 'testing.opentestfactory.org/v1alpha1',
+        'kind': 'TestCase',
+        'metadata': {
+            'creationTimestamp': times_jobid[CREATION_TIMESTAMP],
+            'execution_id': exec_step_id,
+            'job_id': times_jobid['job_id'],
+            'namespace': parent['metadata']['namespace'],
+            'workflow_id': parent['metadata']['workflow_id'],
+        },
+        'test': {
+            'job': job_name.split()[0],
+            'managed': False,
+            'runs-on': _as_list(job['runs-on']),
+            'technology': exec_step['uses'].partition('/')[0],
+            'test': exec_step.get('with', {}).get('test'),
+            'uses': exec_step['uses'],
+        },
+        'execution': {
+            'startTime': times_jobid['startTime'],
+            'endTime': times_jobid['endTime'],
+        },
+    }
+    if not (managedtests := parent['metadata'].get('managedTests')):
+        return labels
+    return _complete_labels(labels, exec_step_id, managedtests, job)
+
+
 def _get_testresult_steporigin(
     attachment_origin: str, events: List[Dict[str, Any]]
 ) -> Optional[str]:
@@ -240,7 +337,9 @@ def _get_testresult_steporigin(
     A step ID (a string) or None.
     """
     for event in events:
-        if not (event['kind'] == 'ExecutionResult' and event.get('attachments')):
+        if not (
+            event['kind'] == EXECUTIONRESULT and event['metadata'].get('attachments')
+        ):
             continue
         metadata = event['metadata']
         for value in metadata.get('attachments', {}).values():
@@ -269,64 +368,55 @@ def _get_testresult_labels(
     A _labels_ dictionary or None.
     """
     if step_origin := _get_testresult_steporigin(attachment_origin, events):
-        jobs_with_steps = {
-            job_name + ' ' + event['metadata'].get('job_id', ''): (job, event)
-            for event in events
-            for job_name, job in event.get('jobs', {}).items()
-            if event['kind'] in ('Workflow', 'GeneratorResult') and job.get('steps')
-        }
+        jobs_with_steps = _get_workflow_jobs(events)
         for job_name, (job, parent) in jobs_with_steps.items():
             for exec_step in job['steps']:
                 if exec_step.get('id') == step_origin:
-                    return _create_testresult_labels(exec_step, job_name, job, parent)
+                    return _create_testresult_labels(
+                        events, step_origin, exec_step, job_name, job, parent
+                    )
     return None
 
 
-def _get_timestamp(
-    event: Dict[str, Any], providerid_creationtimestamps: Dict[str, str]
-) -> str:
-    """Return first provider creationtimestamp or ''.
-
-    # Required parameters
-
-    - event: an ExecutionResult object
-    - providerid_creationtimestamps: a dictionary
-    """
-    for origin_id in event['metadata'].get('step_origin', []):
-        if origin_id in providerid_creationtimestamps:
-            return providerid_creationtimestamps[origin_id]
-    return ''
-
-
-def _get_testresult_timestamps(
-    events: List[Dict[str, Any]],
-    testresults: List[Dict[str, Any]],
-    testcase_metadata: Dict[str, Any],
-):
-    """Set timestamp for each testcase in testcase_metadata.
-
-    The timestamp is the one of the originating ProviderResult.
-    """
-    providerid_creationtimestamps = {
-        event['metadata']['step_id']: event['metadata'].get('creationTimestamp', '')
-        for event in events
-        if event['kind'] == 'ProviderResult'
+def _make_testcase_from_testresult(
+    item: Dict[str, Any],
+    labels: Dict[str, Any],
+    scope: str,
+) -> Dict[str, Any]:
+    suite_name, testcase_name = parse_testcase_name(item['name'])
+    item_data = {
+        'metadata': {
+            'name': item['name'],
+            'id': item['id'],
+        },
+        'test': {
+            'outcome': item['status'].lower(),
+            'suiteName': suite_name,
+            'testCaseName': testcase_name,
+        },
+        'status': item['status'],
+        'execution': {
+            'duration': item.get('duration', 0),
+        },
     }
-
-    origins_results = defaultdict(list)
-    for item in testresults:
-        for result in item['spec']['testResults']:
-            origins_results[result['attachment_origin']].append(result['id'])
-
-    for event in filter(lambda event: event['kind'] == 'ExecutionResult', events):
-        for attachment in event['metadata'].get('attachments', {}).values():
-            if attachment['uuid'] in origins_results:
-                timestamp = _get_timestamp(event, providerid_creationtimestamps)
-                for result_id in origins_results[attachment['uuid']]:
-                    testcase_metadata[result_id]['timestamp'] = timestamp
+    if item['status'] in FAILURE_STATUSES:
+        for key in DETAILS_KEYS:
+            if item.get(key):
+                item_data['execution'][key] = item[key]
+    if item.get('errorsList'):
+        item_data['execution']['errorsList'] = item['errorsList']
+    testcase = _merge_dicts(labels, item_data)
+    try:
+        if not in_scope(scope, testcase):
+            return {}
+    except ValueError as err:
+        raise ValueError(f'[SCOPE ERROR] {err}')
+    return testcase
 
 
-def get_testcases(events: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
+def get_testcases(
+    events: List[Dict[str, Any]], scope: str = 'true'
+) -> Dict[str, Dict[str, Any]]:
     """Extract metadata for each test result.
 
     Test results are Notification events with a `.spec.testResults`
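
For context, a minimal sketch (not part of the package) of the new scope filtering shown above: each merged test case document is passed to `in_scope` as the evaluation context, so a caller can restrict `get_testcases` (continued in the next hunk) to a subset. The exact expression syntax is whatever `opentf.commons.expressions.evaluate_bool` accepts; a GitHub-Actions-like form is assumed here, and the event list is illustrative:

```python
from opentf.commons.datasources import get_testcases


def failed_testcases(events):
    # Hypothetical helper: keep only failed or errored test cases.  The
    # scope is evaluated against each merged TestCase document, so dotted
    # paths such as test.outcome refer to its nested keys.
    scope = "test.outcome == 'failure' || test.outcome == 'error'"
    try:
        return get_testcases(events, scope)
    except ValueError as err:
        # No matching test case, no test results at all, or a scope error
        # (the latter is prefixed with '[SCOPE ERROR] ').
        print(err)
        return {}
```
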
@@ -338,85 +428,66 @@ def get_testcases(events: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
 
     # Returned value
 
-    A possibly empty dictionary. Keys are the testresult IDs, values
-    are dictionaries with the following entries:
-
-    - name: a string, the test case name
-    - status: a string, the test case status
-    - duration: a string, the test case execution time in ms
-    - timestamp: a string, provider creation timestamp
-    - test: a dictionary, the test case metadata
-    - failureDetails|errorDetails|warningDetails: a dictionary with test
-      case failure details
-    - errorsList: a Robot Framework specific list with execution general
-      errors
+    A possibly empty dictionary. Keys are the test result IDs, values
+    are dictionaries with test case metadata, labels, status, and
+    execution info.
 
     `testcases` is a dictionary of entries like:
 
     ```
-    "<<<testcase_uuid>>>": {
-        "name": "<<<[Test suite#]Test case name>>>",
-        "status": "<<<SUCCESS|FAILURE|ERROR|SKIPPED>>>",
-        "duration": "<<<test execution time in ms>>>",
-        "timestamp": "<<<provider creation timestamp>>>"
-        "test": {
-            "job": "<<<job name>>>",
-            "uses": "<<<provider function>>>",
-            "technology": "<<<test technology>>>",
-            "runs-on": [<<<list of execution environment tags>>>],
-            "managed": boolean, True for test cases managed by a test referential
-            "status": "<<<SUCCESS|FAILURE|ERROR|SKIPPED>>>"
-        },
-        "failureDetails"|"errorDetails"|"warningDetails": {
-            "message": "<<<error message>>>",
-            "type": "<<<error type>>>",
-            "text": "<<<error trace>>>"
-        },
-        "errorsList": [
-            {
-                "message": "<<<Robot Framework general error message>>>",
-                "timestamp": "<<<Robot Framework error message timestamp>>>"
-            }
-        ]
-    }
+    apiVersion: testing.opentestfactory.org/v1alpha1
+    kind: TestCase
+    metadata:
+      name: <<<Test case full name>>>
+      id: <<<Test case uuid>>>
+      job_id: <<<Test case job uuid>>>
+      execution_id: <<<Test case attachment origin uuid>>>
+      workflow_id: <<<Test case workflow uuid>>>
+      namespace: <<<Test case namespace>>>
+      creationTimestamp: <<<Test case provider creation timestamp>>>
+    test:
+      runs-on: <<<Test case execution environment tags>>>
+      uses: <<<Test case provider>>>
+      technology: <<<Test case technology>>>
+      managed: bool <<<True for test referential managed test cases>>>
+      job: <<<Test case job name>>>
+      test: <<<Test case test reference>>>
+      suiteName: <<<Test case suite>>>
+      testCaseName: <<<Test case short name>>>
+      outcome: <<<success|failure|skipped|error>>>
+    status: <<<SUCCESS|FAILURE|SKIPPED|ERROR>>>
+    execution:
+      startTime: <<<Test case execution start time>>>
+      endTime: <<<Test case execution end time>>>
+      duration: <<<Test case execution duration (from result notification)>>>
+      errorsList: [<<<Test case general execution errors>>>]
+      (failure|warning|error)Details: {<<<Test case failure details>>>}
     ```
 
     # Raised exceptions
 
     A _ValueError_ exception is raised if there were no test results in
-    `events`.
+    `events` or some scope errors occurred retrieving test results.
     """
-    testcases = {}
-    results = False
     if _uses_inception(events):
         testresults = _get_inception_testresults(events)
     else:
         testresults = get_testresults(events)
 
+    if not testresults:
+        raise ValueError('No test results in events.')
+
+    testcases = {}
     for testresult in testresults:
-        results = True
-        labels = _get_testresult_labels(
-            testresult['metadata']['attachment_origin'][0], events
-        )
+        execution_id = testresult['metadata']['attachment_origin'][0]
+        labels = _get_testresult_labels(execution_id, events)
         if not labels:
             continue
-        for testcase in testresult['spec']['testResults']:
-            testcases[testcase['id']] = {
-                'name': testcase['name'],
-                'status': testcase['status'],
-                'duration': testcase.get('duration', 0),
-                'test': labels.copy(),
-            }
-            testcases[testcase['id']]['test']['status'] = testcase['status']
-            data = {}
-            if testcase['status'] in FAILURE_STATUSES:
-                data = {key: testcase[key] for key in DETAILS_KEYS if testcase.get(key)}
-            if testcase.get('errorsList'):
-                data['errorsList'] = testcase['errorsList']
-            testcases[testcase['id']].update(data)
-    if not results:
-        raise ValueError('No test results in events.')
-    _get_testresult_timestamps(events, testresults, testcases)
+        for item in testresult['spec']['testResults']:
+            if testcase := _make_testcase_from_testresult(item, labels, scope):
+                testcases[item['id']] = testcase
+    if not testcases:
+        raise ValueError(f'No test cases matching scope `{scope}`.')
     return testcases
 
 
@@ -424,9 +495,30 @@ def get_testcases(events: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
 ## Datasource: Tags
 
 
-def get_tags(
-    events: List[Dict[str, Any]], testcase_metadata: Optional[Dict[str, Any]] = None
-) -> Dict[str, Any]:
+def _make_tag_datasource(tag: str, parent: Dict[str, Any]) -> Dict[str, Any]:
+    return {
+        'apiVersion': 'opentestfactory.org/v1alpha1',
+        'kind': 'Tag',
+        'metadata': {
+            'name': tag,
+            'workflow_id': parent['metadata']['workflow_id'],
+            'namespace': parent['metadata']['namespace'],
+        },
+        'status': {
+            'jobCount': 0,
+            'testCaseCount': 0,
+            'testCaseStatusSummary': {
+                'success': 0,
+                'failure': 0,
+                'error': 0,
+                'skipped': 0,
+                'cancelled': 0,
+            },
+        },
+    }
+
+
+def get_tags(events: List[Dict[str, Any]], scope: str = 'true') -> Dict[str, Any]:
     """Extract metadata for each execution environment tag.
 
     # Required parameters:
@@ -435,35 +527,51 @@ def get_tags(
 
     # Returned value:
 
-    A dictionary. Keys are tags names, values are dictionaries with testcase
-    by tag status counters.
+    A dictionary. Keys are tag names, values are dictionaries with tag metadata and status.
 
     `tags` is a dictionary of entries like:
 
     ```
-    "<<<tag name>>>": {
-        "FAILURE": <<<failed tests count>>>,
-        "SUCCESS": <<<passed tests count>>>,
-        "SKIPPED": <<<skipped tests count>>>,
-        "ERROR": <<<technical KO tests count>>>,
-        'total': <<<total tests count>>>,
-        'other': <<<SKIPPED + ERROR tests count>>>
-    }
+    apiVersion: opentestfactory.org/v1alpha1
+    kind: Tag
+    metadata:
+      name: <<<Tag name>>>
+      workflow_id: <<<Tag workflow id>>>
+      namespace: <<<Tag namespace>>>
+    status:
+      jobCount: <<<Tag related jobs count>>>
+      testCaseCount: <<<Tag related test cases count>>>
+      testCaseStatusSummary: <<<Tag test case count by status>>>
+        success: N
+        failure: N
+        error: N
+        skipped: N
+        cancelled: N
     ```
     """
+    if not (jobs := _get_workflow_jobs(events)):
+        raise ValueError(
+            'No job events found in workflow. Cannot extract data for tags.'
+        )
     try:
-        testcase_metadata = testcase_metadata or get_testcases(events)
+        testcases = get_testcases(events, scope)
     except ValueError as err:
-        raise ValueError(str(err) + ' Cannot extract metadata for tags.')
+        if str(err).startswith('[SCOPE ERROR] '):
+            raise ValueError(str(err))
+        current_app.logger.debug(str(err))
+        testcases = {}
     tags = {}
-    for testcase in testcase_metadata.values():
+    for job, parent in jobs.values():
+        for tag in job['runs-on']:
+            tags.setdefault(tag, _make_tag_datasource(tag, parent))
+            tags[tag]['status']['jobCount'] += 1
+
+    for testcase in testcases.values():
         for tag in testcase['test']['runs-on']:
-            tags.setdefault(tag, {SUCCESS: 0, FAILURE: 0, ERROR: 0, SKIPPED: 0})
-            tags[tag][testcase['status']] += 1
-    for tag, counts in tags.items():
-        counts['total'] = sum(counts[status] for status in STATUSES_ORDER)
-        counts['other'] = sum(counts[status] for status in (SKIPPED, ERROR))
-        tags[tag] = {k.lower(): v for k, v in counts.items()}
+            tags[tag]['status']['testCaseCount'] += 1
+            tags[tag]['status']['testCaseStatusSummary'][
+                testcase['test']['outcome']
+            ] += 1
     return tags
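
For context, a minimal sketch (not part of the package) of how the new Tag datasource might be consumed; the event list and output format are illustrative:

```python
from opentf.commons.datasources import get_tags


def summarize_tags(events):
    # Hypothetical consumer: one summary line per execution environment tag.
    # get_tags raises ValueError when the workflow has no jobs, and lets
    # '[SCOPE ERROR] ' ValueErrors from get_testcases propagate.
    for name, tag in get_tags(events).items():
        status = tag['status']
        print(
            f"{name}: {status['jobCount']} job(s), "
            f"{status['testCaseCount']} test case(s), "
            f"{status['testCaseStatusSummary']['failure']} failure(s)"
        )
```
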
 
 
@@ -471,32 +579,119 @@ def get_tags(
 ## Datasource: Jobs
 
 
-def _evaluate_test_results(jobs_testcases: Dict[str, Any]) -> Dict[str, Any]:
-    """Summarize job testcases.
+def _collect_job_times_and_id(
+    events: List[Dict[str, Any]], request_metadata: Dict[str, Any]
+) -> Dict[str, Any]:
+    """Collect job start and end time, if available.
+
+    # Required parameters
 
-    # Returned value
+    - events: a list of events
+    - request_metadata: the channel request metadata for the job
+
+    # Returned object
 
-    A dictionary with one entry per job (a dictionary with keys being
-    statuses and values being counts).
+    A dictionary with the following entries:
+
+    - job_id
+    - requestTime
+
+    If the job started, it contains the additional entries:
+
+    - startTime
+    - endTime
+    - duration
     """
-    summaries = {}
-    for job, testcases in jobs_testcases.items():
-        successes, failures, errors, skipped = [
-            _get_sum_for_status(testcases, status) for status in STATUSES_ORDER
-        ]
-        summaries[job] = {
-            SUCCESS: successes,
-            FAILURE: failures,
-            ERROR: errors,
-            SKIPPED: skipped,
-            TOTAL: len(testcases),
-        }
-    return summaries
+    job_id = request_metadata['job_id']
+    request_time = request_metadata[CREATION_TIMESTAMP]
+
+    start = end = None
+    for event in events:
+        metadata = event['metadata']
+        kind_step_id = (event['kind'], metadata['step_sequence_id'], metadata['job_id'])
+        if kind_step_id == (EXECUTIONCOMMAND, 0, job_id):
+            start = metadata[CREATION_TIMESTAMP]
+        elif kind_step_id == (EXECUTIONRESULT, -2, job_id):
+            end = metadata[CREATION_TIMESTAMP]
+        if start and end:
+            break
+    else:
+        return {'job_id': job_id, 'requestTime': request_time}
+
+    return {
+        'requestTime': request_time,
+        'startTime': start,
+        'endTime': end,
+        'duration': (
+            datetime.fromisoformat(end) - datetime.fromisoformat(start)
+        ).total_seconds()
+        * 1000,
+        'job_id': job_id,
+    }
 
 
-def get_jobs(
-    events: List[Dict[str, Any]], testcase_metadata: Optional[Dict[str, Any]] = None
+def _make_job_datasource(
+    job_name: str,
+    request_metadata: Dict[str, Any],
+    job: Dict[str, Any],
+    parent: Dict[str, Any],
+    events: List[Dict[str, Any]],
 ) -> Dict[str, Any]:
+    """Make datasource object for job.
+
+    # Required parameters
+
+    - job_name: a string, the 'short' job name
+    - request_metadata: the channel request metadata for the job or {}
+    - job: a dictionary, the job definition
+    - parent: a workflow or a generatorresult event
+    - events: a list of events
+
+    # Returned value
+
+    A 'Job' datasource object.
+    """
+    if request_metadata:
+        job_times_id = _collect_job_times_and_id(events, request_metadata)
+    else:
+        job_times_id = {}
+
+    return {
+        'apiVersion': 'opentestfactory.org/v1alpha1',
+        'kind': 'Job',
+        'metadata': {
+            'name': job_name,
+            'id': job_times_id.get('job_id'),
+            'namespace': parent['metadata']['namespace'],
+            'workflow_id': parent['metadata']['workflow_id'],
+            'creationTimestamp': parent['metadata'][CREATION_TIMESTAMP],
+        },
+        'spec': {
+            'runs-on': job['runs-on'],
+            'variables': {
+                **parent.get('variables', {}),
+                **job.get('variables', {}),
+            },
+        },
+        'status': {
+            'phase': 'SUCCEEDED',
+            'requestTime': job_times_id.get('requestTime'),
+            'startTime': job_times_id.get('startTime'),
+            'endTime': job_times_id.get('endTime'),
+            'duration': job_times_id.get('duration'),
+            'testCaseCount': 0,
+            'testCaseStatusSummary': {
+                'success': 0,
+                'failure': 0,
+                'error': 0,
+                'skipped': 0,
+                'cancelled': 0,
+            },
+        },
+    }
+
+
+def get_jobs(events: List[Dict[str, Any]], scope: str = 'true') -> Dict[str, Any]:
     """Extract metadata for each job.
 
     # Required parameters:
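
For context, a minimal sketch (not part of the package) of the duration arithmetic used by `_collect_job_times_and_id` above: ISO 8601 timestamps are parsed with `datetime.fromisoformat` and the difference is reported in milliseconds. The timestamp values are made up:

```python
from datetime import datetime

start = '2024-05-17T09:15:02.123456'
end = '2024-05-17T09:15:32.623456'

# Same computation as in the diff: seconds difference scaled to milliseconds.
duration_ms = (
    datetime.fromisoformat(end) - datetime.fromisoformat(start)
).total_seconds() * 1000
assert duration_ms == 30500.0
```
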
@@ -505,60 +700,88 @@ def get_jobs(
 
     # Returned value:
 
-    A dictionary. Keys are job names, values are dictionaries with the
-    following entries:
-
-    - name: a string, job name
-    - runs-on: a list, execution environment tags
-    - testcases: a dictionary, job-related test cases
-    - counts: a dictionary, tests statuses count by job
-    - variables: a dictionary, job-related environment variables
+    A dictionary. Keys are job names, values are dictionaries with
+    job metadata, spec, and status.
 
     `jobs_testcases` is a dictionary of entries like:
 
     ```
-    "<<<job_name>>>": {
-        "runs-on": [<<<execution environment tags>>>],
-        "counts": {
-            "FAILURE": <<<failed tests count>>>,
-            "SUCCESS": <<<passed tests count>>>,
-            "ERROR": <<<technical KOs count>>>,
-            "SKIPPED": <<<skipped tests count>>>,
-            "total count": <<<total tests count>>>
-        },
-        "variables": {
-            "<<<variable name>>>": "<<<variable value>>>",
-            ...
-        }
-    }
+    apiVersion: opentestfactory.org/v1alpha1
+    kind: Job
+    metadata:
+      name: <<<Job name>>>
+      id: <<<Job uuid>>>
+      namespace: <<<Job namespace>>>
+      workflow_id: <<<Job workflow id>>>
+      creationTimestamp: <<<Job creation timestamp>>>
+    spec:
+      runs-on: <<<Job execution environment tags>>>
+      variables: <<<Workflow and job specific environment variables>>>
+    status:
+      phase: <<<Job phase>>>
+      requestTime: <<<Job execution environment request time>>>
+      startTime: <<<Job start time>>>
+      endTime: <<<Job end time>>>
+      duration: <<<Job duration (endTime - startTime)>>>
+      testCaseCount: <<<Job test case count>>>
+      testCaseStatusSummary: <<<Job test case count by status>>>
+        success: N
+        failure: N
+        error: N
+        skipped: N
+        cancelled: N
     ```
     """
+
+    def _matches(item, items):
+        if item and items:
+            return items[-1] == item
+        return not item and not items
+
+    if not (workflow_jobs := _get_workflow_jobs(events)):
+        raise ValueError(
+            'No job events found in workflow. Cannot extract data for jobs.'
+        )
+
     try:
-        testcase_metadata = testcase_metadata or get_testcases(events)
+        testcases = get_testcases(events, scope)
     except ValueError as err:
-        raise ValueError(str(err) + ' Cannot extract metadata for jobs.')
-    jobs_testcases = _get_sorted_testcases(testcase_metadata, ['test', 'job'])
-    if 'KeyError' in jobs_testcases:
-        raise ValueError('Cannot get jobs-ordered dataset from testcases.')
-    job_events = [
-        event
-        for event in events
-        if event['kind'] == 'ExecutionCommand'
-        and event['metadata']['step_sequence_id'] == -1
-        and event['metadata']['name'] in jobs_testcases
-    ]
-    results = _evaluate_test_results(jobs_testcases)
+        if str(err).startswith('[SCOPE ERROR] '):
+            raise ValueError(str(err))
+        current_app.logger.debug(str(err))
+        testcases = {}
+
+    jobs_events = list(
+        filter(
+            lambda event: event['kind'] in (EXECUTIONCOMMAND, EXECUTIONRESULT)
+            and event['metadata']['step_sequence_id'] in (0, -1, -2),
+            events,
+        )
+    )
     jobs = {}
-    for job in job_events:
-        job_name = job['metadata']['name']
-        jobs.setdefault(
-            job['metadata']['name'],
-            {
-                'runs-on': job['runs-on'],
-                'counts': results[job_name],
-                'variables': {
-                    name: value for name, value in job.get('variables', {}).items()
-                },
-            },
+    for job_name, (job, parent) in workflow_jobs.items():
+        name, _, uuid = job_name.partition(' ')
+        channel_request_metadata = next(
+            (
+                event
+                for event in jobs_events
+                if event['kind'] == EXECUTIONCOMMAND
+                and event['metadata']['step_sequence_id'] == -1
+                and event['metadata']['name'] == name
+                and _matches(uuid, event['metadata']['job_origin'])
+            ),
+            {'metadata': {}},
+        )['metadata']
+
+        data = _make_job_datasource(
+            name, channel_request_metadata, job, parent, jobs_events
        )
+        jobs[data['metadata']['id']] = data
+
+    for testcase in testcases.values():
+        jobs[testcase['metadata']['job_id']]['status']['testCaseCount'] += 1
+        jobs[testcase['metadata']['job_id']]['status']['testCaseStatusSummary'][
+            testcase['test']['outcome']
+        ] += 1
+
     return jobs
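
For context, a minimal sketch (not part of the package) of the keying convention the new Job datasource relies on: `_get_workflow_jobs` keys each job as `'<name> <job_id>'`, and `get_jobs` splits that key back apart to match it against the channel request event. The values are made up:

```python
# A key produced by _get_workflow_jobs: job name, a space, then the job id
# (empty when the event carries no job_id).
job_key = 'build 9d2f1c3e-0000-0000-0000-000000000000'

name, _, uuid = job_key.partition(' ')
assert (name, uuid) == ('build', '9d2f1c3e-0000-0000-0000-000000000000')


# Same rule as the nested _matches helper in get_jobs: the job id must equal
# the last entry of the event's job_origin list, or both sides must be empty.
def matches(item, items):
    if item and items:
        return items[-1] == item
    return not item and not items


assert matches(uuid, ['root-id', '9d2f1c3e-0000-0000-0000-000000000000'])
assert not matches(uuid, [])
```
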
opentf_toolkit_nightly-0.55.0.dev941.dist-info/METADATA → opentf_toolkit_nightly-0.55.0.dev962.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentf-toolkit-nightly
-Version: 0.55.0.dev941
+Version: 0.55.0.dev962
 Summary: OpenTestFactory Orchestrator Toolkit
 Home-page: https://gitlab.com/henixdevelopment/open-source/opentestfactory/python-toolkit
 Author: Martin Lafaix
opentf_toolkit_nightly-0.55.0.dev941.dist-info/RECORD → opentf_toolkit_nightly-0.55.0.dev962.dist-info/RECORD

@@ -1,7 +1,7 @@
-opentf/commons/__init__.py,sha256=ITzg1zfZgA5-4wvmJfjLN94_Z06HeMl0szd6dalrrKY,21839
+opentf/commons/__init__.py,sha256=KRY8ShQw_0ZZ0oEOiKi4-xnWofE_QsFjEe1T8wUxJ-w,21952
 opentf/commons/auth.py,sha256=bM2Z3kxm2Wku1lKXaRAIg37LHvXWAXIZIqjplDfN2P8,15899
 opentf/commons/config.py,sha256=GmvInVnUsXIwlNfgTQeQ_pPs97GeGTGn2S2QZEFwss8,7828
-opentf/commons/datasources.py,sha256=IKljfR59JSRutXF1Q10MwCuhH5bnVLjo0h7IteNFn_U,18191
+opentf/commons/datasources.py,sha256=8qWC6KFiFGiIOEH7FGdbP_4hd_gVJFxbpR-051rhNZI,24452
 opentf/commons/expressions.py,sha256=A68F27Our8oVVphUrRvB5haSlqj2YCrH2OxHPNLBio4,19251
 opentf/commons/pubsub.py,sha256=7khxAHVZiwJRcwIBJ6MPR-f3xY9144-2eNLROwq5F-4,5894
 opentf/commons/schemas.py,sha256=lokZCU-wmsIkzVA-TVENtC7Io_GmYxrP-FQaOOowg4s,4044
@@ -48,8 +48,8 @@ opentf/scripts/startup.py,sha256=Da2zo93pBWbdRmj-wgekgLcF94rpNc3ZkbvR8R0w8XY,212
 opentf/toolkit/__init__.py,sha256=g3DiTZlSvvzZWKgM8qU47muLqjQrpWZ6M6PWZ-sBsvQ,19610
 opentf/toolkit/channels.py,sha256=Cng3b4LUsxvCHUbp_skys9CFcKZMfcKhA_ODg_EAlIE,17156
 opentf/toolkit/core.py,sha256=L1fT4YzwZjqE7PUXhJL6jSVQge3ohBQv5UBb9DAC6oo,9320
-opentf_toolkit_nightly-0.55.0.dev941.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-opentf_toolkit_nightly-0.55.0.dev941.dist-info/METADATA,sha256=Z6JJRflN4y9tRp6aCn8WWhNF3b6J6PIU1jbndC5ys_4,1945
-opentf_toolkit_nightly-0.55.0.dev941.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-opentf_toolkit_nightly-0.55.0.dev941.dist-info/top_level.txt,sha256=_gPuE6GTT6UNXy1DjtmQSfCcZb_qYA2vWmjg7a30AGk,7
-opentf_toolkit_nightly-0.55.0.dev941.dist-info/RECORD,,
+opentf_toolkit_nightly-0.55.0.dev962.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+opentf_toolkit_nightly-0.55.0.dev962.dist-info/METADATA,sha256=0kY47GzqpwhnPRyFp-IcSB_fDNh0tIIcgwKgH9IRnck,1945
+opentf_toolkit_nightly-0.55.0.dev962.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+opentf_toolkit_nightly-0.55.0.dev962.dist-info/top_level.txt,sha256=_gPuE6GTT6UNXy1DjtmQSfCcZb_qYA2vWmjg7a30AGk,7
+opentf_toolkit_nightly-0.55.0.dev962.dist-info/RECORD,,