opentf-toolkit-nightly 0.55.0.dev950__py3-none-any.whl → 0.55.0.dev968__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,14 +12,16 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- """Test case metadata retrieval helpers"""
+ """Datasources (testcases, tags and jobs) retrieval helpers"""

- from typing import Any, Dict, List, Optional
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple

- from collections import defaultdict
+ from datetime import datetime
+
+
+ from flask import current_app

  from opentf.commons.expressions import evaluate_bool
- from opentf.toolkit.core import warning


  ########################################################################
@@ -35,34 +37,62 @@ DETAILS_KEYS = ('failureDetails', 'errorDetails', 'warningDetails')
  STATUSES_ORDER = (SUCCESS, FAILURE, ERROR, SKIPPED)
  FAILURE_STATUSES = (FAILURE, ERROR)

+ PROVIDERCOMMAND = 'ProviderCommand'
+ EXECUTIONCOMMAND = 'ExecutionCommand'
+ EXECUTIONRESULT = 'ExecutionResult'
+ WORKFLOW = 'Workflow'
+ GENERATORRESULT = 'GeneratorResult'
+ CREATION_TIMESTAMP = 'creationTimestamp'
+
  ########################################################################
  ## Helpers


- def _get_path(src: Dict[str, Any], path: List[str]) -> Any:
-     if not path:
-         return src
-     try:
-         return _get_path(src[path[0]], path[1:])
-     except KeyError:
-         return 'KeyError'
+ def _merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> Dict[str, Any]:
+     for k, v in dict1.items():
+         if k in dict2:
+             dict2[k] = _merge_dicts(v.copy(), dict2[k])
+     dict3 = dict1.copy()
+     dict3.update(dict2)
+     return dict3


- def _get_sorted_testcases(
-     testcase_metadata: Dict[str, Any], path: List[str]
+ def _as_list(what) -> List[str]:
+     return [what] if isinstance(what, str) else what
+
+
+ def _get_metadata(
+     filter_: Callable, events: Iterable[Dict[str, Any]], kind_: str
  ) -> Dict[str, Any]:
-     sorted_testcases = {}
-     for testcase, data in testcase_metadata.items():
-         sorted_testcases.setdefault(_get_path(data, path), {})[testcase] = data
-     return sorted_testcases
+     """Get metadata of the first workflow event that satisfies filter.

+     # Required parameters

- def _get_sum_for_status(testcases: Dict[str, Any], status: str) -> int:
-     return sum(1 for testcase in testcases.values() if testcase['status'] == status)
+     - filter_: a callable, the filtering function
+     - events: a list of events or an iterator
+     - kind_: a string, the kind of events to consider

+     # Returned value

- def _as_list(what) -> List[str]:
-     return [what] if isinstance(what, str) else what
+     A possibly empty dictionary, the `.metadata` part of the
+     first event that satisfies the kind and filter conditions.
+     """
+     src = (event for event in events if event['kind'] == kind_)
+     return next(filter(filter_, src), {}).get('metadata', {})
+
+
+ def parse_testcase_name(full_name: str) -> Tuple[str, str]:
+     """Parse a test case name from a testResults notification.
+
+     full_name is a string: classname#testcase name
+
+     # Returned value
+
+     A tuple of two strings: suite and test case name.  If one of
+     the two parts is empty, the non-empty one is returned for both.
+     """
+     suite, _, name = full_name.partition('#')
+     return suite or name, name or suite


  ########################################################################
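For reference, a quick illustration of what the two new helpers added above return (inputs are made up; `_merge_dicts` is an internal helper that recurses into nested dictionaries present on both sides and lets the second argument win elsewhere):

```python
# Illustrative sketch only -- the import path is the module shown in this
# diff (opentf/commons/datasources.py); _merge_dicts is private to it.
from opentf.commons.datasources import _merge_dicts, parse_testcase_name

# 'classname#testcase name' is split on the first '#'.
assert parse_testcase_name('LoginSuite#nominal login') == ('LoginSuite', 'nominal login')
# Without a separator, the single component is reused for both parts.
assert parse_testcase_name('standalone test') == ('standalone test', 'standalone test')

labels = {'metadata': {'name': 'nominal login'}, 'test': {'managed': False}}
item_data = {'metadata': {'id': '42'}, 'status': 'SUCCESS'}
assert _merge_dicts(labels, item_data) == {
    'metadata': {'name': 'nominal login', 'id': '42'},
    'test': {'managed': False},
    'status': 'SUCCESS',
}
```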
@@ -95,10 +125,31 @@ def _has_testresult(item: Dict[str, Any]) -> bool:
      )


+ def _get_workflow_jobs(events: List[Dict[str, Any]]) -> Dict[str, Any]:
+     """Get the jobs with steps defined by the workflow events.
+
+     # Required parameters
+
+     - events: a list of events
+
+     # Returned value
+
+     A dictionary. Keys are job names, values are a (dict, event) pair.
+
+     _event_ is either a workflow or a generatorresult event.
+     """
+     return {
+         job_name + ' ' + event['metadata'].get('job_id', ''): (job, event)
+         for event in filter(lambda x: x['kind'] in (WORKFLOW, GENERATORRESULT), events)
+         for job_name, job in event.get('jobs', {}).items()
+         if job.get('steps')
+     }
+
+
  def _uses_inception(events: List[Dict[str, Any]]) -> bool:
      """Determine if a workflow is the inception workflow."""
      workflow_event = next(
-         (event for event in events if event['kind'] == 'Workflow'), None
+         (event for event in events if event['kind'] == WORKFLOW), None
      )
      if not workflow_event:
          raise ValueError('No Workflow event in workflow events...')
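A sketch of the map `_get_workflow_jobs` builds: keys combine the job name with the defining event's `job_id`, values pair the job definition with its parent event, and jobs without steps are skipped (the event below is made up for illustration):

```python
# Hypothetical Workflow event, reduced to the fields the helper reads.
workflow_event = {
    'kind': 'Workflow',
    'metadata': {'job_id': 'd4602ff0'},
    'jobs': {
        'smoke-tests': {'runs-on': ['linux'], 'steps': [{'uses': 'actions/checkout@v2'}]},
        'placeholder': {'runs-on': ['linux']},  # no steps, so it is ignored
    },
}

# Same comprehension as the added helper, applied to a one-event list.
jobs = {
    job_name + ' ' + event['metadata'].get('job_id', ''): (job, event)
    for event in filter(lambda x: x['kind'] in ('Workflow', 'GeneratorResult'), [workflow_event])
    for job_name, job in event.get('jobs', {}).items()
    if job.get('steps')
}
assert list(jobs) == ['smoke-tests d4602ff0']
```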
@@ -154,49 +205,44 @@ def _get_testresult_params(param_step_id: str, job: Dict[str, Any]) -> Dict[str,
      ].pop()


- def _create_testresult_labels(
-     exec_step: Dict[str, Any],
-     job_name: str,
-     job: Dict[str, Any],
-     parent: Dict[str, Any],
- ) -> Dict[str, Any]:
-     """Create labels for test result.
+ def _get_testcase_timestamps_and_job_id(step_origin: str, events: List[Dict[str, Any]]):
+     def _is_origin_provider(event: Dict[str, Any]) -> bool:
+         return event['metadata']['step_id'] == step_origin

-     # Required parameters
-
-     - exec_step: a dictionary, the 'execute' step
-     - job_name: a string (the name of the job containing exec_step)
-     - job: a dictionary, the job containing exec_step
-     - parent: a dictionary, the event defining the job
+     def _is_origin_execution(event: Dict[str, Any]) -> bool:
+         return step_origin in event['metadata']['step_origin']

-     # Returned value
+     creation = _get_metadata(_is_origin_provider, events, PROVIDERCOMMAND)
+     start = _get_metadata(_is_origin_execution, events, EXECUTIONCOMMAND)
+     end = _get_metadata(_is_origin_execution, reversed(events), EXECUTIONRESULT)

-     A labels dictionary.
-     """
-     exec_step_id = exec_step['id']
-     labels = {
-         'job': job_name.split()[0],
-         'uses': exec_step['uses'],
-         'technology': exec_step['uses'].partition('/')[0],
-         'runs-on': _as_list(job['runs-on']),
-         'managed': False,
+     return {
+         'creationTimestamp': creation.get(CREATION_TIMESTAMP, None),
+         'startTime': start.get(CREATION_TIMESTAMP, None),
+         'endTime': end.get(CREATION_TIMESTAMP, None),
+         'job_id': creation.get('job_id', None),
      }

-     if not (managedtests := parent['metadata'].get('managedTests')):
-         return labels
+
+ def _complete_labels(
+     labels: Dict[str, Any],
+     exec_step_id: str,
+     managedtests: Dict[str, Any],
+     job: Dict[str, Any],
+ ) -> Dict[str, Any]:
      testcases = managedtests.get('testCases')
      if not testcases or exec_step_id not in testcases:
          if not testcases:
-             warning(
+             current_app.logger.warning(
                  f'Was expecting a "testCases" part in parent of step {exec_step_id}, ignoring.'
              )
          return labels

-     labels['managed'] = True
+     labels['test']['managed'] = True
      testcase_metadata = testcases[exec_step_id]
-     labels['technology-name'] = testcase_metadata['technology']
-     labels['collection'] = managedtests.get('testPlan', {})
-     labels.update(
+     labels['test']['technology-name'] = testcase_metadata['technology']
+     labels['test']['collection'] = managedtests.get('testPlan', {})
+     labels['test'].update(
          {
              key: value
              for key, value in testcase_metadata.items()
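The timestamp lookup above boils down to `_get_metadata`: scan the events of a given kind and return the metadata of the first one matching the predicate. A self-contained sketch with made-up events (the helper body is restated inline so the snippet runs on its own):

```python
def _get_metadata(filter_, events, kind_):
    # Same logic as the added helper: metadata of the first matching event.
    src = (event for event in events if event['kind'] == kind_)
    return next(filter(filter_, src), {}).get('metadata', {})

events = [
    {'kind': 'ProviderCommand',
     'metadata': {'step_id': 'abc', 'creationTimestamp': 'T1', 'job_id': 'j1'}},
    {'kind': 'ExecutionCommand',
     'metadata': {'step_origin': ['abc'], 'creationTimestamp': 'T2'}},
    {'kind': 'ExecutionResult',
     'metadata': {'step_origin': ['abc'], 'creationTimestamp': 'T3'}},
]

creation = _get_metadata(lambda e: e['metadata']['step_id'] == 'abc', events, 'ProviderCommand')
end = _get_metadata(lambda e: 'abc' in e['metadata']['step_origin'], reversed(events), 'ExecutionResult')
assert (creation['creationTimestamp'], end['creationTimestamp']) == ('T1', 'T3')
```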
@@ -214,15 +260,68 @@ def _create_testresult_labels(
      )
      try:
          params = _get_testresult_params(testcase_metadata['param_step_id'], job)
-         labels['global'] = params.get('global', {})
-         labels['data'] = params.get('test', {})
+         labels['test']['global'] = params.get('global', {})
+         labels['test']['data'] = params.get('test', {})
      except IndexError:
-         warning(
+         current_app.logger.warning(
              f'Could not find "params" step associated to "execute" step {exec_step_id}, ignoring.'
          )
      return labels


+ def _create_testresult_labels(
+     events: List[Dict[str, Any]],
+     step_origin: str,
+     exec_step: Dict[str, Any],
+     job_name: str,
+     job: Dict[str, Any],
+     parent: Dict[str, Any],
+ ) -> Dict[str, Any]:
+     """Create labels for test result.
+
+     # Required parameters
+
+     - events: a list, workflow events
+     - step_origin: a string, the 'execute' step uuid
+     - exec_step: a dictionary, the 'execute' step
+     - job_name: a string (the name of the job containing exec_step)
+     - job: a dictionary, the job containing exec_step
+     - parent: a dictionary, the event defining the job
+
+     # Returned value
+
+     A labels dictionary.
+     """
+     exec_step_id = exec_step['id']
+     times_jobid = _get_testcase_timestamps_and_job_id(step_origin, events)
+     labels = {
+         'apiVersion': 'testing.opentestfactory.org/v1alpha1',
+         'kind': 'TestCase',
+         'metadata': {
+             'creationTimestamp': times_jobid[CREATION_TIMESTAMP],
+             'execution_id': exec_step_id,
+             'job_id': times_jobid['job_id'],
+             'namespace': parent['metadata']['namespace'],
+             'workflow_id': parent['metadata']['workflow_id'],
+         },
+         'test': {
+             'job': job_name.split()[0],
+             'managed': False,
+             'runs-on': _as_list(job['runs-on']),
+             'technology': exec_step['uses'].partition('/')[0],
+             'test': exec_step.get('with', {}).get('test'),
+             'uses': exec_step['uses'],
+         },
+         'execution': {
+             'startTime': times_jobid['startTime'],
+             'endTime': times_jobid['endTime'],
+         },
+     }
+     if not (managedtests := parent['metadata'].get('managedTests')):
+         return labels
+     return _complete_labels(labels, exec_step_id, managedtests, job)
+
+
  def _get_testresult_steporigin(
      attachment_origin: str, events: List[Dict[str, Any]]
  ) -> Optional[str]:
@@ -238,7 +337,9 @@ def _get_testresult_steporigin(
      A step ID (a string) or None.
      """
      for event in events:
-         if not (event['kind'] == 'ExecutionResult' and event.get('attachments')):
+         if not (
+             event['kind'] == EXECUTIONRESULT and event['metadata'].get('attachments')
+         ):
              continue
          metadata = event['metadata']
          for value in metadata.get('attachments', {}).values():
@@ -267,80 +368,49 @@ def _get_testresult_labels(
      A _labels_ dictionary or None.
      """
      if step_origin := _get_testresult_steporigin(attachment_origin, events):
-         jobs_with_steps = {
-             job_name + ' ' + event['metadata'].get('job_id', ''): (job, event)
-             for event in events
-             for job_name, job in event.get('jobs', {}).items()
-             if event['kind'] in ('Workflow', 'GeneratorResult') and job.get('steps')
-         }
+         jobs_with_steps = _get_workflow_jobs(events)
          for job_name, (job, parent) in jobs_with_steps.items():
              for exec_step in job['steps']:
                  if exec_step.get('id') == step_origin:
-                     return _create_testresult_labels(exec_step, job_name, job, parent)
+                     return _create_testresult_labels(
+                         events, step_origin, exec_step, job_name, job, parent
+                     )
      return None


- def _get_timestamp(
-     event: Dict[str, Any], providerid_creationtimestamps: Dict[str, str]
- ) -> str:
-     """Return first provider creationtimestamp or ''.
-
-     # Required parameters
-
-     - event: an ExecutionResult object
-     - providerid_creationtimestamps: a dictionary
-     """
-     for origin_id in event['metadata'].get('step_origin', []):
-         if origin_id in providerid_creationtimestamps:
-             return providerid_creationtimestamps[origin_id]
-     return ''
-
-
- def _get_testresult_timestamps(
-     events: List[Dict[str, Any]],
-     testcases: Dict[str, Any],
- ):
-     """Set timestamp for each testcase in testcases.
-
-     The timestamp is the one of the originating ProviderResult.
-     """
-     providerid_creationtimestamps = {
-         event['metadata']['step_id']: event['metadata'].get('creationTimestamp', '')
-         for event in events
-         if event['kind'] == 'ProviderResult'
-     }
-
-     origins_results = defaultdict(list)
-     for uuid, data in testcases.items():
-         origins_results[data['execution']].append(uuid)
-
-     for event in filter(lambda event: event['kind'] == 'ExecutionResult', events):
-         for attachment in event['metadata'].get('attachments', {}).values():
-             if attachment['uuid'] in origins_results:
-                 timestamp = _get_timestamp(event, providerid_creationtimestamps)
-                 for result_id in origins_results[attachment['uuid']]:
-                     testcases[result_id]['timestamp'] = timestamp
-
-
  def _make_testcase_from_testresult(
-     item: Dict[str, Any], execution_id: str, labels: Dict[str, Any], scope: str
+     item: Dict[str, Any],
+     labels: Dict[str, Any],
+     scope: str,
  ) -> Dict[str, Any]:
-     testcase = {
-         'name': item['name'],
+     suite_name, testcase_name = parse_testcase_name(item['name'])
+     item_data = {
+         'metadata': {
+             'name': item['name'],
+             'id': item['id'],
+         },
+         'test': {
+             'outcome': item['status'].lower(),
+             'suiteName': suite_name,
+             'testCaseName': testcase_name,
+         },
          'status': item['status'],
-         'duration': item.get('duration', 0),
-         'execution': execution_id,
-         'test': labels.copy(),
+         'execution': {
+             'duration': item.get('duration', 0),
+         },
      }
-     testcase['test']['status'] = item['status']
      if item['status'] in FAILURE_STATUSES:
          for key in DETAILS_KEYS:
              if item.get(key):
-                 testcase[key] = item[key]
+                 item_data['execution'][key] = item[key]
      if item.get('errorsList'):
-         testcase['errorsList'] = item['errorsList']
-     if not in_scope(scope, testcase):
-         return {}
+         item_data['execution']['errorsList'] = item['errorsList']
+     testcase = _merge_dicts(labels, item_data)
+     try:
+         if not in_scope(scope, testcase):
+             return {}
+     except ValueError as err:
+         raise ValueError(f'[SCOPE ERROR] {err}')
      return testcase


@@ -358,49 +428,40 @@ def get_testcases(

      # Returned value

-     A possibly empty dictionary. Keys are the testresult IDs, values
-     are dictionaries with the following entries:
-
-     - name: a string, the test case name
-     - status: a string, the test case status
-     - duration: a string, the test case execution time in ms
-     - execution: a string, the test case execution (=related attachment) uuid
-     - timestamp: a string, provider creation timestamp
-     - test: a dictionary, the test case metadata
-     - failureDetails|errorDetails|warningDetails: a dictionary with test
-       case failure details
-     - errorsList: a provider generated attachment specific list with execution general
-       errors (currently for Robot Framework only)
+     A possibly empty dictionary. Keys are the test result IDs, values
+     are dictionaries with test case metadata, labels, status, and
+     execution info.

      `testcases` is a dictionary of entries like:

      ```
-     "<<<testcase_uuid>>>": {
-         "name": "<<<[Test suite#]Test case name>>>",
-         "status": "<<<SUCCESS|FAILURE|ERROR|SKIPPED>>>",
-         "duration": "<<<test execution time in ms>>>",
-         "timestamp": "<<<provider creation timestamp>>>",
-         "execution": "<<<execution ID>>>",
-         "test": {
-             "job": "<<<job name>>>",
-             "uses": "<<<provider function>>>",
-             "technology": "<<<test technology>>>",
-             "runs-on": [<<<list of execution environment tags>>>],
-             "managed": boolean, True for test cases managed by a test referential
-             "status": "<<<SUCCESS|FAILURE|ERROR|SKIPPED>>>"
-         },
-         "failureDetails"|"errorDetails"|"warningDetails": {
-             "message": "<<<error message>>>",
-             "type": "<<<error type>>>",
-             "text": "<<<error trace>>>"
-         },
-         "errorsList": [
-             {
-                 "message": "<<<Robot Framework general error message>>>",
-                 "timestamp": "<<<Robot Framework error message timestamp>>>"
-             }
-         ]
-     }
+     apiVersion: testing.opentestfactory.org/v1alpha1
+     kind: TestCase
+     metadata:
+       name: <<<Test case full name>>>
+       id: <<<Test case uuid>>>
+       job_id: <<<Test case job uuid>>>
+       execution_id: <<<Test case attachment origin uuid>>>
+       workflow_id: <<<Test case workflow uuid>>>
+       namespace: <<<Test case namespace>>>
+       creationTimestamp: <<<Test case provider creation timestamp>>>
+     test:
+       runs-on: <<<Test case execution environment tags>>>
+       uses: <<<Test case provider>>>
+       technology: <<<Test case technology>>>
+       managed: bool <<<True for test referential managed test cases>>>
+       job: <<<Test case job name>>>
+       test: <<<Test case test reference>>>
+       suiteName: <<<Test case suite>>>
+       testCaseName: <<<Test case short name>>>
+       outcome: <<<success|failure|skipped|error>>>
+     status: <<<SUCCESS|FAILURE|SKIPPED|ERROR>>>
+     execution:
+       startTime: <<<Test case execution start time>>>
+       endTime: <<<Test case execution end time>>>
+       duration: <<<Test case execution duration (from result notification)>>>
+       errorsList: [<<<Test case general execution errors>>>]
+       (failure|warning|error)Details: {<<<Test case failure details>>>}
      ```

      # Raised exceptions
@@ -423,15 +484,10 @@ def get_testcases(
          if not labels:
              continue
          for item in testresult['spec']['testResults']:
-             if testcase := _make_testcase_from_testresult(
-                 item, execution_id, labels, scope
-             ):
+             if testcase := _make_testcase_from_testresult(item, labels, scope):
                  testcases[item['id']] = testcase
      if not testcases:
          raise ValueError(f'No test cases matching scope `{scope}`.')
-
-     _get_testresult_timestamps(events, testcases)
-
      return testcases


@@ -439,6 +495,29 @@ def get_testcases(
  ## Datasource: Tags


+ def _make_tag_datasource(tag: str, parent: Dict[str, Any]) -> Dict[str, Any]:
+     return {
+         'apiVersion': 'opentestfactory.org/v1alpha1',
+         'kind': 'Tag',
+         'metadata': {
+             'name': tag,
+             'workflow_id': parent['metadata']['workflow_id'],
+             'namespace': parent['metadata']['namespace'],
+         },
+         'status': {
+             'jobCount': 0,
+             'testCaseCount': 0,
+             'testCaseStatusSummary': {
+                 'success': 0,
+                 'failure': 0,
+                 'error': 0,
+                 'skipped': 0,
+                 'cancelled': 0,
+             },
+         },
+     }
+
+
  def get_tags(events: List[Dict[str, Any]], scope: str = 'true') -> Dict[str, Any]:
      """Extract metadata for each execution environment tag.

@@ -448,35 +527,51 @@ def get_tags(events: List[Dict[str, Any]], scope: str = 'true') -> Dict[str, Any

      # Returned value:

-     A dictionary. Keys are tags names, values are dictionaries with testcase
-     by tag status counters.
+     A dictionary. Keys are tag names, values are dictionaries with tag metadata and status.

      `tags` is a dictionary of entries like:

      ```
-     "<<<tag name>>>": {
-         "FAILURE": <<<failed tests count>>>,
-         "SUCCESS": <<<passed tests count>>>,
-         "SKIPPED": <<<skipped tests count>>>,
-         "ERROR": <<<technical KO tests count>>>,
-         'total': <<<total tests count>>>,
-         'other': <<<SKIPPED + ERROR tests count>>>
-     }
+     apiVersion: opentestfactory.org/v1alpha1
+     kind: Tag
+     metadata:
+       name: <<<Tag name>>>
+       workflow_id: <<<Tag workflow id>>>
+       namespace: <<<Tag namespace>>>
+     status:
+       jobCount: <<<Tag related jobs count>>>
+       testCaseCount: <<<Tag related test cases count>>>
+       testCaseStatusSummary: <<<Tag test case count by status>>>
+         success: N
+         failure: N
+         error: N
+         skipped: N
+         cancelled: N
      ```
      """
+     if not (jobs := _get_workflow_jobs(events)):
+         raise ValueError(
+             'No job events found in workflow. Cannot extract data for tags.'
+         )
      try:
-         testcase_metadata = get_testcases(events, scope)
+         testcases = get_testcases(events, scope)
      except ValueError as err:
-         raise ValueError(str(err) + ' Cannot extract metadata for tags.')
+         if str(err).startswith('[SCOPE ERROR] '):
+             raise ValueError(str(err))
+         current_app.logger.debug(str(err))
+         testcases = {}
      tags = {}
-     for testcase in testcase_metadata.values():
+     for job, parent in jobs.values():
+         for tag in job['runs-on']:
+             tags.setdefault(tag, _make_tag_datasource(tag, parent))
+             tags[tag]['status']['jobCount'] += 1
+
+     for testcase in testcases.values():
          for tag in testcase['test']['runs-on']:
-             tags.setdefault(tag, {SUCCESS: 0, FAILURE: 0, ERROR: 0, SKIPPED: 0})
-             tags[tag][testcase['status']] += 1
-     for tag, counts in tags.items():
-         counts['total'] = sum(counts[status] for status in STATUSES_ORDER)
-         counts['other'] = sum(counts[status] for status in (SKIPPED, ERROR))
-         tags[tag] = {k.lower(): v for k, v in counts.items()}
+             tags[tag]['status']['testCaseCount'] += 1
+             tags[tag]['status']['testCaseStatusSummary'][
+                 testcase['test']['outcome']
+             ] += 1
      return tags


@@ -484,27 +579,116 @@ def get_tags(events: List[Dict[str, Any]], scope: str = 'true') -> Dict[str, Any
  ## Datasource: Jobs


- def _evaluate_test_results(jobs_testcases: Dict[str, Any]) -> Dict[str, Dict[str, int]]:
-     """Summarize job testcases.
+ def _collect_job_times_and_id(
+     events: List[Dict[str, Any]], request_metadata: Dict[str, Any]
+ ) -> Dict[str, Any]:
+     """Collect job start and end time, if available.
+
+     # Required parameters
+
+     - events: a list of events
+     - request_metadata: the channel request metadata for the job
+
+     # Returned object
+
+     A dictionary with the following entries:
+
+     - job_id
+     - requestTime
+
+     If the job started, it contains the additional entries:
+
+     - startTime
+     - endTime
+     - duration
+     """
+     job_id = request_metadata['job_id']
+     request_time = request_metadata[CREATION_TIMESTAMP]
+
+     start = end = None
+     for event in events:
+         metadata = event['metadata']
+         kind_step_id = (event['kind'], metadata['step_sequence_id'], metadata['job_id'])
+         if kind_step_id == (EXECUTIONCOMMAND, 0, job_id):
+             start = metadata[CREATION_TIMESTAMP]
+         elif kind_step_id == (EXECUTIONRESULT, -2, job_id):
+             end = metadata[CREATION_TIMESTAMP]
+         if start and end:
+             break
+     else:
+         return {'job_id': job_id, 'requestTime': request_time}
+
+     return {
+         'requestTime': request_time,
+         'startTime': start,
+         'endTime': end,
+         'duration': (
+             datetime.fromisoformat(end) - datetime.fromisoformat(start)
+         ).total_seconds()
+         * 1000,
+         'job_id': job_id,
+     }
+
+
+ def _make_job_datasource(
+     job_name: str,
+     request_metadata: Dict[str, Any],
+     job: Dict[str, Any],
+     parent: Dict[str, Any],
+     events: List[Dict[str, Any]],
+ ) -> Dict[str, Any]:
+     """Make datasource object for job.
+
+     # Required parameters
+
+     - job_name: a string, the 'short' job name
+     - request_metadata: the channel request metadata for the job or {}
+     - job: a dictionary, the job definition
+     - parent: a workflow or a generatorresult event
+     - events: a list of events

      # Returned value

-     A dictionary with one entry per job (a dictionary with keys being
-     statuses and values being counts).
+     A 'Job' datasource object.
      """
-     summaries = {}
-     for job, testcases in jobs_testcases.items():
-         successes, failures, errors, skipped = [
-             _get_sum_for_status(testcases, status) for status in STATUSES_ORDER
-         ]
-         summaries[job] = {
-             SUCCESS: successes,
-             FAILURE: failures,
-             ERROR: errors,
-             SKIPPED: skipped,
-             TOTAL: len(testcases),
-         }
-     return summaries
+     if request_metadata:
+         job_times_id = _collect_job_times_and_id(events, request_metadata)
+     else:
+         job_times_id = {}
+
+     return {
+         'apiVersion': 'opentestfactory.org/v1alpha1',
+         'kind': 'Job',
+         'metadata': {
+             'name': job_name,
+             'id': job_times_id.get('job_id'),
+             'namespace': parent['metadata']['namespace'],
+             'workflow_id': parent['metadata']['workflow_id'],
+             'creationTimestamp': parent['metadata'][CREATION_TIMESTAMP],
+         },
+         'spec': {
+             'runs-on': job['runs-on'],
+             'variables': {
+                 **parent.get('variables', {}),
+                 **job.get('variables', {}),
+             },
+         },
+         'status': {
+             'phase': 'SUCCEEDED',
+             'requestTime': job_times_id.get('requestTime'),
+             'startTime': job_times_id.get('startTime'),
+             'endTime': job_times_id.get('endTime'),
+             'duration': job_times_id.get('duration'),
+             'testCaseCount': 0,
+             'testCaseStatusSummary': {
+                 'success': 0,
+                 'failure': 0,
+                 'error': 0,
+                 'skipped': 0,
+                 'cancelled': 0,
+             },
+         },
+     }


  def get_jobs(events: List[Dict[str, Any]], scope: str = 'true') -> Dict[str, Any]:
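The `duration` entry above is plain ISO-8601 arithmetic; a worked example with made-up timestamps:

```python
from datetime import datetime

start = '2024-05-07T14:03:05.120000'  # hypothetical ExecutionCommand creationTimestamp
end = '2024-05-07T14:03:47.620000'    # hypothetical ExecutionResult creationTimestamp

duration_ms = (
    datetime.fromisoformat(end) - datetime.fromisoformat(start)
).total_seconds() * 1000

assert duration_ms == 42500.0  # milliseconds
```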
@@ -516,60 +700,88 @@ def get_jobs(events: List[Dict[str, Any]], scope: str = 'true') -> Dict[str, Any

      # Returned value:

-     A dictionary. Keys are job names, values are dictionaries with the
-     following entries:
-
-     - name: a string, job name
-     - runs-on: a list, execution environment tags
-     - testcases: a dictionary, job-related test cases
-     - counts: a dictionary, tests statuses count by job
-     - variables: a dictionary, job-related environment variables
+     A dictionary. Keys are job names, values are dictionaries with
+     job metadata, spec, and status.

      `jobs_testcases` is a dictionary of entries like:

      ```
-     "<<<job_name>>>": {
-         "runs-on": [<<<execution environment tags>>>],
-         "counts": {
-             "FAILURE": <<<failed tests count>>>,
-             "SUCCESS": <<<passed tests count>>>,
-             "ERROR": <<<technical KOs count>>>,
-             "SKIPPED": <<<skipped tests count>>>,
-             "total count": <<<total tests count>>>
-         },
-         "variables": {
-             "<<<variable name>>>": "<<<variable value>>>",
-             ...
-         }
-     }
+     apiVersion: opentestfactory.org/v1alpha1
+     kind: Job
+     metadata:
+       name: <<<Job name>>>
+       id: <<<Job uuid>>>
+       namespace: <<<Job namespace>>>
+       workflow_id: <<<Job workflow id>>>
+       creationTimestamp: <<<Job creation timestamp>>>
+     spec:
+       runs-on: <<<Job execution environment tags>>>
+       variables: <<<Workflow and job specific environment variables>>>
+     status:
+       phase: <<<Job phase>>>
+       requestTime: <<<Job execution environment request time>>>
+       startTime: <<<Job start time>>>
+       endTime: <<<Job end time>>>
+       duration: <<<Job duration (endTime - startTime)>>>
+       testCaseCount: <<<Job test case count>>>
+       testCaseStatusSummary: <<<Job test case count by status>>>
+         success: N
+         failure: N
+         error: N
+         skipped: N
+         cancelled: N
      ```
      """
+
+     def _matches(item, items):
+         if item and items:
+             return items[-1] == item
+         return not item and not items
+
+     if not (workflow_jobs := _get_workflow_jobs(events)):
+         raise ValueError(
+             'No job events found in workflow. Cannot extract data for jobs.'
+         )
+
      try:
-         testcase_metadata = get_testcases(events, scope)
+         testcases = get_testcases(events, scope)
      except ValueError as err:
-         raise ValueError(str(err) + ' Cannot extract metadata for jobs.')
-     jobs_testcases = _get_sorted_testcases(testcase_metadata, ['test', 'job'])
-     if 'KeyError' in jobs_testcases:
-         raise ValueError('Cannot get jobs-ordered dataset from testcases.')
-     job_events = [
-         event
-         for event in events
-         if event['kind'] == 'ExecutionCommand'
-         and event['metadata']['step_sequence_id'] == -1
-         and event['metadata']['name'] in jobs_testcases
-     ]
-     results = _evaluate_test_results(jobs_testcases)
+         if str(err).startswith('[SCOPE ERROR] '):
+             raise ValueError(str(err))
+         current_app.logger.debug(str(err))
+         testcases = {}
+
+     jobs_events = list(
+         filter(
+             lambda event: event['kind'] in (EXECUTIONCOMMAND, EXECUTIONRESULT)
+             and event['metadata']['step_sequence_id'] in (0, -1, -2),
+             events,
+         )
+     )
      jobs = {}
-     for job in job_events:
-         job_name = job['metadata']['name']
-         jobs.setdefault(
-             job['metadata']['name'],
-             {
-                 'runs-on': job['runs-on'],
-                 'counts': results[job_name],
-                 'variables': {
-                     name: value for name, value in job.get('variables', {}).items()
-                 },
-             },
+     for job_name, (job, parent) in workflow_jobs.items():
+         name, _, uuid = job_name.partition(' ')
+         channel_request_metadata = next(
+             (
+                 event
+                 for event in jobs_events
+                 if event['kind'] == EXECUTIONCOMMAND
+                 and event['metadata']['step_sequence_id'] == -1
+                 and event['metadata']['name'] == name
+                 and _matches(uuid, event['metadata']['job_origin'])
+             ),
+             {'metadata': {}},
+         )['metadata']
+
+         data = _make_job_datasource(
+             name, channel_request_metadata, job, parent, jobs_events
          )
+         jobs[data['metadata']['id']] = data
+
+     for testcase in testcases.values():
+         jobs[testcase['metadata']['job_id']]['status']['testCaseCount'] += 1
+         jobs[testcase['metadata']['job_id']]['status']['testCaseStatusSummary'][
+             testcase['test']['outcome']
+         ] += 1
+
      return jobs
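For orientation, a minimal sketch of how the reworked datasource entry points might be driven (illustrative only: it assumes an active Flask application context, since the module now logs through `current_app`, and `load_workflow_events()` is a hypothetical helper standing in for however the workflow's events are obtained):

```python
from flask import Flask

from opentf.commons.datasources import get_jobs, get_tags, get_testcases

app = Flask(__name__)
events = load_workflow_events()  # hypothetical: Workflow, ExecutionCommand, ExecutionResult, ... dicts

with app.app_context():
    testcases = get_testcases(events, 'true')  # keyed by test result ID
    tags = get_tags(events)                    # keyed by execution environment tag
    jobs = get_jobs(events)                    # one Job datasource object per job

for job in jobs.values():
    print(job['metadata']['name'], job['status']['testCaseStatusSummary'])
```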
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opentf-toolkit-nightly
- Version: 0.55.0.dev950
+ Version: 0.55.0.dev968
  Summary: OpenTestFactory Orchestrator Toolkit
  Home-page: https://gitlab.com/henixdevelopment/open-source/opentestfactory/python-toolkit
  Author: Martin Lafaix
@@ -1,7 +1,7 @@
  opentf/commons/__init__.py,sha256=KRY8ShQw_0ZZ0oEOiKi4-xnWofE_QsFjEe1T8wUxJ-w,21952
  opentf/commons/auth.py,sha256=bM2Z3kxm2Wku1lKXaRAIg37LHvXWAXIZIqjplDfN2P8,15899
  opentf/commons/config.py,sha256=GmvInVnUsXIwlNfgTQeQ_pPs97GeGTGn2S2QZEFwss8,7828
- opentf/commons/datasources.py,sha256=EIssc6CyC8s5FYVZtdEo7IpCQxX92mFntYWmsoO62go,18412
+ opentf/commons/datasources.py,sha256=8qWC6KFiFGiIOEH7FGdbP_4hd_gVJFxbpR-051rhNZI,24452
  opentf/commons/expressions.py,sha256=A68F27Our8oVVphUrRvB5haSlqj2YCrH2OxHPNLBio4,19251
  opentf/commons/pubsub.py,sha256=7khxAHVZiwJRcwIBJ6MPR-f3xY9144-2eNLROwq5F-4,5894
  opentf/commons/schemas.py,sha256=lokZCU-wmsIkzVA-TVENtC7Io_GmYxrP-FQaOOowg4s,4044
@@ -48,8 +48,8 @@ opentf/scripts/startup.py,sha256=Da2zo93pBWbdRmj-wgekgLcF94rpNc3ZkbvR8R0w8XY,212
  opentf/toolkit/__init__.py,sha256=g3DiTZlSvvzZWKgM8qU47muLqjQrpWZ6M6PWZ-sBsvQ,19610
  opentf/toolkit/channels.py,sha256=Cng3b4LUsxvCHUbp_skys9CFcKZMfcKhA_ODg_EAlIE,17156
  opentf/toolkit/core.py,sha256=L1fT4YzwZjqE7PUXhJL6jSVQge3ohBQv5UBb9DAC6oo,9320
- opentf_toolkit_nightly-0.55.0.dev950.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- opentf_toolkit_nightly-0.55.0.dev950.dist-info/METADATA,sha256=gT2hqExdA5awFpPmq43fIso434TuFdAQOrSlqz1EsG4,1945
- opentf_toolkit_nightly-0.55.0.dev950.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- opentf_toolkit_nightly-0.55.0.dev950.dist-info/top_level.txt,sha256=_gPuE6GTT6UNXy1DjtmQSfCcZb_qYA2vWmjg7a30AGk,7
- opentf_toolkit_nightly-0.55.0.dev950.dist-info/RECORD,,
+ opentf_toolkit_nightly-0.55.0.dev968.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ opentf_toolkit_nightly-0.55.0.dev968.dist-info/METADATA,sha256=HPwpx76WDHU5UplXkpArT14r7mOahUkVL6sUu5E9Mao,1945
+ opentf_toolkit_nightly-0.55.0.dev968.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ opentf_toolkit_nightly-0.55.0.dev968.dist-info/top_level.txt,sha256=_gPuE6GTT6UNXy1DjtmQSfCcZb_qYA2vWmjg7a30AGk,7
+ opentf_toolkit_nightly-0.55.0.dev968.dist-info/RECORD,,