opentf-toolkit-nightly 0.55.0.dev934__py3-none-any.whl → 0.55.0.dev941__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opentf/commons/datasources.py +195 -12
- {opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/METADATA +1 -1
- {opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/RECORD +6 -6
- {opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/LICENSE +0 -0
- {opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/WHEEL +0 -0
- {opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/top_level.txt +0 -0
opentf/commons/datasources.py
CHANGED
@@ -24,11 +24,48 @@ from opentf.toolkit.core import warning
 ########################################################################
 # Constants
 
+SUCCESS = 'SUCCESS'
+FAILURE = 'FAILURE'
+ERROR = 'ERROR'
+SKIPPED = 'SKIPPED'
+TOTAL = 'total count'
+
 DETAILS_KEYS = ('failureDetails', 'errorDetails', 'warningDetails')
-
+STATUSES_ORDER = (SUCCESS, FAILURE, ERROR, SKIPPED)
+FAILURE_STATUSES = (FAILURE, ERROR)
 
 ########################################################################
-##
+## Helpers
+
+
+def _get_path(src: Dict[str, Any], path: List[str]) -> Any:
+    if not path:
+        return src
+    try:
+        return _get_path(src[path[0]], path[1:])
+    except KeyError:
+        return 'KeyError'
+
+
+def _get_sorted_testcases(
+    testcase_metadata: Dict[str, Any], path: List[str]
+) -> Dict[str, Any]:
+    sorted_testcases = {}
+    for testcase, data in testcase_metadata.items():
+        sorted_testcases.setdefault(_get_path(data, path), {})[testcase] = data
+    return sorted_testcases
+
+
+def _get_sum_for_status(testcases: Dict[str, Any], status: str) -> int:
+    return sum(1 for testcase in testcases.values() if testcase['status'] == status)
+
+
+def _as_list(what) -> List[str]:
+    return [what] if isinstance(what, str) else what
+
+
+########################################################################
+## Datasource: Testcases
 
 
 def in_scope(expr: str, contexts: Dict[str, Any], scopes_errors: Set[str]) -> bool:
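The grouping helpers added above are generic: `_get_path` walks a nested dictionary and `_get_sorted_testcases` buckets testcases by whatever value sits at that path. The snippet below is a standalone sketch of that behaviour, with the two helpers copied from the hunk and a hand-made testcase dictionary that is purely illustrative, not taken from the package.

```python
# Standalone illustration of the helpers introduced in this hunk.
# The sample `testcases` dictionary is invented for demonstration purposes.
from typing import Any, Dict, List


def _get_path(src: Dict[str, Any], path: List[str]) -> Any:
    # Follow `path` through nested dictionaries; 'KeyError' flags a miss.
    if not path:
        return src
    try:
        return _get_path(src[path[0]], path[1:])
    except KeyError:
        return 'KeyError'


def _get_sorted_testcases(
    testcase_metadata: Dict[str, Any], path: List[str]
) -> Dict[str, Any]:
    # Group testcases by the value found at `path` in their metadata.
    sorted_testcases: Dict[str, Any] = {}
    for testcase, data in testcase_metadata.items():
        sorted_testcases.setdefault(_get_path(data, path), {})[testcase] = data
    return sorted_testcases


testcases = {
    'tc-1': {'status': 'SUCCESS', 'test': {'job': 'api-tests'}},
    'tc-2': {'status': 'FAILURE', 'test': {'job': 'api-tests'}},
    'tc-3': {'status': 'SUCCESS', 'test': {'job': 'ui-tests'}},
}

# Grouping by ['test', 'job'] yields one bucket per job:
# {'api-tests': {'tc-1': ..., 'tc-2': ...}, 'ui-tests': {'tc-3': ...}}
print(_get_sorted_testcases(testcases, ['test', 'job']))
```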
@@ -53,26 +90,20 @@ def get_testresults(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
     return [item for item in events if _has_testresult(item)]
 
 
-def _as_list(what) -> List[str]:
-    return [what] if isinstance(what, str) else what
-
-
 def _has_testresult(item: Dict[str, Any]) -> bool:
-    """
+    """Determine if a workflow notification has a testResults element."""
     return item.get('kind') == 'Notification' and item.get('spec', {}).get(
         'testResults', False
     )
 
 
 def _uses_inception(events: List[Dict[str, Any]]) -> bool:
-    """
+    """Determine if a workflow is the inception workflow."""
     workflow_event = next(
         (event for event in events if event['kind'] == 'Workflow'), None
     )
     if not workflow_event:
-        raise ValueError(
-            'No Workflow event in workflow events, is there any event at all?'
-        )
+        raise ValueError('No Workflow event in workflow events...')
     return any(
         'inception' in _as_list(job['runs-on'])
         for job in workflow_event['jobs'].values()
@@ -194,7 +225,9 @@ def _create_testresult_labels(
     return labels
 
 
-def _get_testresult_steporigin(
+def _get_testresult_steporigin(
+    attachment_origin: str, events: List[Dict[str, Any]]
+) -> Optional[str]:
     """Find the step that produced the attachment.
 
     # Required parameters
@@ -310,7 +343,13 @@ def get_testcases(events: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
 
     - name: a string, the test case name
     - status: a string, the test case status
+    - duration: a string, the test case execution time in ms
+    - timestamp: a string, provider creation timestamp
     - test: a dictionary, the test case metadata
+    - failureDetails|errorDetails|warningDetails: a dictionary with test
+      case failure details
+    - errorsList: a Robot Framework specific list with execution general
+      errors
 
     `testcases` is a dictionary of entries like:
 
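The docstring additions above enumerate the per-testcase fields that `get_testcases` now documents. As a rough, hypothetical illustration (the testcase ID and all field values are invented; only the nested shapes are inferred from how the later hunks read them), one entry might look like:

```python
# Hypothetical `testcases` entry shaped after the documented fields above;
# all values are invented for illustration.
testcase_entry = {
    'name': 'login_test',                  # test case name
    'status': 'FAILURE',                   # SUCCESS / FAILURE / ERROR / SKIPPED
    'duration': '1234',                    # execution time in ms
    'timestamp': '2024-01-01T12:00:00',    # provider creation timestamp
    'test': {                              # test case metadata
        'job': 'api-tests',
        'runs-on': ['linux'],
    },
    'failureDetails': {'message': 'assertion failed'},
}
```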
@@ -379,3 +418,147 @@ def get_testcases(events: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
         raise ValueError('No test results in events.')
     _get_testresult_timestamps(events, testresults, testcases)
     return testcases
+
+
+########################################################################
+## Datasource: Tags
+
+
+def get_tags(
+    events: List[Dict[str, Any]], testcase_metadata: Optional[Dict[str, Any]] = None
+) -> Dict[str, Any]:
+    """Extract metadata for each execution environment tag.
+
+    # Required parameters:
+
+    - events: a list of events
+
+    # Returned value:
+
+    A dictionary. Keys are tags names, values are dictionaries with testcase
+    by tag status counters.
+
+    `tags` is a dictionary of entries like:
+
+    ```
+    "<<<tag name>>>": {
+        "FAILURE": <<<failed tests count>>>,
+        "SUCCESS": <<<passed tests count>>>,
+        "SKIPPED": <<<skipped tests count>>>,
+        "ERROR": <<<technical KO tests count>>>,
+        'total': <<<total tests count>>>,
+        'other': <<<SKIPPED + ERROR tests count>>>
+    }
+    ```
+    """
+    try:
+        testcase_metadata = testcase_metadata or get_testcases(events)
+    except ValueError as err:
+        raise ValueError(str(err) + ' Cannot extract metadata for tags.')
+    tags = {}
+    for testcase in testcase_metadata.values():
+        for tag in testcase['test']['runs-on']:
+            tags.setdefault(tag, {SUCCESS: 0, FAILURE: 0, ERROR: 0, SKIPPED: 0})
+            tags[tag][testcase['status']] += 1
+    for tag, counts in tags.items():
+        counts['total'] = sum(counts[status] for status in STATUSES_ORDER)
+        counts['other'] = sum(counts[status] for status in (SKIPPED, ERROR))
+        tags[tag] = {k.lower(): v for k, v in counts.items()}
+    return tags
+
+
+########################################################################
+## Datasource: Jobs
+
+
+def _evaluate_test_results(jobs_testcases: Dict[str, Any]) -> Dict[str, Any]:
+    """Summarize job testcases.
+
+    # Returned value
+
+    A dictionary with one entry per job (a dictionary with keys being
+    statuses and values being counts).
+    """
+    summaries = {}
+    for job, testcases in jobs_testcases.items():
+        successes, failures, errors, skipped = [
+            _get_sum_for_status(testcases, status) for status in STATUSES_ORDER
+        ]
+        summaries[job] = {
+            SUCCESS: successes,
+            FAILURE: failures,
+            ERROR: errors,
+            SKIPPED: skipped,
+            TOTAL: len(testcases),
+        }
+    return summaries
+
+
+def get_jobs(
+    events: List[Dict[str, Any]], testcase_metadata: Optional[Dict[str, Any]] = None
+) -> Dict[str, Any]:
+    """Extract metadata for each job.
+
+    # Required parameters:
+
+    - events: a list of events
+
+    # Returned value:
+
+    A dictionary. Keys are job names, values are dictionaries with the
+    following entries:
+
+    - name: a string, job name
+    - runs-on: a list, execution environment tags
+    - testcases: a dictionary, job-related test cases
+    - counts: a dictionary, tests statuses count by job
+    - variables: a dictionary, job-related environment variables
+
+    `jobs_testcases` is a dictionary of entries like:
+
+    ```
+    "<<<job_name>>>": {
+        "runs-on": [<<<execution environment tags>>>],
+        "counts": {
+            "FAILURE": <<<failed tests count>>>,
+            "SUCCESS": <<<passed tests count>>>,
+            "ERROR": <<<technical KOs count>>>,
+            "SKIPPED": <<<skipped tests count>>>,
+            "total count": <<<total tests count>>>
+        },
+        "variables": {
+            "<<<variable name>>>": "<<<variable value>>>",
+            ...
+        }
+    }
+    ```
+    """
+    try:
+        testcase_metadata = testcase_metadata or get_testcases(events)
+    except ValueError as err:
+        raise ValueError(str(err) + ' Cannot extract metadata for jobs.')
+    jobs_testcases = _get_sorted_testcases(testcase_metadata, ['test', 'job'])
+    if 'KeyError' in jobs_testcases:
+        raise ValueError('Cannot get jobs-ordered dataset from testcases.')
+    job_events = [
+        event
+        for event in events
+        if event['kind'] == 'ExecutionCommand'
+        and event['metadata']['step_sequence_id'] == -1
+        and event['metadata']['name'] in jobs_testcases
+    ]
+    results = _evaluate_test_results(jobs_testcases)
+    jobs = {}
+    for job in job_events:
+        job_name = job['metadata']['name']
+        jobs.setdefault(
+            job['metadata']['name'],
+            {
+                'runs-on': job['runs-on'],
+                'counts': results[job_name],
+                'variables': {
+                    name: value for name, value in job.get('variables', {}).items()
+                },
+            },
+        )
+    return jobs
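Taken together, the new `get_tags` and `get_jobs` datasources mirror the existing `get_testcases` one: each accepts the raw event list and, optionally, an already-computed testcase dictionary so the events are only parsed once. Below is a minimal usage sketch, assuming an `events` list has already been collected from a workflow and that this nightly build is installed.

```python
# Minimal usage sketch; `events` is assumed to be a list of workflow events
# obtained elsewhere (e.g. from the workflow's event stream).
from opentf.commons.datasources import get_jobs, get_tags, get_testcases


def summarize(events):
    # Parse the events once, then reuse the result for both datasources.
    testcases = get_testcases(events)
    tags = get_tags(events, testcases)
    jobs = get_jobs(events, testcases)
    # get_tags() lower-cases its counter keys ('success', 'failure', ...).
    for tag, counts in tags.items():
        print(f"{tag}: {counts['success']}/{counts['total']} passed")
    return tags, jobs
```

Passing the precomputed `testcases` dictionary is optional; when omitted, both functions call `get_testcases(events)` themselves and re-raise its `ValueError` if the events contain no test results.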
{opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 opentf/commons/__init__.py,sha256=ITzg1zfZgA5-4wvmJfjLN94_Z06HeMl0szd6dalrrKY,21839
 opentf/commons/auth.py,sha256=bM2Z3kxm2Wku1lKXaRAIg37LHvXWAXIZIqjplDfN2P8,15899
 opentf/commons/config.py,sha256=GmvInVnUsXIwlNfgTQeQ_pPs97GeGTGn2S2QZEFwss8,7828
-opentf/commons/datasources.py,sha256=
+opentf/commons/datasources.py,sha256=IKljfR59JSRutXF1Q10MwCuhH5bnVLjo0h7IteNFn_U,18191
 opentf/commons/expressions.py,sha256=A68F27Our8oVVphUrRvB5haSlqj2YCrH2OxHPNLBio4,19251
 opentf/commons/pubsub.py,sha256=7khxAHVZiwJRcwIBJ6MPR-f3xY9144-2eNLROwq5F-4,5894
 opentf/commons/schemas.py,sha256=lokZCU-wmsIkzVA-TVENtC7Io_GmYxrP-FQaOOowg4s,4044
@@ -48,8 +48,8 @@ opentf/scripts/startup.py,sha256=Da2zo93pBWbdRmj-wgekgLcF94rpNc3ZkbvR8R0w8XY,212
 opentf/toolkit/__init__.py,sha256=g3DiTZlSvvzZWKgM8qU47muLqjQrpWZ6M6PWZ-sBsvQ,19610
 opentf/toolkit/channels.py,sha256=Cng3b4LUsxvCHUbp_skys9CFcKZMfcKhA_ODg_EAlIE,17156
 opentf/toolkit/core.py,sha256=L1fT4YzwZjqE7PUXhJL6jSVQge3ohBQv5UBb9DAC6oo,9320
-opentf_toolkit_nightly-0.55.0.
-opentf_toolkit_nightly-0.55.0.
-opentf_toolkit_nightly-0.55.0.
-opentf_toolkit_nightly-0.55.0.
-opentf_toolkit_nightly-0.55.0.
+opentf_toolkit_nightly-0.55.0.dev941.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+opentf_toolkit_nightly-0.55.0.dev941.dist-info/METADATA,sha256=Z6JJRflN4y9tRp6aCn8WWhNF3b6J6PIU1jbndC5ys_4,1945
+opentf_toolkit_nightly-0.55.0.dev941.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+opentf_toolkit_nightly-0.55.0.dev941.dist-info/top_level.txt,sha256=_gPuE6GTT6UNXy1DjtmQSfCcZb_qYA2vWmjg7a30AGk,7
+opentf_toolkit_nightly-0.55.0.dev941.dist-info/RECORD,,
{opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/LICENSE: File without changes
{opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/WHEEL: File without changes
{opentf_toolkit_nightly-0.55.0.dev934.dist-info → opentf_toolkit_nightly-0.55.0.dev941.dist-info}/top_level.txt: File without changes