gha-utils 4.14.1__tar.gz → 4.15.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This release of gha-utils has been flagged as potentially problematic.
- {gha_utils-4.14.1 → gha_utils-4.15.0}/PKG-INFO +6 -8
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/__init__.py +1 -1
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/changelog.py +1 -5
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/cli.py +81 -16
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/metadata.py +17 -27
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/test_plan.py +119 -68
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils.egg-info/PKG-INFO +6 -8
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils.egg-info/requires.txt +3 -6
- {gha_utils-4.14.1 → gha_utils-4.15.0}/pyproject.toml +7 -9
- {gha_utils-4.14.1 → gha_utils-4.15.0}/readme.md +1 -1
- {gha_utils-4.14.1 → gha_utils-4.15.0}/tests/test_mailmap.py +2 -2
- {gha_utils-4.14.1 → gha_utils-4.15.0}/tests/test_metadata.py +11 -12
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/__main__.py +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/mailmap.py +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/matrix.py +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils/py.typed +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils.egg-info/SOURCES.txt +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils.egg-info/dependency_links.txt +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils.egg-info/entry_points.txt +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/gha_utils.egg-info/top_level.txt +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/setup.cfg +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/tests/test_changelog.py +0 -0
- {gha_utils-4.14.1 → gha_utils-4.15.0}/tests/test_matrix.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gha-utils
-Version: 4.14.1
+Version: 4.15.0
 Summary: ⚙️ CLI helpers for GitHub Actions + reuseable workflows
 Author-email: Kevin Deldycke <kevin@deldycke.com>
 Project-URL: Homepage, https://github.com/kdeldycke/workflows
@@ -19,7 +19,6 @@ Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Operating System :: Microsoft :: Windows
 Classifier: Operating System :: POSIX :: Linux
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
@@ -43,17 +42,16 @@ Classifier: Topic :: Text Processing :: Markup :: HTML
 Classifier: Topic :: Text Processing :: Markup :: Markdown
 Classifier: Topic :: Utilities
 Classifier: Typing :: Typed
-Requires-Python: >=3.10
+Requires-Python: >=3.11
 Description-Content-Type: text/markdown
-Requires-Dist: backports.strenum~=1.3.1; python_version < "3.11"
 Requires-Dist: boltons>=24.0.0
-Requires-Dist: bump-my-version>=0.
-Requires-Dist: click-extra~=4.
+Requires-Dist: bump-my-version>=0.32.2
+Requires-Dist: click-extra~=4.15.0
+Requires-Dist: extra-platforms~=3.1.0
 Requires-Dist: packaging~=24.1
 Requires-Dist: PyDriller~=2.6
 Requires-Dist: pyproject-metadata~=0.9.0
 Requires-Dist: pyyaml~=6.0.0
-Requires-Dist: tomli~=2.0.1; python_version < "3.11"
 Requires-Dist: wcmatch>=8.5
 Provides-Extra: test
 Requires-Dist: coverage[toml]~=7.6.0; extra == "test"
@@ -137,7 +135,7 @@ $ uvx gha-utils --version
 gha-utils, version 4.9.0
 ```
 
-That's the best way to get started with `gha-utils
+That's the best way to get started with `gha-utils` and experiment with it.
 
 ### Executables
 
@@ -18,15 +18,11 @@ from __future__ import annotations
 
 import logging
 import re
-import sys
 from functools import cached_property
 from pathlib import Path
 from textwrap import indent
 
-if sys.version_info >= (3, 11):
-    import tomllib
-else:
-    import tomli as tomllib  # type: ignore[import-not-found]
+import tomllib
 
 
 class Changelog:
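The changelog module now imports `tomllib` unconditionally, since this release raises the Python floor to 3.11. A minimal, self-contained sketch of the standard-library call that replaces the `tomli` fallback (the `pyproject.toml` path below is only an illustration):

```python
# Python >= 3.11 ships tomllib in the standard library, so the tomli backport
# and the sys.version_info guard are no longer needed.
import tomllib

with open("pyproject.toml", "rb") as f:  # tomllib only accepts binary file objects
    config = tomllib.load(f)
print(config["project"]["name"])
```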
@@ -19,11 +19,12 @@ from __future__ import annotations
 import logging
 import os
 import sys
+from collections import Counter
 from datetime import datetime
 from pathlib import Path
 from typing import IO
 
-import click
+from boltons.iterutils import unique
 from click_extra import (
     Choice,
     Context,
@@ -35,12 +36,14 @@ from click_extra import (
     option,
     pass_context,
 )
+from click_extra.envvar import merge_envvar_ids
+from extra_platforms import ALL_IDS
 
 from . import __version__
 from .changelog import Changelog
 from .mailmap import Mailmap
 from .metadata import Dialects, Metadata
-from .test_plan import DEFAULT_TEST_PLAN, parse_test_plan
+from .test_plan import DEFAULT_TEST_PLAN, SkippedTest, parse_test_plan
 
 
 def is_stdout(filepath: Path) -> bool:
@@ -275,16 +278,38 @@ def mailmap_sync(ctx, source, create_if_missing, destination_mailmap):
 @gha_utils.command(short_help="Run a test plan from a file against a binary")
 @option(
     "--binary",
-
-    # `file_path` type.
-    type=click.Path(exists=True, executable=True, resolve_path=True),
+    type=file_path(exists=True, executable=True, resolve_path=True),
     required=True,
-
+    metavar="FILE_PATH",
+    help="Path to the binary file to test.",
 )
 @option(
+    "-F",
+    "--plan-file",
+    # TODO: remove deprecated --plan option to avoid confusion.
     "--plan",
     type=file_path(exists=True, readable=True, resolve_path=True),
-
+    multiple=True,
+    metavar="FILE_PATH",
+    help="Path to a test plan file in YAML. This option can be repeated to run "
+    "multiple test plans in sequence. If not provided, a default test plan will be "
+    "executed.",
+)
+@option(
+    "-E",
+    "--plan-envvar",
+    multiple=True,
+    metavar="ENVVAR_NAME",
+    help="Name of an environment variable containing a test plan in YAML. This "
+    "option can be repeated to collect multiple test plans.",
+)
+@option(
+    "-s",
+    "--skip-platform",
+    type=Choice(sorted(ALL_IDS), case_sensitive=False),
+    multiple=True,
+    help="Skip tests for the specified platforms. This option can be repeated to "
+    "skip multiple platforms.",
 )
 @option(
     "-t",
@@ -293,18 +318,58 @@ def mailmap_sync(ctx, source, create_if_missing, destination_mailmap):
     # 0.0 is negative values are provided, so we mimic this behavior here:
     # https://github.com/python/cpython/blob/5740b95076b57feb6293cda4f5504f706a7d622d/Lib/subprocess.py#L1596-L1597
     type=FloatRange(min=0, clamp=True),
+    metavar="SECONDS",
     help="Set the default timeout for each CLI call, if not specified in the "
     "test plan.",
 )
-def test_plan(
+def test_plan(
+    binary: Path,
+    plan_file: tuple[Path, ...] | None,
+    plan_envvar: tuple[str, ...] | None,
+    skip_platform: tuple[str, ...] | None,
+    timeout: float | None,
+) -> None:
     # Load test plan from workflow input, or use a default one.
-
-
-
+    test_list = []
+    if plan_file or plan_envvar:
+        for file in unique(plan_file):
+            logging.info(f"Get test plan from {file} file")
+            tests = list(parse_test_plan(file.read_text(encoding="UTF-8")))
+            logging.info(f"{len(tests)} test cases found.")
+            test_list.extend(tests)
+        for envvar_id in merge_envvar_ids(plan_envvar):
+            logging.info(f"Get test plan from {envvar_id!r} environment variable")
+            tests = list(parse_test_plan(os.getenv(envvar_id)))
+            logging.info(f"{len(tests)} test cases found.")
+            test_list.extend(tests)
+
     else:
-        logging.warning(
-
+        logging.warning(
+            "No test plan provided through --plan-file/-F or --plan-envvar/-E options:"
+            " use default test plan."
+        )
+        test_list = DEFAULT_TEST_PLAN
+    logging.debug(f"Test plan: {test_list}")
+
+    stats = Counter(total=len(test_list), skipped=0, failed=0)
 
-    for index, test_case in enumerate(
-        logging.info(f"Run test #{index}")
-
+    for index, test_case in enumerate(test_list):
+        logging.info(f"Run test #{index + 1}")
+        try:
+            logging.debug(f"Test case parameters: {test_case}")
+            test_case.run_cli_test(
+                binary, additional_skip_platforms=skip_platform, default_timeout=timeout
+            )
+        except SkippedTest as ex:
+            stats["skipped"] += 1
+            logging.warning(f"Test skipped: {ex}")
+        except Exception as ex:
+            stats["failed"] += 1
+            logging.error(f"Test failed: {ex}")
+
+    logging.info(
+        "Test plan results - "
+        + ", ".join((f"{k.title()}: {v}" for k, v in stats.items()))
+    )
+    if stats["failed"]:
+        sys.exit(1)
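The reworked command (presumably exposed as `gha-utils test-plan`) now aggregates test cases from the repeatable `--plan-file`/`-F` and `--plan-envvar`/`-E` options, falls back to `DEFAULT_TEST_PLAN`, and tallies skipped and failed cases in a `Counter`. A hedged sketch of the same plan-loading path driven directly from Python; the YAML document and the environment variable name are made up for illustration:

```python
import os

from gha_utils.test_plan import parse_test_plan

# Hypothetical plan: each YAML entry maps to one CLITestCase, and every key
# must match a CLITestCase field name or parse_test_plan() raises ValueError.
PLAN_YAML = """
- cli_parameters: --version
  output_contains: version
- cli_parameters: --help
  output_contains: Usage
"""

# Mirror what `--plan-envvar GHA_TEST_PLAN` does: read the YAML from the
# environment and expand it into test cases.
os.environ["GHA_TEST_PLAN"] = PLAN_YAML
test_cases = list(parse_test_plan(os.getenv("GHA_TEST_PLAN")))
print(f"{len(test_cases)} test cases found.")
```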
@@ -49,7 +49,7 @@ release_commits_matrix={'commit': ['6f27db47612aaee06fdf08744b09a9f5f6c2'],
 nuitka_matrix={'entry_point': ['mpm'],
 'commit': ['346ce664f055fbd042a25ee0b7e96702e95',
 '6f27db47612aaee06fdf08744b09a9f5f6c2'],
-'os': ['ubuntu-24.04', 'ubuntu-24.04-arm', 'macos-15', 'macos-13', 'windows-
+'os': ['ubuntu-24.04', 'ubuntu-24.04-arm', 'macos-15', 'macos-13', 'windows-2025'],
 'include': [{'entry_point': 'mpm',
 'cli_id': 'mpm',
 'module_id': 'meta_package_manager.__main__',
@@ -77,7 +77,7 @@ nuitka_matrix={'entry_point': ['mpm'],
 'platform_id': 'macos',
 'arch': 'x64',
 'extension': 'bin'},
-{'os': 'windows-
+{'os': 'windows-2025',
 'platform_id': 'windows',
 'arch': 'x64',
 'extension': 'exe'},
@@ -123,12 +123,12 @@ nuitka_matrix={'entry_point': ['mpm'],
 'bin_name': 'mpm-macos-x64-build-6f27db4.bin'},
 {'entry_point': 'mpm',
 'commit': '346ce664f055fbd042a25ee0b7e96702e95',
-'os': 'windows-
+'os': 'windows-2025',
 'arch': 'x64',
 'bin_name': 'mpm-windows-x64-build-346ce66.exe'},
 {'entry_point': 'mpm',
 'commit': '6f27db47612aaee06fdf08744b09a9f5f6c2',
-'os': 'windows-
+'os': 'windows-2025',
 'arch': 'x64',
 'bin_name': 'mpm-windows-x64-build-6f27db4.exe'}]}
 ```
@@ -147,22 +147,15 @@ import json
 import logging
 import os
 import re
-import sys
 from collections.abc import Iterable
+from enum import StrEnum
 from functools import cached_property
 from pathlib import Path
 from random import randint
 from re import escape
 from typing import Any, Final, Iterator, cast
 
-if sys.version_info >= (3, 11):
-    from enum import StrEnum
-
-    import tomllib
-else:
-    import tomli as tomllib  # type: ignore[import-not-found]
-    from backports.strenum import StrEnum  # type: ignore[import-not-found]
-
+import tomllib
 from bumpversion.config import get_configuration  # type: ignore[import-untyped]
 from bumpversion.config.files import find_config_file  # type: ignore[import-untyped]
 from bumpversion.show import resolve_name  # type: ignore[import-untyped]
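With Python 3.10 support dropped, `metadata.py` takes `StrEnum` straight from the standard library instead of `backports.strenum`, and the `# type: ignore[valid-type]` workaround on the `Dialects` annotation disappears in a later hunk. A small illustration of the `StrEnum` behavior this relies on; the member values are assumed, mirroring the github and plain dumps exercised by the tests:

```python
from enum import StrEnum  # standard library since Python 3.11


class Dialects(StrEnum):
    # Assumed members for illustration only.
    github = "github"
    plain = "plain"


# StrEnum members are real strings, so they compare and format directly.
print(Dialects.github == "github")  # True
print(f"dumping metadata as {Dialects.plain}")  # dumping metadata as plain
```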
@@ -457,7 +450,7 @@ class Metadata:
         return matrix
 
     @cached_property
-    def event_type(self) -> WorkflowEvent | None:
+    def event_type(self) -> WorkflowEvent | None:
         """Returns the type of event that triggered the workflow run.
 
         .. caution::
@@ -481,8 +474,8 @@ class Metadata:
             return None
 
         if bool(os.environ.get("GITHUB_BASE_REF")):
-            return WorkflowEvent.pull_request
-        return WorkflowEvent.push
+            return WorkflowEvent.pull_request
+        return WorkflowEvent.push
 
     @cached_property
     def commit_range(self) -> tuple[str, str] | None:
@@ -515,7 +508,7 @@ class Metadata:
         if not self.github_context or not self.event_type:
             return None
         # Pull request event.
-        if self.event_type in (
+        if self.event_type in (
             WorkflowEvent.pull_request,
             WorkflowEvent.pull_request_target,
         ):
@@ -880,7 +873,7 @@ class Metadata:
 "ubuntu-24.04-arm",
 "macos-15",
 "macos-13",
-"windows-
+"windows-2025",
 ],
 "include": [
 {
@@ -925,7 +918,7 @@ class Metadata:
 "extension": "bin",
 },
 {
-"os": "windows-
+"os": "windows-2025",
 "platform_id": "windows",
 "arch": "x64",
 "extension": "exe",
@@ -989,14 +982,14 @@ class Metadata:
 {
 "entry_point": "mpm",
 "commit": "346ce664f055fbd042a25ee0b7e96702e95",
-"os": "windows-
+"os": "windows-2025",
 "arch": "x64",
 "bin_name": "mpm-windows-x64-build-346ce66.exe",
 },
 {
 "entry_point": "mpm",
 "commit": "6f27db47612aaee06fdf08744b09a9f5f6c2",
-"os": "windows-
+"os": "windows-2025",
 "arch": "x64",
 "bin_name": "mpm-windows-x64-build-6f27db4.exe",
 },
@@ -1021,7 +1014,7 @@ class Metadata:
 "ubuntu-24.04-arm", # arm64
 "macos-15", # arm64
 "macos-13", # x64
-"windows-
+"windows-2025", # x64
 ),
 )
 
@@ -1086,7 +1079,7 @@ class Metadata:
 "extension": "bin",
 },
 {
-"os": "windows-
+"os": "windows-2025",
 "platform_id": "windows",
 "arch": "x64",
 "extension": "exe",
@@ -1180,10 +1173,7 @@ class Metadata:
 
         return cast(str, value)
 
-    def dump(
-        self,
-        dialect: Dialects = Dialects.github,  # type: ignore[valid-type]
-    ) -> str:
+    def dump(self, dialect: Dialects = Dialects.github) -> str:
         """Returns all metadata in the specified format.
 
         Defaults to GitHub dialect.
@@ -18,6 +18,8 @@ from __future__ import annotations
 
 import logging
 import re
+import shlex
+import sys
 from dataclasses import asdict, dataclass, field
 from pathlib import Path
 from subprocess import TimeoutExpired, run
@@ -26,26 +28,41 @@ from typing import Generator, Sequence
 import yaml
 from boltons.iterutils import flatten
 from boltons.strutils import strip_ansi
-from click_extra.testing import args_cleanup,
+from click_extra.testing import args_cleanup, render_cli_run
+from extra_platforms import Group, _TNestedReferences, current_os
+
+
+class SkippedTest(Exception):
+    """Raised when a test case should be skipped."""
+
+    pass
 
 
 @dataclass(order=True)
-class TestCase:
+class CLITestCase:
     cli_parameters: tuple[str, ...] | str = field(default_factory=tuple)
     """Parameters, arguments and options to pass to the CLI."""
 
+    skip_platforms: _TNestedReferences = field(default_factory=tuple)
+    only_platforms: _TNestedReferences = field(default_factory=tuple)
     timeout: float | str | None = None
    exit_code: int | str | None = None
     strip_ansi: bool = False
     output_contains: tuple[str, ...] | str = field(default_factory=tuple)
     stdout_contains: tuple[str, ...] | str = field(default_factory=tuple)
     stderr_contains: tuple[str, ...] | str = field(default_factory=tuple)
-    output_regex_matches: tuple[str, ...] | str = field(
-
-
-
-
-
+    output_regex_matches: tuple[re.Pattern | str, ...] | str = field(
+        default_factory=tuple
+    )
+    stdout_regex_matches: tuple[re.Pattern | str, ...] | str = field(
+        default_factory=tuple
+    )
+    stderr_regex_matches: tuple[re.Pattern | str, ...] | str = field(
+        default_factory=tuple
+    )
+    output_regex_fullmatch: re.Pattern | str | None = None
+    stdout_regex_fullmatch: re.Pattern | str | None = None
+    stderr_regex_fullmatch: re.Pattern | str | None = None
 
     def __post_init__(self) -> None:
         """Normalize all fields."""
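The dataclass (renamed from `TestCase` to `CLITestCase`) gains platform filters and per-stream regex directives, all normalized in `__post_init__`. An illustrative construction, under the assumption that plain platform or group IDs such as `"linux"` are accepted, as the new `--skip-platform` choices suggest:

```python
import re

from gha_utils.test_plan import CLITestCase

case = CLITestCase(
    # A single string is split with shlex on non-Windows platforms.
    cli_parameters="--verbosity DEBUG --version",
    # Only run this case on Linux runners.
    only_platforms=("linux",),
    # Regexes are compiled with re.DOTALL during __post_init__.
    stdout_regex_matches=(r"\d+\.\d+\.\d+",),
)

assert all(isinstance(p, re.Pattern) for p in case.stdout_regex_matches)
```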
@@ -72,53 +89,78 @@ class TestCase:
             if not isinstance(field_data, bool):
                 raise ValueError(f"strip_ansi is not a boolean: {field_data}")
 
-            # Validates and normalize regex fullmatch fields.
-            elif field_id.endswith("_fullmatch"):
-                if field_data:
-                    if not isinstance(field_data, str):
-                        raise ValueError(f"{field_id} is not a string: {field_data}")
-                # Normalize empty strings to None.
-                else:
-                    field_data = None
-
             # Validates and normalize tuple of strings.
             else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if field_data:
+                    # Wraps single string and other types into a tuple.
+                    if isinstance(field_data, str) or not isinstance(
+                        field_data, Sequence
+                    ):
+                        # CLI parameters needs to be split on Unix-like systems.
+                        # XXX If we need the same for Windows, have a look at:
+                        # https://github.com/maxpat78/w32lex
+                        if field_id == "cli_parameters" and sys.platform != "win32":
+                            field_data = tuple(shlex.split(field_data))
+                        else:
+                            field_data = (field_data,)
+
+                    for item in field_data:
+                        if not isinstance(item, str):
+                            raise ValueError(f"Invalid string in {field_id}: {item}")
+                    # Ignore blank value.
+                    field_data = tuple(i for i in field_data if i.strip())
+
+            # Normalize any mishmash of platform and group IDs into a set of platforms.
+            if field_id.endswith("_platforms") and field_data:
+                field_data = frozenset(Group._extract_platforms(field_data))
+
+            # Validates fields containing one or more regexes.
+            if "_regex_" in field_id and field_data:
+                # Compile all regexes.
+                valid_regexes = []
                 for regex in flatten((field_data,)):
                     try:
-
+                        # Let dots in regex match newlines.
+                        valid_regexes.append(re.compile(regex, re.DOTALL))
                     except re.error as ex:
                         raise ValueError(
                             f"Invalid regex in {field_id}: {regex}"
                         ) from ex
+                # Normalize single regex to a single element.
+                if field_id.endswith("_fullmatch"):
+                    if valid_regexes:
+                        field_data = valid_regexes.pop()
+                    else:
+                        field_data = None
+                else:
+                    field_data = tuple(valid_regexes)
 
             setattr(self, field_id, field_data)
 
-    def
+    def run_cli_test(
+        self,
+        binary: str | Path,
+        additional_skip_platforms: _TNestedReferences | None,
+        default_timeout: float | None,
+    ):
         """Run a CLI command and check its output against the test case.
 
         ..todo::
             Add support for environment variables.
 
         ..todo::
-            Add support for proper mixed stdout
+            Add support for proper mixed <stdout>/<stderr> stream as a single,
             intertwined output.
         """
+        if self.only_platforms:
+            if current_os() not in self.only_platforms:  # type: ignore[operator]
+                raise SkippedTest(f"Test case only runs on platform: {current_os()}")
+
+        if current_os() in Group._extract_platforms(
+            self.skip_platforms, additional_skip_platforms
+        ):
+            raise SkippedTest(f"Skipping test case on platform: {current_os()}")
+
         if self.timeout is None and default_timeout is not None:
             logging.info(f"Set default test case timeout to {default_timeout} seconds")
             self.timeout = default_timeout
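`run_cli_test()` now raises `SkippedTest` before spawning the binary when the current platform is excluded, which is what lets the command above count skips separately from failures. A hedged usage sketch; the binary path and platform IDs are assumptions for illustration:

```python
from gha_utils.test_plan import CLITestCase, SkippedTest

case = CLITestCase(cli_parameters="--version", skip_platforms=("windows",))
try:
    case.run_cli_test(
        "./gha-utils-linux-x64.bin",           # hypothetical binary path
        additional_skip_platforms=("macos",),  # same effect as --skip-platform macos
        default_timeout=60.0,
    )
except SkippedTest as ex:
    print(f"Test skipped: {ex}")
except Exception as ex:
    print(f"Test failed: {ex}")
```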
@@ -156,24 +198,33 @@ class TestCase:
                 # encoding="utf-8",
                 text=True,
             )
-        except TimeoutExpired
+        except TimeoutExpired:
             raise TimeoutError(
-                f"CLI timed out after {self.timeout} seconds: {clean_args}"
-            )
+                f"CLI timed out after {self.timeout} seconds: {' '.join(clean_args)}"
+            )
 
-
+        for line in render_cli_run(clean_args, result).splitlines():
+            logging.info(line)
 
         for field_id, field_data in asdict(self).items():
-            if field_id == "
+            if field_id == "exit_code":
+                if field_data is not None:
+                    logging.info(f"Test exit code, expecting: {field_data}")
+                    if result.returncode != field_data:
+                        raise AssertionError(
+                            f"CLI exited with code {result.returncode}, "
+                            f"expected {field_data}"
+                        )
+                # The specific exit code matches, let's proceed to the next test.
                 continue
 
-
-
-
-
-
-            )
+            # Ignore non-output fields, and empty test cases.
+            elif not (
+                field_id.startswith(("output_", "stdout_", "stderr_")) and field_data
+            ):
+                continue
 
+            # Prepare output and name for comparison.
             output = ""
             name = ""
             if field_id.startswith("output_"):
@@ -188,50 +239,50 @@ class TestCase:
                 name = "<stderr>"
 
             if self.strip_ansi:
+                logging.info(f"Strip ANSI sequences from {name}")
                 output = strip_ansi(output)
 
             if field_id.endswith("_contains"):
                 for sub_string in field_data:
+                    logging.info(f"Check if {name} contains {sub_string!r}")
                     if sub_string not in output:
-                        raise AssertionError(
-                            f"CLI's {name} does not contain {sub_string!r}"
-                        )
+                        raise AssertionError(f"{name} does not contain {sub_string!r}")
 
             elif field_id.endswith("_regex_matches"):
                 for regex in field_data:
-                    if
-
-
-                    )
+                    logging.info(f"Check if {name} matches {sub_string!r}")
+                    if not regex.search(output):
+                        raise AssertionError(f"{name} does not match regex {regex}")
 
             elif field_id.endswith("_regex_fullmatch"):
                 regex = field_data
-                if not
-                    raise AssertionError(
-                        f"CLI's {name} does not fully match regex {regex!r}"
-                    )
+                if not regex.fullmatch(output):
+                    raise AssertionError(f"{name} does not fully match regex {regex}")
 
 
-DEFAULT_TEST_PLAN =
+DEFAULT_TEST_PLAN: list[CLITestCase] = [
     # Output the version of the CLI.
-
+    CLITestCase(cli_parameters="--version"),
     # Test combination of version and verbosity.
-
+    CLITestCase(cli_parameters=("--verbosity", "DEBUG", "--version")),
     # Test help output.
-
-
+    CLITestCase(cli_parameters="--help"),
+]
+
 
+def parse_test_plan(plan_string: str | None) -> Generator[CLITestCase, None, None]:
+    if not plan_string:
+        raise ValueError("Empty test plan")
 
-
-    plan = yaml.full_load(plan_path.read_text(encoding="UTF-8"))
+    plan = yaml.full_load(plan_string)
 
     # Validates test plan structure.
     if not plan:
-        raise ValueError(
+        raise ValueError("Empty test plan")
     if not isinstance(plan, list):
         raise ValueError(f"Test plan is not a list: {plan}")
 
-    directives = frozenset(
+    directives = frozenset(CLITestCase.__dataclass_fields__.keys())
 
     for index, test_case in enumerate(plan):
         # Validates test case structure.
@@ -243,4 +294,4 @@ def parse_test_plan(plan_path: Path) -> Generator[TestCase, None, None]:
                 f"{set(test_case) - directives}"
             )
 
-        yield
+        yield CLITestCase(**test_case)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gha-utils
-Version: 4.14.1
+Version: 4.15.0
 Summary: ⚙️ CLI helpers for GitHub Actions + reuseable workflows
 Author-email: Kevin Deldycke <kevin@deldycke.com>
 Project-URL: Homepage, https://github.com/kdeldycke/workflows
@@ -19,7 +19,6 @@ Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Operating System :: Microsoft :: Windows
 Classifier: Operating System :: POSIX :: Linux
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
@@ -43,17 +42,16 @@ Classifier: Topic :: Text Processing :: Markup :: HTML
 Classifier: Topic :: Text Processing :: Markup :: Markdown
 Classifier: Topic :: Utilities
 Classifier: Typing :: Typed
-Requires-Python: >=3.10
+Requires-Python: >=3.11
 Description-Content-Type: text/markdown
-Requires-Dist: backports.strenum~=1.3.1; python_version < "3.11"
 Requires-Dist: boltons>=24.0.0
-Requires-Dist: bump-my-version>=0.
-Requires-Dist: click-extra~=4.
+Requires-Dist: bump-my-version>=0.32.2
+Requires-Dist: click-extra~=4.15.0
+Requires-Dist: extra-platforms~=3.1.0
 Requires-Dist: packaging~=24.1
 Requires-Dist: PyDriller~=2.6
 Requires-Dist: pyproject-metadata~=0.9.0
 Requires-Dist: pyyaml~=6.0.0
-Requires-Dist: tomli~=2.0.1; python_version < "3.11"
 Requires-Dist: wcmatch>=8.5
 Provides-Extra: test
 Requires-Dist: coverage[toml]~=7.6.0; extra == "test"
@@ -137,7 +135,7 @@ $ uvx gha-utils --version
 gha-utils, version 4.9.0
 ```
 
-That's the best way to get started with `gha-utils
+That's the best way to get started with `gha-utils` and experiment with it.
 
 ### Executables
 
@@ -1,16 +1,13 @@
 boltons>=24.0.0
-bump-my-version>=0.
-click-extra~=4.
+bump-my-version>=0.32.2
+click-extra~=4.15.0
+extra-platforms~=3.1.0
 packaging~=24.1
 PyDriller~=2.6
 pyproject-metadata~=0.9.0
 pyyaml~=6.0.0
 wcmatch>=8.5
 
-[:python_version < "3.11"]
-backports.strenum~=1.3.1
-tomli~=2.0.1
-
 [test]
 coverage[toml]~=7.6.0
 pytest~=8.3.1
@@ -1,9 +1,9 @@
 [project]
 # Docs: https://packaging.python.org/en/latest/guides/writing-pyproject-toml/
 name = "gha-utils"
-version = "4.14.1"
+version = "4.15.0"
 # Python versions and their status: https://devguide.python.org/versions/
-requires-python = ">= 3.10"
+requires-python = ">= 3.11"
 description = "⚙️ CLI helpers for GitHub Actions + reuseable workflows"
 authors = [{ name = "Kevin Deldycke", email = "kevin@deldycke.com" }]
 readme = "readme.md"
@@ -42,7 +42,6 @@ classifiers = [
     'Operating System :: Microsoft :: Windows',
     'Operating System :: POSIX :: Linux',
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3.13",
@@ -68,19 +67,18 @@ classifiers = [
     'Typing :: Typed',
 ]
 dependencies = [
-    "backports.strenum ~= 1.3.1 ; python_version < '3.11'",
     # Loose version to let click-extra set its own stricter dependency.
     "boltons >= 24.0.0",
     # Dependency version is more relaxed on bump-my-version to prevent chicken and egg
     # while releasing gha-utils itself.
-    # v0.
-    "bump-my-version >= 0.
-    "click-extra ~= 4.
+    # v0.32.2 is the first fixing an issue preventing compilation with Nuitka.
+    "bump-my-version >= 0.32.2",
+    "click-extra ~= 4.15.0",
+    "extra-platforms ~= 3.1.0",
     "packaging ~= 24.1",
     "PyDriller ~= 2.6",
     "pyproject-metadata ~= 0.9.0",
     "pyyaml ~= 6.0.0",
-    "tomli ~= 2.0.1 ; python_version < '3.11'",
     # Loose version to let click-extra set its own stricter dependency.
     "wcmatch >= 8.5",
 ]
@@ -138,7 +136,7 @@ addopts = [
 xfail_strict = true
 
 [tool.bumpversion]
-current_version = "4.14.1"
+current_version = "4.15.0"
 allow_dirty = true
 ignore_missing_files = true
 
@@ -27,7 +27,7 @@ def test_remove_header():
 # Generated by gha-utils mailmap-sync v4.4.3 - https://github.com/kdeldycke/workflows
 # Timestamp: 2024-08-12T08:15:41.083405
 # Format is:
-# Preferred Name <preferred e-mail>
+# Preferred Name <preferred e-mail> Other Name <other e-mail>
 #
 # Reference: https://git-scm.com/docs/git-blame#_mapping_authors
 
@@ -36,7 +36,7 @@ def test_remove_header():
 
     assert remove_header(content) == dedent("""\
 # Format is:
-# Preferred Name <preferred e-mail>
+# Preferred Name <preferred e-mail> Other Name <other e-mail>
 #
 # Reference: https://git-scm.com/docs/git-blame#_mapping_authors
 
@@ -33,9 +33,9 @@ def test_metadata_github_format():
         r"doc_files=[\S ]*\n"
         r"is_python_project=true\n"
         r"package_name=gha-utils\n"
-        r"blacken_docs_params=--target-version
+        r"blacken_docs_params=--target-version py311 "
         r"--target-version py312 --target-version py313\n"
-        r"mypy_params=--python-version 3\.
+        r"mypy_params=--python-version 3\.11\n"
         r"current_version=\n"
         r"released_version=\n"
         r"is_sphinx=false\n"
@@ -44,20 +44,20 @@ def test_metadata_github_format():
         r"new_commits_matrix=\n"
         r"release_commits_matrix=\n"
         r'nuitka_matrix=\{"os": \["ubuntu-24\.04", "ubuntu-24\.04-arm", '
-        r'"macos-15", "macos-13", "windows-
+        r'"macos-15", "macos-13", "windows-2025"\], '
         r'"entry_point": \["gha-utils"\], "commit": \["[a-z0-9]+"\], '
         r'"include": \[\{"entry_point": "gha-utils", '
         r'"cli_id": "gha-utils", "module_id": "gha_utils\.__main__", '
         r'"callable_id": "main", '
        r'"module_path": "gha_utils(/|\\\\)__main__\.py"\}, '
         r'\{"commit": "[a-z0-9]+", "short_sha": "[a-z0-9]+", '
-        r'"current_version": "
+        r'"current_version": "[0-9\.]+"\}, \{"os": "ubuntu-24\.04", '
         r'"platform_id": "linux", "arch": "x64", "extension": "bin"\}, '
         r'\{"os": "ubuntu-24\.04-arm", "platform_id": "linux", '
         r'"arch": "arm64", "extension": "bin"\}, \{"os": "macos-15", '
         r'"platform_id": "macos", "arch": "arm64", "extension": "bin"\}, '
         r'\{"os": "macos-13", "platform_id": "macos", "arch": "x64", '
-        r'"extension": "bin"\}, \{"os": "windows-
+        r'"extension": "bin"\}, \{"os": "windows-2025", '
         r'"platform_id": "windows", "arch": "x64", "extension": "exe"\}, '
         r'\{"os": "ubuntu-24\.04", "entry_point": "gha-utils", '
         r'"commit": "[a-z0-9]+", '
@@ -71,7 +71,7 @@ def test_metadata_github_format():
         r'\{"os": "macos-13", "entry_point": "gha-utils", '
         r'"commit": "[a-z0-9]+", '
         r'"bin_name": "gha-utils-macos-x64-build-[a-z0-9]+\.bin"\}, '
-        r'\{"os": "windows-
+        r'\{"os": "windows-2025", "entry_point": "gha-utils", '
         r'"commit": "[a-z0-9]+", '
         r'"bin_name": "gha-utils-windows-x64-build-[a-z0-9]+\.exe"\}\]\}\n'
     ),
@@ -93,11 +93,10 @@ def test_metadata_plain_format():
         r"'is_python_project': True, "
         r"'package_name': 'gha-utils', "
         r"'blacken_docs_params': \("
-        r"'--target-version py310', "
         r"'--target-version py311', "
         r"'--target-version py312', "
         r"'--target-version py313'\), "
-        r"'mypy_params': '--python-version 3\.
+        r"'mypy_params': '--python-version 3\.11', "
         r"'current_version': None, "
         r"'released_version': None, "
         r"'is_sphinx': False, "
@@ -107,20 +106,20 @@ def test_metadata_plain_format():
         r"'release_commits_matrix': None, "
         r"'nuitka_matrix': <Matrix: \{"
         r"'os': \('ubuntu-24\.04', 'ubuntu-24\.04-arm', "
-        r"'macos-15', 'macos-13', 'windows-
+        r"'macos-15', 'macos-13', 'windows-2025'\), "
         r"'entry_point': \('gha-utils',\), "
         r"'commit': \('[a-z0-9]+',\)\}; "
         r"include=\(\{'entry_point': 'gha-utils', 'cli_id': 'gha-utils', "
         r"'module_id': 'gha_utils\.__main__', 'callable_id': 'main', "
         r"'module_path': 'gha_utils(/|\\\\)__main__\.py'\}, "
         r"\{'commit': '[a-z0-9]+', 'short_sha': '[a-z0-9]+', "
-        r"'current_version': '
+        r"'current_version': '[0-9\.]+'\}, \{'os': 'ubuntu-24\.04', "
         r"'platform_id': 'linux', 'arch': 'x64', 'extension': 'bin'}, "
         r"{'os': 'ubuntu-24\.04-arm', 'platform_id': 'linux', "
         r"'arch': 'arm64', 'extension': 'bin'\}, \{'os': 'macos-15', "
         r"'platform_id': 'macos', 'arch': 'arm64', 'extension': 'bin'\}, "
         r"\{'os': 'macos-13', 'platform_id': 'macos', 'arch': 'x64', "
-        r"'extension': 'bin'\}, \{'os': 'windows-
+        r"'extension': 'bin'\}, \{'os': 'windows-2025', 'platform_id': "
         r"'windows', 'arch': 'x64', 'extension': 'exe'\}, "
         r"\{'os': 'ubuntu-24\.04', 'entry_point': 'gha-utils', "
         r"'commit': '[a-z0-9]+', "
@@ -134,7 +133,7 @@ def test_metadata_plain_format():
         r"\{'os': 'macos-13', 'entry_point': 'gha-utils', "
         r"'commit': '[a-z0-9]+', 'bin_name': "
         r"'gha-utils-macos-x64-build-[a-z0-9]+\.bin'\}, "
-        r"\{'os': 'windows-
+        r"\{'os': 'windows-2025', 'entry_point': 'gha-utils', "
         r"'commit': '[a-z0-9]+', "
         r"'bin_name': 'gha-utils-windows-x64-build-[a-z0-9]+\.exe'\}\); "
         r"exclude=\(\)>\}"