smart-tests-cli 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smart_tests/__init__.py +0 -0
- smart_tests/__main__.py +60 -0
- smart_tests/app.py +67 -0
- smart_tests/args4p/README.md +102 -0
- smart_tests/args4p/__init__.py +13 -0
- smart_tests/args4p/argument.py +45 -0
- smart_tests/args4p/command.py +593 -0
- smart_tests/args4p/converters/__init__.py +75 -0
- smart_tests/args4p/decorators.py +98 -0
- smart_tests/args4p/exceptions.py +12 -0
- smart_tests/args4p/option.py +85 -0
- smart_tests/args4p/parameter.py +84 -0
- smart_tests/args4p/typer/__init__.py +42 -0
- smart_tests/commands/__init__.py +0 -0
- smart_tests/commands/compare/__init__.py +11 -0
- smart_tests/commands/compare/subsets.py +58 -0
- smart_tests/commands/detect_flakes.py +105 -0
- smart_tests/commands/inspect/__init__.py +13 -0
- smart_tests/commands/inspect/model.py +52 -0
- smart_tests/commands/inspect/subset.py +138 -0
- smart_tests/commands/record/__init__.py +19 -0
- smart_tests/commands/record/attachment.py +38 -0
- smart_tests/commands/record/build.py +356 -0
- smart_tests/commands/record/case_event.py +190 -0
- smart_tests/commands/record/commit.py +157 -0
- smart_tests/commands/record/session.py +120 -0
- smart_tests/commands/record/tests.py +498 -0
- smart_tests/commands/stats/__init__.py +11 -0
- smart_tests/commands/stats/test_sessions.py +45 -0
- smart_tests/commands/subset.py +567 -0
- smart_tests/commands/test_path_writer.py +51 -0
- smart_tests/commands/verify.py +153 -0
- smart_tests/jar/exe_deploy.jar +0 -0
- smart_tests/plugins/__init__.py +0 -0
- smart_tests/test_runners/__init__.py +0 -0
- smart_tests/test_runners/adb.py +24 -0
- smart_tests/test_runners/ant.py +35 -0
- smart_tests/test_runners/bazel.py +103 -0
- smart_tests/test_runners/behave.py +62 -0
- smart_tests/test_runners/codeceptjs.py +33 -0
- smart_tests/test_runners/ctest.py +164 -0
- smart_tests/test_runners/cts.py +189 -0
- smart_tests/test_runners/cucumber.py +451 -0
- smart_tests/test_runners/cypress.py +46 -0
- smart_tests/test_runners/dotnet.py +106 -0
- smart_tests/test_runners/file.py +20 -0
- smart_tests/test_runners/flutter.py +251 -0
- smart_tests/test_runners/go_test.py +99 -0
- smart_tests/test_runners/googletest.py +34 -0
- smart_tests/test_runners/gradle.py +96 -0
- smart_tests/test_runners/jest.py +52 -0
- smart_tests/test_runners/maven.py +149 -0
- smart_tests/test_runners/minitest.py +40 -0
- smart_tests/test_runners/nunit.py +190 -0
- smart_tests/test_runners/playwright.py +252 -0
- smart_tests/test_runners/prove.py +74 -0
- smart_tests/test_runners/pytest.py +358 -0
- smart_tests/test_runners/raw.py +238 -0
- smart_tests/test_runners/robot.py +125 -0
- smart_tests/test_runners/rspec.py +5 -0
- smart_tests/test_runners/smart_tests.py +235 -0
- smart_tests/test_runners/vitest.py +49 -0
- smart_tests/test_runners/xctest.py +79 -0
- smart_tests/testpath.py +154 -0
- smart_tests/utils/__init__.py +0 -0
- smart_tests/utils/authentication.py +78 -0
- smart_tests/utils/ci_provider.py +7 -0
- smart_tests/utils/commands.py +14 -0
- smart_tests/utils/commit_ingester.py +59 -0
- smart_tests/utils/common_tz.py +12 -0
- smart_tests/utils/edit_distance.py +11 -0
- smart_tests/utils/env_keys.py +19 -0
- smart_tests/utils/exceptions.py +34 -0
- smart_tests/utils/fail_fast_mode.py +99 -0
- smart_tests/utils/file_name_pattern.py +4 -0
- smart_tests/utils/git_log_parser.py +53 -0
- smart_tests/utils/glob.py +44 -0
- smart_tests/utils/gzipgen.py +46 -0
- smart_tests/utils/http_client.py +169 -0
- smart_tests/utils/java.py +61 -0
- smart_tests/utils/link.py +149 -0
- smart_tests/utils/logger.py +53 -0
- smart_tests/utils/no_build.py +2 -0
- smart_tests/utils/sax.py +119 -0
- smart_tests/utils/session.py +73 -0
- smart_tests/utils/smart_tests_client.py +134 -0
- smart_tests/utils/subprocess.py +12 -0
- smart_tests/utils/tracking.py +95 -0
- smart_tests/utils/typer_types.py +241 -0
- smart_tests/version.py +7 -0
- smart_tests_cli-2.0.0.dist-info/METADATA +168 -0
- smart_tests_cli-2.0.0.dist-info/RECORD +96 -0
- smart_tests_cli-2.0.0.dist-info/WHEEL +5 -0
- smart_tests_cli-2.0.0.dist-info/entry_points.txt +2 -0
- smart_tests_cli-2.0.0.dist-info/licenses/LICENSE.txt +202 -0
- smart_tests_cli-2.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import sys
|
|
3
|
+
from abc import ABCMeta, abstractmethod
|
|
4
|
+
from http import HTTPStatus
|
|
5
|
+
from typing import Annotated, List
|
|
6
|
+
|
|
7
|
+
import click
|
|
8
|
+
from tabulate import tabulate
|
|
9
|
+
|
|
10
|
+
import smart_tests.args4p.typer as typer
|
|
11
|
+
|
|
12
|
+
from ... import args4p
|
|
13
|
+
from ...app import Application
|
|
14
|
+
from ...utils.smart_tests_client import SmartTestsClient
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class SubsetResult(object):
    """One test path entry returned by the subset API, tagged with membership."""

    def __init__(self, result: dict, is_subset: bool):
        # The server reports duration in milliseconds; keep seconds internally.
        duration_msec = result.get("duration", 0.0)
        self._estimated_duration_sec = duration_msec / 1000
        # Render each component as "type=name" joined by "#", skipping entries
        # that are missing either key.
        components = []
        for component in result["testPath"]:
            if component.keys() >= {"type", "name"}:
                components.append(component["type"] + "=" + component["name"])
        self._test_path = "#".join(components)
        # True when this entry belongs to the requested subset (vs. the rest).
        self._is_subset = is_subset
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class SubsetResults(object):
    """An ordered collection of SubsetResult entries, split by subset membership."""

    def __init__(self, results: "List[SubsetResult]"):
        self._results = results

    def add_subset(self, subset: List):
        """Append every raw server entry in `subset` as an in-subset result."""
        self._results.extend(SubsetResult(entry, True) for entry in subset)

    def add_rest(self, rest: List):
        """Append every raw server entry in `rest` as an out-of-subset result."""
        self._results.extend(SubsetResult(entry, False) for entry in rest)

    def list(self) -> "List[SubsetResult]":
        """All results: subset entries first, then the rest."""
        return self.list_subset() + self.list_rest()

    def list_subset(self) -> "List[SubsetResult]":
        """Only the entries that belong to the subset, in insertion order."""
        return [entry for entry in self._results if entry._is_subset]

    def list_rest(self) -> "List[SubsetResult]":
        """Only the entries outside the subset, in insertion order."""
        return [entry for entry in self._results if not entry._is_subset]
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class SubsetResultAbstractDisplay(metaclass=ABCMeta):
    """Base class for rendering SubsetResults; subclasses implement display()."""

    def __init__(self, results: "SubsetResults"):
        self._results = results

    @abstractmethod
    def display(self):
        """Render `self._results` to the user."""
        raise NotImplementedError("display method is not implemented")
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class SubsetResultTableDisplay(SubsetResultAbstractDisplay):
    """Renders subset results as a GitHub-flavored table through a pager."""

    def __init__(self, results: "SubsetResults"):
        super().__init__(results)

    def display(self):
        headers = ["Order", "Test Path", "In Subset", "Estimated duration (sec)"]
        # One row per result; subset entries come first because list() orders
        # them ahead of the rest.
        rows = [
            [
                order,
                result._test_path,
                "✔" if result._is_subset else "",
                result._estimated_duration_sec,
            ]
            for order, result in enumerate(self._results.list(), start=1)
        ]
        click.echo_via_pager(tabulate(rows, headers, tablefmt="github", floatfmt=".2f"))
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class SubsetResultJSONDisplay(SubsetResultAbstractDisplay):
    """Renders subset results as a JSON document on stdout."""

    def __init__(self, results: "SubsetResults"):
        super().__init__(results)

    def display(self):
        def _entry(result):
            # Durations are rounded to two decimals to match the table display.
            return {
                "test_path": result._test_path,
                "estimated_duration_sec": round(result._estimated_duration_sec, 2),
            }

        result_json = {
            "subset": [_entry(r) for r in self._results.list_subset()],
            "rest": [_entry(r) for r in self._results.list_rest()],
        }

        click.echo(json.dumps(result_json, indent=2))
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
@args4p.command(help="Inspect subset data")
def subset(
    app: Application,
    subset_id: Annotated[int, typer.Option(
        help="subset id",
        required=True
    )],
    json: Annotated[bool, typer.Option(
        help="display JSON format"
    )] = False,
):
    """Fetch a subset by id from the service and print it as a table or JSON."""
    # The CLI flag is named `json`; bind a distinctly named local so the
    # format choice stays readable further down.
    is_json_format = json

    subset_paths = []
    rest_paths = []
    client = SmartTestsClient(app=app)
    try:
        res = client.request("get", f"subset/{subset_id}")

        if res.status_code == HTTPStatus.NOT_FOUND:
            click.secho(
                f"Subset {subset_id} not found. Check subset ID and try again.", fg='yellow', err=True)
            sys.exit(1)

        res.raise_for_status()
        body = res.json()
        subset_paths = body["testPaths"]
        rest_paths = body["rest"]
    except Exception as e:
        # Service errors degrade to a warning; we fall through and render
        # whatever (possibly empty) data we collected.
        client.print_exception_and_recover(e, "Warning: failed to inspect subset")

    results = SubsetResults([])
    results.add_subset(subset_paths)
    results.add_rest(rest_paths)

    displayer: SubsetResultAbstractDisplay = (
        SubsetResultJSONDisplay(results) if is_json_format else SubsetResultTableDisplay(results)
    )
    displayer.display()
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from ... import args4p
|
|
2
|
+
from ...app import Application
|
|
3
|
+
from .attachment import attachment
|
|
4
|
+
from .build import build
|
|
5
|
+
from .commit import commit
|
|
6
|
+
from .session import session
|
|
7
|
+
from .tests import tests
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@args4p.group(help="Record test results, builds, commits, and sessions")
def record(app: Application):
    """Parent command group for all `record` subcommands.

    Returns the Application object so that subcommands invoked under this
    group receive it as their context.
    """
    return app


# Register the subcommands under the `record` group.
record.add_command(build)
record.add_command(commit)
record.add_command(tests)
record.add_command(session)
record.add_command(attachment)
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
from typing import Annotated, List
|
|
2
|
+
|
|
3
|
+
import click
|
|
4
|
+
|
|
5
|
+
import smart_tests.args4p.typer as typer
|
|
6
|
+
from smart_tests.utils.session import get_session
|
|
7
|
+
|
|
8
|
+
from ... import args4p
|
|
9
|
+
from ...app import Application
|
|
10
|
+
from ...utils.smart_tests_client import SmartTestsClient
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@args4p.command(help="Record attachment information")
def attachment(
    app: Application,
    session: Annotated[str, typer.Option(
        "--session",
        help="test session name",
        required=True
    )],
    attachments: Annotated[List[str], typer.Argument(
        multiple=True,
        help="Attachment files to upload"
    )],
):
    """Upload one or more attachment files to an existing test session."""
    client = SmartTestsClient(app=app)
    try:
        # Validate that the session exists before attempting any upload; the
        # return value itself is not needed.
        _ = get_session(session, client)
        for attachment_path in attachments:
            click.echo(f"Sending {attachment_path}")
            with open(attachment_path, mode='rb') as payload_file:
                res = client.request(
                    "post", f"{session}/attachment", compress=True, payload=payload_file,
                    additional_headers={"Content-Disposition": f"attachment;filename=\"{attachment_path}\""})
                res.raise_for_status()
    except Exception as e:
        # A failed upload (or missing session) is reported but does not crash
        # the CLI; remaining attachments are skipped once one fails.
        client.print_exception_and_recover(e)
|
|
@@ -0,0 +1,356 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import re
|
|
3
|
+
import sys
|
|
4
|
+
from typing import Annotated, List
|
|
5
|
+
|
|
6
|
+
import click
|
|
7
|
+
from tabulate import tabulate
|
|
8
|
+
|
|
9
|
+
import smart_tests.args4p.typer as typer
|
|
10
|
+
from smart_tests.commands.record.session import KeyValue, parse_key_value
|
|
11
|
+
from smart_tests.utils.link import CIRCLECI_KEY, GITHUB_ACTIONS_KEY, JENKINS_URL_KEY, capture_links
|
|
12
|
+
from smart_tests.utils.tracking import Tracking, TrackingClient
|
|
13
|
+
|
|
14
|
+
from ... import args4p
|
|
15
|
+
from ...app import Application
|
|
16
|
+
from ...utils import subprocess
|
|
17
|
+
from ...utils.authentication import get_org_workspace
|
|
18
|
+
from ...utils.commands import Command
|
|
19
|
+
from ...utils.fail_fast_mode import set_fail_fast_mode, warn_and_exit_if_fail_fast_mode
|
|
20
|
+
from ...utils.smart_tests_client import SmartTestsClient
|
|
21
|
+
from ...utils.typer_types import validate_datetime_with_tz, validate_key_value, validate_past_datetime
|
|
22
|
+
from .commit import commit
|
|
23
|
+
|
|
24
|
+
# Environment variable names used to discover the current branch on supported
# CI providers (read in Workspace.calc_branch_name inside `build` below).
# Jenkins
JENKINS_GIT_BRANCH_KEY = "GIT_BRANCH"
JENKINS_GIT_LOCAL_BRANCH_KEY = "GIT_LOCAL_BRANCH"
# GitHub Actions
GITHUB_ACTIONS_GITHUB_HEAD_REF_KEY = "GITHUB_HEAD_REF"
GITHUB_ACTIONS_GITHUB_BASE_REF_KEY = "GITHUB_BASE_REF"
# CircleCI
CIRCLECI_CIRCLE_BRANCH_KEY = "CIRCLE_BRANCH"
# AWS CodeBuild
CODE_BUILD_BUILD_ID_KEY = "CODEBUILD_BUILD_ID"
CODE_BUILD_WEBHOOK_HEAD_REF_KEY = "CODEBUILD_WEBHOOK_HEAD_REF"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@args4p.command(help="Record build information")
def build(
    app: Application,
    build_name: Annotated[str, typer.Option(
        "--build",
        help="build name",
        metavar="BUILD_NAME",
        required=True
    )],
    branch: Annotated[str | None, typer.Option(
        "--branch",
        help="Branch name. A branch is a set of test sessions grouped and this option value will be used for a lineage name."
    )] = None,
    repositories: Annotated[List[str], typer.Option(
        "--repo-branch-map",
        multiple=True,
        help="Set repository name and branch name when you use --no-commit-collection option. "
             "Please use the same repository name with a commit option"
    )] = [],
    source: Annotated[List[str], typer.Option(
        multiple=True,
        help="path to local Git workspace, optionally prefixed by a label. "
             "like --source path/to/ws or --source main=path/to/ws",
        metavar="REPO_NAME"
    )] = ["."],
    max_days: Annotated[int, typer.Option(
        help="the maximum number of days to collect commits retroactively"
    )] = 30,
    no_submodules: Annotated[bool, typer.Option(
        help="stop collecting information from Git Submodules"
    )] = False,
    no_commit_collection: Annotated[bool, typer.Option(
        help="do not collect commit data. "
             "This is useful if the repository is a shallow clone and the RevWalk is not "
             "possible. The commit data must be collected with a separate fully-cloned "
             "repository."
    )] = False,
    commits: Annotated[List[str], typer.Option(
        "--commit",
        multiple=True,
        help="set repository name and commit hash when you use --no-commit-collection option"
    )] = [],
    timestamp: Annotated[str | None, typer.Option(
        help="Used to overwrite the build time when importing historical data. "
             "Note: Format must be `YYYY-MM-DDThh:mm:ssTZD` or `YYYY-MM-DDThh:mm:ss` (local timezone applied)"
    )] = None,
    links: Annotated[List[KeyValue], typer.Option(
        "--link",
        multiple=True,
        help="Set external link of a title and url",
        type=parse_key_value,
    )] = [],
):
    """Record a build to the service.

    Discovers the Git repositories involved — either by scanning --source
    workspaces (and their submodules), or from explicit --commit values when
    commit collection is disabled — resolves each repository's commit hash
    and branch, then posts the build record to the server and reports a
    summary table to the user.
    """

    # Parse key-value pairs for commits
    parsed_commits = [validate_key_value(c) for c in commits]

    # Parse timestamp if provided
    parsed_timestamp = None
    if timestamp:
        parsed_timestamp = validate_past_datetime(validate_datetime_with_tz(timestamp))

    tracking_client = TrackingClient(Command.RECORD_BUILD, app=app)
    client = SmartTestsClient(app=app, tracking_client=tracking_client)
    set_fail_fast_mode(client.is_fail_fast_mode())

    # The build name becomes part of a URL path on the server side, so it
    # must not contain a raw or percent-encoded slash, nor an encoded "%".
    if "/" in build_name or "%2f" in build_name.lower():
        click.echo("--build must not contain a slash and an encoded slash", err=True)
        raise typer.Exit(1)
    if "%25" in build_name:
        click.echo("--build must not contain encoded % (%25)", err=True)
        raise typer.Exit(1)
    if not no_commit_collection and len(parsed_commits) != 0:
        click.echo("--no-commit-collection must be specified when --commit is used", err=True)
        raise typer.Exit(1)
    if not no_commit_collection and len(repositories) != 0:
        click.echo("--no-commit-collection must be specified when --repo-branch-map is used", err=True)
        raise typer.Exit(1)

    # Information we want to collect for each Git repository
    # The key data structure throughout the implementation of this command

    class Workspace:
        # identifier given to a Git repository to track the same repository from one 'record build' to next
        name: str
        # path to the Git workspace. Can be None if there's no local workspace present
        dir: str
        # current branch of this workspace
        branch: str | None = None
        # SHA1 commit hash that's currently checked out
        commit_hash: str

        def __init__(self, name, dir=None, commit_hash=None):
            self.name = name
            self.dir = dir
            self.commit_hash = commit_hash

        def calc_branch_name(self):
            '''
            figure out the branch using the workspace. requires `dir` and `commit_hash` to be set.
            '''

            # Jenkins
            # ref:
            # https://www.theserverside.com/blog/Coffee-Talk-Java-News-Stories-and-Opinions/Complete-Jenkins-Git-environment-variables-list-for-batch-jobs-and-shell-script-builds
            if os.environ.get(JENKINS_URL_KEY):
                self.branch = os.environ.get(JENKINS_GIT_BRANCH_KEY) or os.environ.get(JENKINS_GIT_LOCAL_BRANCH_KEY)

            # Github Actions
            # ref: https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables
            # These environment variables cannot be retrieved when a `push` event is emitted.
            # Here is a note regarding the output of `git show-ref`:
            # - Git tag is pushed during a `push` event
            #   => ed6de84bde58d51deebe90e01ddfa5fa78899b1c refs/tags/tag-name
            # - Git commit is pushed during a `push` event
            #   => ed6de84bde58d51deebe90e01ddfa5fa78899b1c refs/heads/branch/branch-name
            if os.environ.get(GITHUB_ACTIONS_KEY):
                self.branch = os.environ.get(GITHUB_ACTIONS_GITHUB_HEAD_REF_KEY) or \
                    os.environ.get(GITHUB_ACTIONS_GITHUB_BASE_REF_KEY)

            # CircleCI
            # ref: https://circleci.com/docs/variables/
            if os.environ.get(CIRCLECI_KEY):
                self.branch = os.environ.get(CIRCLECI_CIRCLE_BRANCH_KEY)
            # AWS CodeBuild
            # ref: https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-env-vars.html
            if os.environ.get(CODE_BUILD_BUILD_ID_KEY):
                v = os.environ.get(CODE_BUILD_WEBHOOK_HEAD_REF_KEY)
                if v:
                    # refs/head/<branch name>
                    self.branch = v.split("/")[-1]

            if self.branch:
                return  # if we've figured this out, great

            # Fall back to asking Git which ref points at the checked-out commit.
            try:
                show_ref = subprocess.check_output(["git", "show-ref"], cwd=self.dir).decode()
                refs = [ref for ref in show_ref.split("\n") if self.commit_hash in ref]

                if len(refs) > 0:
                    # We assume the following values:
                    # * ed6de84bde58d51deebe90e01ddfa5fa78899b1c refs/heads/branch/branch-name
                    # * ed6de84bde58d51deebe90e01ddfa5fa78899b1c refs/remotes/origin/branch-name
                    match = re.search('[a-f0-9]{40} refs/(heads|remotes/origin)/(.*)', refs[0])
                    if match:
                        self.branch = match.group(2)
                    else:
                        self.branch = refs[0].split("/")[-1]
            except Exception:
                # cannot get branch name by git command
                pass

    # The first order of business is to ascertain what Git repositories we have in the workspace
    def list_sources() -> List[Workspace]:
        # This command accepts REPO_NAME=REPO_DIR as well as just REPO_DIR
        pattern = re.compile(r'[^=]+=[^=]+')
        ws: List[Workspace] = []
        for s in source:
            if pattern.match(s):
                kv = s.split('=')
                ws.append(Workspace(name=kv[0], dir=kv[1]))
            else:
                ws.append(Workspace(name=s, dir=s))
                # TODO: if repo_dir is absolute path, warn the user that that's probably
                # not what they want to do
        return ws

    # `record commit` on each top-level (= non submodule) Git repository
    # `record commit` command processes Git submodule on its own,
    # so we need to do this between list_sources and list_submodules.
    # NOTE: reads `ws` from the enclosing scope; it is called after `ws`
    # has been assigned by list_sources() below.
    def collect_commits():
        if not no_commit_collection:
            for w in ws:
                commit.callback(app, name=w.name, source=w.dir, max_days=max_days)
        else:
            click.secho(
                "Warning: Commit collection is turned off. The commit data must be collected separately.",
                fg='yellow', err=True)

    # tally up all the submodules, unless we are told not to
    def list_submodules(workspaces: List[Workspace]) -> List[Workspace]:
        if no_submodules:
            return workspaces

        r = workspaces.copy()
        for w in workspaces:
            submodule_pattern = re.compile(r"^[\+\-U ](?P<hash>[a-f0-9]{40}) (?P<name>\S+)")

            # invoke git directly because dulwich's submodule feature was broken
            submodule_stdouts = subprocess.check_output("git submodule status --recursive".split(),
                                                        cwd=w.dir).decode().splitlines()
            for submodule_stdout in submodule_stdouts:
                # the output is e.g.
                # "+bbf213437a65e82dd6dda4391ecc5d598200a6ce sub1 (heads/master)"
                matched = submodule_pattern.search(submodule_stdout)
                if matched:
                    commit_hash = matched.group('hash')
                    name = matched.group('name')
                    if commit_hash and name:
                        r.append(Workspace(
                            name=w.name + "/" + name,
                            dir=w.dir + "/" + name,
                            commit_hash=commit_hash))
        return r

    # figure out the commit hash and branch of those workspaces
    def compute_hash_and_branch(ws: List[Workspace]):
        ws_by_name = {w.name: w for w in ws}

        # Process repository options to create branch name mappings
        branch_name_map = dict()
        if len(repositories) == 1 and len(ws) == 1 and not ('=' in repositories[0]):
            # if there's only one repo and the short form "--repo-branch-map NAME" is used,
            # then we assign that to the first repo
            branch_name_map[ws[0].name] = repositories[0]
        else:
            for r in repositories:
                kv = r.split('=')
                if len(kv) != 2:
                    click.secho(
                        f"Expected --repo-branch-map REPO=BRANCHNAME but got {kv}",
                        fg='yellow', err=True)
                    raise typer.Exit(1)

                if not ws_by_name.get(kv[0]):
                    warn_and_exit_if_fail_fast_mode("Invalid repository name {repo} in a --branch option.\nThe repository “{repo}” is not specified via `--source` or `--commit` option.".format(repo=kv[0]))  # noqa: E501

                branch_name_map[kv[0]] = kv[1]

        for w in ws:
            try:
                if not w.commit_hash:
                    w.commit_hash = subprocess.check_output("git rev-parse HEAD".split(), cwd=w.dir).decode().replace("\n", "")
            except Exception as e:
                # BUGFIX: the message previously printed a literal "{}" because
                # .format() was never called; fill in the workspace directory.
                click.secho(
                    "Can't get commit hash for {}. Do you run command under git-controlled directory? "
                    "If not, please set a directory use by --source option.".format(w.dir),
                    fg='yellow', err=True)
                print(e, file=sys.stderr)
                raise typer.Exit(1)
            if w.name in branch_name_map:
                w.branch = branch_name_map[w.name]
            else:
                w.calc_branch_name()

    # Rely on --commit to create a list of workspaces, even when there's no local Git workspaces
    def synthesize_workspaces() -> List[Workspace]:
        ws = []

        commit_pattern = re.compile("[0-9A-Fa-f]{5,40}$")

        for name, hash in parsed_commits:
            if not commit_pattern.match(hash):
                click.secho(
                    f"{name}'s commit hash `{hash}` is invalid.",
                    fg='yellow', err=True)
                raise typer.Exit(1)

            ws.append(Workspace(name=name, commit_hash=hash))

        return ws

    # send all the data to server and obtain build_id, or none if the service is down, to recover
    def send(ws: List[Workspace]) -> str | None:
        # TODO(Konboi): port forward #1128
        # figure out all the CI links to capture
        def compute_links():
            return capture_links(link_options=links, env=os.environ)

        try:
            # The lineage defaults to the first workspace's branch when --branch
            # is not given explicitly.
            lineage = branch or ws[0].branch
            if lineage is None:
                click.echo("Unable to determine branch name. Please specify --branch option.", err=True)
                raise typer.Exit(1)

            payload = {
                "buildNumber": build_name,
                "lineage": lineage,
                "commitHashes": [{
                    'repositoryName': w.name,
                    'commitHash': w.commit_hash,
                    'branchName': w.branch or ""
                } for w in ws],
                "links": compute_links(),
                "timestamp": parsed_timestamp.isoformat() if parsed_timestamp else None,
            }

            res = client.request("post", "builds", payload=payload)
            res.raise_for_status()

            return res.json().get("id", None)
        except Exception as e:
            tracking_client.send_error_event(
                event_name=Tracking.ErrorEvent.INTERNAL_CLI_ERROR,
                stack_trace=str(e),
            )
            client.print_exception_and_recover(e)
            return None

    # report what we did to the user to assist diagnostics
    def report(ws: List[Workspace], build_id: str):
        org, workspace = get_org_workspace()
        click.echo(
            f"Launchable recorded build {build_name} to workspace {org} / {workspace} with commits from "
            f"{len(ws)} {'repositories' if len(ws) > 1 else 'repository'}: \n")

        header = ["Name", "Path", "HEAD Commit"]
        rows = [[w.name, w.dir, w.commit_hash] for w in ws]
        click.echo(tabulate(rows, header, tablefmt="github"))
        click.echo(
            f"\nVisit https://app.launchableinc.com/organizations/{org}/workspaces/"
            f"{workspace}/data/builds/{build_id} to view this build and its test sessions")

    # all the logics at the high level
    if len(commits) == 0:
        ws = list_sources()
        collect_commits()
        ws = list_submodules(ws)
    else:
        ws = synthesize_workspaces()
    compute_hash_and_branch(ws)
    build_id = send(ws)
    if not build_id:
        return  # recover from service outage gracefully
    report(ws, build_id)
|