atlas-init 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
atlas_init/__init__.py CHANGED
@@ -1,6 +1,6 @@
  from pathlib import Path

- VERSION = "0.3.1"
+ VERSION = "0.3.3"


  def running_in_repo() -> bool:
atlas_init/atlas_init.yaml CHANGED
@@ -6,6 +6,10 @@ test_suites:
    repo_go_packages:
      cfn:
      - cfn-resources/cluster
+ - name: cluster_sdk
+   repo_go_packages:
+     tf:
+     - internal/service/advancedcluster
  - name: clusterm10
    vars:
      cluster_info_m10: true
@@ -56,6 +60,8 @@ test_suites:
    repo_go_packages:
      cfn:
      - cfn-resources/resource-policy
+     tf:
+     - internal/service/resourcepolicy
  - name: s3
    repo_go_packages:
      tf:
atlas_init/cli.py CHANGED
@@ -9,7 +9,6 @@ from model_lib import dump, parse_payload
  from zero_3rdparty.file_utils import iter_paths

  from atlas_init.cli_helper import sdk_auto_changes
- from atlas_init.cli_helper.go import run_go_tests
  from atlas_init.cli_helper.run import (
      run_binary_command_is_ok,
      run_command_exit_on_failure,
@@ -116,23 +115,6 @@ def destroy(context: typer.Context):
          return


- @app_command()
- def test_go():
-     settings = init_settings()
-     suites = active_suites(settings)
-     sorted_suites = sorted(suite.name for suite in suites)
-     logger.info(f"running go tests for {len(suites)} test-suites: {sorted_suites}")
-     match repo_alias := current_repo():
-         case Repo.CFN:
-             raise NotImplementedError
-         case Repo.TF:
-             repo_path = current_repo_path()
-             package_prefix = settings.config.go_package_prefix(repo_alias)
-             run_go_tests(repo_path, repo_alias, package_prefix, settings, suites)
-         case _:
-             raise NotImplementedError
-
-
  @app_command()
  def sdk_upgrade(
      old: SdkVersion = typer.Argument(help=SDK_VERSION_HELP),
atlas_init/cli_helper/go.py CHANGED
@@ -1,34 +1,214 @@
  import logging
  import os
+ from concurrent.futures import ThreadPoolExecutor, wait
+ from enum import StrEnum
  from pathlib import Path

- from atlas_init.cli_helper.run import run_command_is_ok
+ from model_lib import Entity
+ from pydantic import Field
+
+ from atlas_init.cli_helper.run import run_command_is_ok_output
+ from atlas_init.cli_tf.go_test_run import (
+     GoTestContext,
+     GoTestContextStep,
+     GoTestRun,
+     parse,
+ )
  from atlas_init.settings.config import TestSuite
  from atlas_init.settings.env_vars import AtlasInitSettings
+ from atlas_init.settings.path import DEFAULT_DOWNLOADS_DIR

  logger = logging.getLogger(__name__)


+ class GoTestMode(StrEnum):
+     package = "package"
+     individual = "individual"
+
+
+ class GoEnvVars(StrEnum):
+     manual = "manual"
+     vscode = "vscode"
+
+
+ class GoTestResult(Entity):
+     runs: dict[str, list[GoTestRun]] = Field(default_factory=dict)
+     failure_names: set[str] = Field(default_factory=set)
+
+     test_name_package_path: dict[str, Path] = Field(default_factory=dict)
+
+     def add_test_package_path(self, test_name: str, package_path: Path):
+         if old_path := self.test_name_package_path.get(test_name):
+             logger.warning(f"overwriting test_name={test_name} with package_path={old_path} --> {package_path}")
+         self.test_name_package_path[test_name] = package_path
+
+     def add_test_results_all_pass(self, test_name: str, test_results: list[GoTestRun]) -> bool:
+         prev_test_results = self.runs.setdefault(test_name, [])
+         if prev_test_results:
+             logger.warning(f"2nd time test results for {test_name}")
+         for result in test_results:
+             log_path = _log_path(test_name)
+             result.log_path = log_path
+         prev_test_results.extend(test_results)
+         return all(run.is_pass for run in test_results)
+
+
  def run_go_tests(
      repo_path: Path,
      repo_alias: str,
      package_prefix: str,
      settings: AtlasInitSettings,
      groups: list[TestSuite],
- ):
-     extra_vars = settings.load_env_vars(settings.env_vars_vs_code)
-     logger.info(f"go test env-vars-extra: {sorted(extra_vars)}")
-     test_env = os.environ | extra_vars
-     ci_value = test_env.pop("CI", None)
-     if ci_value:
+     mode: GoTestMode = GoTestMode.package,
+     *,
+     dry_run: bool = False,
+     timeout_minutes: int = 300,
+     concurrent_runs: int = 20,
+     re_run: bool = False,
+     env_vars: GoEnvVars = GoEnvVars.vscode,
+ ) -> GoTestResult:
+     test_env = _resolve_env_vars(settings, env_vars)
+     if ci_value := test_env.pop("CI", None):
          logger.warning(f"pooped CI={ci_value}")
+     results = GoTestResult()
+     commands_to_run: dict[str, str] = {}
      for group in groups:
-         packages = ",".join(f"{package_prefix}/{pkg}" for pkg in group.repo_go_packages.get(repo_alias, []))
+         package_paths = group.repo_go_packages.get(repo_alias, [])
+         packages = ",".join(f"{package_prefix}/{pkg}" for pkg in package_paths)
          if not packages:
              logger.warning(f"no go packages for suite: {group}")
              continue
-         command = f"go test {packages} -v -run ^TestAcc* -timeout 300m".split(" ")
-         if not group.sequential_tests:
-             command.extend(["-parallel", "20"])
-         is_ok = run_command_is_ok(command, test_env, cwd=repo_path, logger=logger)
-         assert is_ok, f"go tests failed for {group}"
+         if mode == GoTestMode.individual:
+             test_names = find_individual_tests(repo_path, package_paths)
+             for name, pkg_path in test_names.items():
+                 results.add_test_package_path(name, pkg_path)
+                 commands_to_run[name] = f"go test {packages} -v -run ^{name}$ -timeout {timeout_minutes}m"
+         elif mode == GoTestMode.package:
+             command = f"go test {packages} -v -run ^TestAcc* -timeout {timeout_minutes}m"
+             if not group.sequential_tests:
+                 command = f"{command} -parallel {concurrent_runs}"
+             commands_to_run[group.name] = command
+         else:
+             raise NotImplementedError(f"mode={mode}")
+     commands_str = "\n".join(f"'{name}': '{command}'" for name, command in sorted(commands_to_run.items()))
+     logger.info(f"will run the following commands:\n{commands_str}")
+     if dry_run:
+         return results
+     if not commands_to_run:
+         logger.warning("no tests to run!")
+         return results
+     return _run_tests(
+         results,
+         repo_path,
+         commands_to_run,
+         test_env,
+         test_timeout_s=timeout_minutes * 60,
+         max_workers=concurrent_runs,
+         re_run=re_run,
+     )
+
+
+ def _resolve_env_vars(settings: AtlasInitSettings, env_vars: GoEnvVars) -> dict[str, str]:
+     if env_vars == GoEnvVars.manual:
+         extra_vars = settings.load_profile_manual_env_vars(skip_os_update=True)
+     elif env_vars == GoEnvVars.vscode:
+         extra_vars = settings.load_env_vars(settings.env_vars_vs_code)
+     else:
+         raise NotImplementedError(f"don't know how to load env_vars={env_vars}")
+     test_env = os.environ | extra_vars | {"TF_ACC": "1", "TF_LOG": "DEBUG"}
+     logger.info(f"go test env-vars-extra: {sorted(extra_vars)}")
+     return test_env
+
+
+ def find_individual_tests(repo_path: Path, package_paths: list[str]) -> dict[str, Path]:
+     tests = {}
+     for package_path in package_paths:
+         package_abs_path = repo_path / package_path.lstrip(".").lstrip("/")
+         for go_file in package_abs_path.glob("*.go"):
+             with go_file.open() as f:
+                 for line in f:
+                     if line.startswith("func TestAcc"):
+                         test_name = line.split("(")[0].strip().removeprefix("func ")
+                         tests[test_name] = package_abs_path
+     return tests
+
+
+ def _run_tests(
+     results: GoTestResult,
+     repo_path: Path,
+     commands_to_run: dict[str, str],
+     test_env: dict[str, str],
+     test_timeout_s: int = 301 * 60,
+     max_workers: int = 2,
+     *,
+     re_run: bool = False,
+ ) -> GoTestResult:
+     futures = {}
+     actual_workers = min(max_workers, len(commands_to_run)) or 1
+     with ThreadPoolExecutor(max_workers=actual_workers) as pool:
+         for name, command in sorted(commands_to_run.items()):
+             log_path = _log_path(name)
+             if log_path.exists() and log_path.read_text() and not re_run:
+                 logger.info(f"skipping {name} because log exists")
+                 continue
+             command_env = {**test_env, "TF_LOG_PATH": str(log_path)}
+             future = pool.submit(
+                 run_command_is_ok_output,
+                 command=command,
+                 env=command_env,
+                 cwd=repo_path,
+                 logger=logger,
+             )
+             futures[future] = name
+         done, not_done = wait(futures.keys(), timeout=test_timeout_s)
+         for f in not_done:
+             logger.warning(f"timeout to run command name = {futures[f]}")
+     for f in done:
+         name: str = futures[f]
+         try:
+             ok, command_out = f.result()
+         except Exception:
+             logger.exception(f"failed to run command for {name}")
+             results.failure_names.add(name)
+             continue
+         context = GoTestContext(
+             name=name,
+             html_url=f"file://{_log_path(name)}",
+             steps=[GoTestContextStep(name="local-run")],
+         )
+         try:
+             parsed_tests = list(parse(command_out.splitlines(), context, test_step_nr=0))
+         except Exception:
+             logger.exception(f"failed to parse tests for {name}")
+             results.failure_names.add(name)
+             continue
+         if not parsed_tests and not ok:
+             results.failure_names.add(name)
+             logger.error(f"failed to run tests for {name}: {command_out}")
+             continue
+         if not parsed_tests:
+             logger.warning(f"failed to parse tests for {name}: {command_out}")
+             continue
+         if not ok:
+             logger.warning(f"failing tests for {name}: {command_out}")
+         if not results.add_test_results_all_pass(name, parsed_tests):
+             results.failure_names.add(name)
+     if failure_names := results.failure_names:
+         move_failed_logs_to_error_dir(failure_names)
+         logger.error(f"failed to run tests: {sorted(failure_names)}")
+     return results
+
+
+ def move_failed_logs_to_error_dir(failures: set[str]):
+     error_dir = DEFAULT_DOWNLOADS_DIR / "failures"
+     for log in DEFAULT_DOWNLOADS_DIR.glob("*.log"):
+         if log.stem in failures:
+             text = log.read_text()
+             assert "\n" in text
+             first_line = text.split("\n", maxsplit=1)[0]
+             ts = first_line.split(" ")[0]
+             log.rename(error_dir / f"{ts}.{log.name}")
+
+
+ def _log_path(name: str) -> Path:
+     return DEFAULT_DOWNLOADS_DIR / f"{name}.log"
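As a point of reference, a hedged sketch (not part of the wheel) of driving the reworked run_go_tests API directly; the new cli_root/go_test.py below wires the same call into Typer. dry_run=True only logs the planned go test commands, and GoEnvVars.manual loads the manual profile env-vars without mutating os.environ.

# hedged sketch: call run_go_tests programmatically in dry-run mode
from atlas_init.cli_helper.go import GoEnvVars, GoTestMode, run_go_tests
from atlas_init.repos.path import current_repo, current_repo_path
from atlas_init.settings.env_vars import active_suites, init_settings

settings = init_settings()
repo_alias = current_repo()
results = run_go_tests(
    current_repo_path(),
    repo_alias,
    settings.config.go_package_prefix(repo_alias),
    settings,
    active_suites(settings),
    GoTestMode.individual,      # one `go test -run ^TestName$` command per discovered TestAcc* function
    dry_run=True,               # only log the commands; nothing is executed
    env_vars=GoEnvVars.manual,  # manual profile env-vars, os.environ untouched
)
print(sorted(results.runs))     # empty on a dry run; otherwise keyed by test name with GoTestRun lists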
atlas_init/cli_helper/run.py CHANGED
@@ -8,6 +8,7 @@ from tempfile import TemporaryDirectory
  from typing import IO, TypeVar

  import typer
+ from zero_3rdparty.id_creator import simple_id

  StrT = TypeVar("StrT", bound=str)

@@ -94,6 +95,15 @@ def run_command_receive_result(
      return output_text


+ def run_command_is_ok_output(command: str, cwd: Path, logger: Logger, env: dict | None = None) -> tuple[bool, str]:
+     with TemporaryDirectory() as temp_dir:
+         result_file = Path(temp_dir) / f"{simple_id()}.txt"
+         with open(result_file, "w") as file:
+             is_ok = run_command_is_ok(command.split(), env=env, cwd=cwd, logger=logger, output=file)
+         output_text = result_file.read_text().strip()
+         return is_ok, output_text
+
+
  def add_to_clipboard(clipboard_content: str, logger: Logger):
      if pb_binary := find_binary_on_path("pbcopy", logger, allow_missing=True):
          subprocess.run(pb_binary, text=True, input=clipboard_content, check=True)
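The new run_command_is_ok_output helper returns both the success flag and the captured output, which _run_tests above feeds into the go-test parser. A small, hypothetical call; the command string is a placeholder, not something the package itself runs:

import logging
from pathlib import Path

from atlas_init.cli_helper.run import run_command_is_ok_output

logger = logging.getLogger("example")
# the command string is split on whitespace and executed in cwd; stdout/stderr are written to a
# temporary file so both the success flag and the full output come back to the caller
is_ok, output = run_command_is_ok_output("go version", cwd=Path.cwd(), logger=logger)
print(is_ok, output)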
atlas_init/cli_root/go_test.py ADDED
@@ -0,0 +1,88 @@
+ import logging
+
+ import typer
+
+ from atlas_init.cli_helper.go import GoEnvVars, GoTestMode, GoTestResult, run_go_tests
+ from atlas_init.cli_tf.mock_tf_log import MockTFLog, mock_tf_log, resolve_admin_api_path
+ from atlas_init.repos.path import Repo, current_repo, current_repo_path
+ from atlas_init.settings.env_vars import active_suites, init_settings
+ from atlas_init.typer_app import app_command
+
+ logger = logging.getLogger(__name__)
+
+
+ @app_command()
+ def go_test(
+     mode: GoTestMode = typer.Option("package", "-m", "--mode", help="package|individual"),
+     dry_run: bool = typer.Option(False, help="only log out the commands to be run"),
+     timeout_minutes: int = typer.Option(300, "-t", "--timeout", help="timeout in minutes"),
+     concurrent_runs: int = typer.Option(20, "-c", "--concurrent", help="number of concurrent runs"),
+     re_run: bool = typer.Option(False, "-r", "--re-run", help="re-run the tests if the log already exist"),
+     export_mock_tf_log: bool = typer.Option(False, "-e", "--export", help="export the mock-tf-log"),
+     env_method: GoEnvVars = typer.Option(GoEnvVars.manual, "--env", help="|".join(list(GoEnvVars))),
+ ):
+     if export_mock_tf_log and mode != GoTestMode.individual:
+         err_msg = "exporting mock-tf-log is only supported for individual tests"
+         raise ValueError(err_msg)
+     settings = init_settings()
+     suites = active_suites(settings)
+     sorted_suites = sorted(suite.name for suite in suites)
+     logger.info(f"running go tests for {len(suites)} test-suites: {sorted_suites}")
+     results: GoTestResult | None = None
+     match repo_alias := current_repo():
+         case Repo.CFN:
+             raise NotImplementedError
+         case Repo.TF:
+             repo_path = current_repo_path()
+             package_prefix = settings.config.go_package_prefix(repo_alias)
+             results = run_go_tests(
+                 repo_path,
+                 repo_alias,
+                 package_prefix,
+                 settings,
+                 suites,
+                 mode,
+                 dry_run=dry_run,
+                 timeout_minutes=timeout_minutes,
+                 concurrent_runs=concurrent_runs,
+                 re_run=re_run,
+                 env_vars=env_method,
+             )
+         case _:
+             raise NotImplementedError
+     if results is None:
+         error_msg = "no results found"
+         raise ValueError(error_msg)
+     if export_mock_tf_log:
+         _export_mock_tf_logs(results)
+     # use the test_results: dict[str, list[GoTestRun]]
+     # TODO: create_detailed_summary()
+
+
+ def _export_mock_tf_logs(results: GoTestResult):
+     package_paths = results.test_name_package_path
+     admin_api_path = resolve_admin_api_path("", sdk_branch="main", admin_api_path="")
+     for test_name, runs in results.runs.items():
+         package_path = package_paths.get(test_name)
+         if package_path is None:
+             logger.warning(f"no package path found for test_name={test_name}")
+             continue
+         assert len(runs) == 1, f"expected only 1 run for test_name={test_name}, got {len(runs)}"
+         run = runs[0]
+         tpf_package_path = package_path.with_name(f"{package_path.name}tpf") / "testdata"
+         default_package_path = package_path / "testdata"
+         if not tpf_package_path.exists():
+             logger.warning(
+                 f"tpf_package_path={tpf_package_path} doesn't exist, adding mocked data to {default_package_path}"
+             )
+             tpf_package_path = default_package_path
+         tf_log_path = run.log_path
+         assert tf_log_path, f"test didn't set tf_log_path: {test_name}"
+         req = MockTFLog(
+             log_path=tf_log_path,
+             output_dir=tpf_package_path,
+             admin_api_path=admin_api_path,
+             package_name=package_path.name,
+         )
+         mocked_yaml = mock_tf_log(req)
+         logger.info(f"mocked TestConfig saved to {mocked_yaml}")
atlas_init/cli_tf/debug_logs.py CHANGED
@@ -11,13 +11,15 @@ from pydantic import ValidationError, model_validator
  logger = logging.getLogger(__name__)


- def parsed(payload: str) -> tuple[dict[str, Any], list]:
+ def parsed(payload: str) -> tuple[dict[str, Any] | None, list | None, bool | None]:
      with suppress(ValueError):
          resp = json.loads(payload)
          if isinstance(resp, dict):
-             return resp, []
+             return resp, None, None
          if isinstance(resp, list):
-             return None, resp
+             return None, resp, None
+         if payload.strip() in {"true", "false"}:
+             return None, None, payload.strip() == "true"
      raise ValueError(f"Could not parse payload: {payload}")

@@ -98,19 +100,19 @@ class SDKRoundtrip(Entity):
      @property
      def version(self) -> str:
          content_type = self.response.headers.get("Content-Type", "v1")
-         try:
+         content_type_req = self.request.headers.get("Accept", "v1")
+         with suppress(ValueError):
              return extract_version(content_type)
-         except ValueError:
-             logger.warning(f"failed to extract version from response header ({content_type}), trying request")
-             content_type = self.request.headers.get("Accept", "v1")
-             return extract_version(content_type)
+         with suppress(ValueError):
+             return extract_version(content_type_req)
+         raise ValueError(f"Could not extract version from req/resp: {content_type} or {content_type_req}")

      @model_validator(mode="after")
      def ensure_match(self) -> Self:
          req = self.request
          resp = self.response
-         _, resp_payload_list = parsed(resp.text)
-         if req.expect_list_response and not resp_payload_list:
+         _, resp_payload_list, __ = parsed(resp.text)
+         if req.expect_list_response and resp_payload_list is None:
              raise ValueError(f"Expected list response but got dict: {resp.text}")
          return self

@@ -159,7 +161,7 @@ def parse_http_requests(logs: str) -> list[SDKRoundtrip]:
      Can say that expected payload is either a list or a dict and if it ends with an identifier it is higher chance for a dict
      """
      test_name = parse_test_name(logs)
-     logger.info(f"Finding http requests for test name: {test_name}")
+     logger.info(f"Finding http requests for test name: '{test_name}'")
      requests, responses = parse_raw_req_responses(logs)
      tf_step_starts = [i for i, line in enumerate(logs.splitlines()) if MARKER_START_STEP in line]
      used_responses: set[int] = set()
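parsed now distinguishes dict, list, and bare-boolean payloads by position, with None in the unused slots, which is what ensure_match above and the add_label_tags modifier added below rely on. A small illustration of the new three-tuple shape with made-up payloads:

from atlas_init.cli_tf.debug_logs import parsed

assert parsed('{"name": "cluster0"}') == ({"name": "cluster0"}, None, None)      # dict payload
assert parsed('[{"name": "cluster0"}]') == (None, [{"name": "cluster0"}], None)  # list payload
assert parsed("true") == (None, None, True)                                      # bare boolean payload, new in 0.3.3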
atlas_init/cli_tf/debug_logs_test_data.py CHANGED
@@ -31,7 +31,7 @@ class RequestInfo(Entity):

      @property
      def id(self):
-         return "__".join((self.method, self.path, self.version, self.text))  # noqa: FLY002
+         return "__".join(part for part in (self.method, self.path, self.version, self.text) if part)


  class StepRequests(Entity):
@@ -143,9 +143,9 @@ class MockRequestData(Entity):
      def replace_text_variables(self):
          for step in self.steps:
              for request in step.all_requests:
-                 request.text = normalize_text(request.text, self.variables)
+                 request.text = normalize_text(request.text, self.variables, expect_json=True)
                  for response in request.responses:
-                     response.text = normalize_text(response.text, self.variables)
+                     response.text = normalize_text(response.text, self.variables, expect_json=True)

      def prune_duplicate_responses(self):
          for step in self.steps:
@@ -202,10 +202,10 @@ def find_normalized_path(path: str, api_spec_paths: list[ApiSpecPath]) -> ApiSpe
      raise ValueError(f"Could not find path: {path}")


- def normalize_text(text: str, variables: dict[str, str]) -> str:
+ def normalize_text(text: str, variables: dict[str, str], *, expect_json: bool = False) -> str:
      for var, value in variables.items():
          text = text.replace(value, f"{{{var}}}")
-     if not text:
+     if not text or not expect_json:
          return text
      try:
          parsed_text = json.loads(text)
@@ -260,8 +260,8 @@ def create_mock_data(
          for modifier in modifiers:
              if modifier.match(rt, normalized_path):
                  modifier.modification(rt)
-         normalized_text = normalize_text(rt.request.text, mock_data.variables)
-         normalized_response_text = normalize_text(rt.response.text, mock_data.variables)
+         normalized_text = normalize_text(rt.request.text, mock_data.variables, expect_json=True)
+         normalized_response_text = normalize_text(rt.response.text, mock_data.variables, expect_json=True)
          mock_data.add_roundtrip(rt, normalized_path, normalized_text, normalized_response_text, is_diff(rt))
      mock_data.replace_text_variables()
      if prune_duplicates:
atlas_init/cli_tf/debug_logs_test_data_package_config.py ADDED
@@ -0,0 +1,48 @@
+ import json
+ import logging
+
+ from atlas_init.cli_tf.debug_logs import SDKRoundtrip, parsed
+ from atlas_init.cli_tf.debug_logs_test_data import RTModifier
+
+ logger = logging.getLogger(__name__)
+
+
+ def add_label_tags(rt: SDKRoundtrip):
+     logger.info(f"Adding labels and tags to {rt.id}")
+     request = rt.request
+     req_dict, req_list, req_bool = parsed(request.text)
+     response = rt.response
+     resp_dict, resp_list, req_bool = parsed(response.text)
+     if resp_list or req_list or req_bool is not None:
+         return
+     resp_dict = resp_dict or {}
+     req_dict = req_dict or {}
+     for extra_field in ["labels", "tags"]:
+         if extra_field not in resp_dict:
+             resp_dict[extra_field] = []
+         if extra_field not in req_dict:
+             req_dict[extra_field] = []
+     request.text = json.dumps(req_dict, indent=1, sort_keys=True)
+     response.text = json.dumps(resp_dict, indent=1, sort_keys=True)
+
+
+ cluster_modifier = RTModifier(
+     version="2024-08-05",
+     method="POST",
+     path="/api/atlas/v2/groups/{groupId}/clusters",
+     modification=add_label_tags,
+ )
+
+
+ def package_modifiers(pkg_name: str) -> list[RTModifier]:
+     # sourcery skip: assign-if-exp, reintroduce-else
+     if pkg_name == "advancedcluster":
+         return [cluster_modifier]
+     return []
+
+
+ def package_skip_suffixes(pkg_name: str) -> list[str]:
+     # sourcery skip: assign-if-exp, reintroduce-else
+     if pkg_name == "resourcepolicy":
+         return [":validate"]
+     return []
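These helpers behave as simple lookups keyed on the package name; the sketch below just restates the configuration above, and the last package name is a hypothetical one to show the empty fall-through:

from atlas_init.cli_tf.debug_logs_test_data_package_config import (
    package_modifiers,
    package_skip_suffixes,
)

# advancedcluster POST /clusters roundtrips get empty labels/tags injected before mocking
assert [m.path for m in package_modifiers("advancedcluster")] == ["/api/atlas/v2/groups/{groupId}/clusters"]
# resourcepolicy ":validate" calls are excluded when deciding which roundtrips differ
assert package_skip_suffixes("resourcepolicy") == [":validate"]
# any other package name (hypothetical example) gets no modifiers and no skip suffixes
assert package_modifiers("searchdeployment") == [] and package_skip_suffixes("searchdeployment") == []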
atlas_init/cli_tf/go_test_run.py CHANGED
@@ -5,11 +5,13 @@ import re
  from collections.abc import Iterable
  from enum import StrEnum
  from functools import total_ordering
+ from pathlib import Path

  import humanize
  from github.WorkflowJob import WorkflowJob
  from model_lib import Entity, Event, utc_datetime
- from pydantic import Field
+ from pydantic import Field, field_validator
+ from zero_3rdparty.datetime_utils import utc_now

  logger = logging.getLogger(__name__)

@@ -35,6 +37,24 @@ class LineInfo(Event):
      text: str


+ class GoTestContextStep(Entity):
+     name: str
+
+
+ class GoTestContext(Entity):
+     """Abstraction on WorkflowJob to also support local runs"""
+
+     name: str
+     created_at: utc_datetime = Field(default_factory=utc_now)
+     steps: list[GoTestContextStep] = Field(default_factory=list)
+     html_url: str = "http://localhost"
+
+     @classmethod
+     def from_local_run(cls, name: str, steps: list[GoTestContextStep]) -> GoTestContext:
+         raise NotImplementedError
+         # return cls(name=name, steps=steps)
+
+
  @total_ordering
  class GoTestRun(Entity):
      name: str
@@ -42,8 +62,9 @@ class GoTestRun(Entity):
      start_line: LineInfo
      ts: utc_datetime
      finish_ts: utc_datetime | None = None
-     job: WorkflowJob
+     job: GoTestContext | WorkflowJob
      test_step: int
+     log_path: Path | None = None

      finish_line: LineInfo | None = None
      context_lines: list[str] = Field(default_factory=list)
@@ -87,6 +108,10 @@ class GoTestRun(Entity):
      def is_failure(self) -> bool:
          return self.status == GoTestStatus.FAIL

+     @property
+     def is_pass(self) -> bool:
+         return self.status == GoTestStatus.PASS
+
      def add_line_match(self, match: LineMatch, line: str, line_number: int) -> None:
          self.run_seconds = match.run_seconds or self.run_seconds
          self.finish_line = LineInfo(number=line_number, text=line)
@@ -99,7 +124,7 @@ class GoTestRun(Entity):
          match: LineMatch,
          line: str,
          line_number: int,
-         job: WorkflowJob,
+         job: WorkflowJob | GoTestContext,
          test_step_nr: int,
      ) -> GoTestRun:
          start_line = LineInfo(number=line_number, text=line)
@@ -115,15 +140,20 @@ class GoTestRun(Entity):


  class LineMatch(Event):
-     ts: utc_datetime
+     ts: utc_datetime = Field(default_factory=utc_now)
      status: GoTestStatus
      name: str
      run_seconds: float | None = None

+     @field_validator("ts", mode="before")
+     @classmethod
+     def remove_none(cls, v):
+         return v or utc_now()
+

  _status_options = "|".join(list(GoTestStatus))
  line_result = re.compile(
-     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)\s[-=]+\s"
+     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)?\s?[-=]+\s"
      + r"(?P<status>%s):?\s+" % _status_options  # noqa: UP031
      + r"(?P<name>[\w_]+)"
      + r"\s*\(?(?P<run_seconds>[\d\.]+)?s?\)?"
@@ -140,13 +170,13 @@ def match_line(line: str) -> LineMatch | None:
      2024-06-26T04:41:47.7228652Z --- PASS: TestAccNetworkRSPrivateLinkEndpointGCP_basic (424.50s)
      """
      if match := line_result.match(line):
-         line_match = LineMatch(**match.groupdict())
+         line_match = LineMatch(**match.groupdict())  # type: ignore
          return None if _test_name_is_nested(line_match.name, line) else line_match
      return None


  context_start_pattern = re.compile(
-     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)\s[-=]+\s" r"NAME\s+" r"(?P<name>[\w_]+)"
+     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)?\s?[-=]+\s" r"NAME\s+" r"(?P<name>[\w_]+)"
  )


@@ -157,7 +187,7 @@ def context_start_match(line: str) -> str:


  context_line_pattern = re.compile(
-     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)" r"\s{5}" r"(?P<indent>\s*)" r"(?P<relevant_line>.*)"
+     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)?" r"\s{5}" r"(?P<indent>\s*)" r"(?P<relevant_line>.*)"
  )


@@ -168,7 +198,7 @@ def extract_context(line: str) -> str:
      return ""


- def parse(test_lines: list[str], job: WorkflowJob, test_step_nr: int) -> Iterable[GoTestRun]:
+ def parse(test_lines: list[str], job: WorkflowJob | GoTestContext, test_step_nr: int) -> Iterable[GoTestRun]:
      tests: dict[str, GoTestRun] = {}
      context_lines: list[str] = []
      current_context_test = ""
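With the timestamp group now optional, the same parser handles both GitHub Actions logs and plain local go test -v output; when the timestamp is missing, LineMatch.ts falls back to utc_now() through the new remove_none validator. A small check of that behaviour (the test name is taken from the docstring above):

from atlas_init.cli_tf.go_test_run import match_line

# GitHub Actions line, with the leading timestamp
ci_line = "2024-06-26T04:41:47.7228652Z --- PASS: TestAccNetworkRSPrivateLinkEndpointGCP_basic (424.50s)"
# the same result line as emitted by a local `go test -v` run, without a timestamp
local_line = "--- PASS: TestAccNetworkRSPrivateLinkEndpointGCP_basic (424.50s)"

ci_match, local_match = match_line(ci_line), match_line(local_line)
assert ci_match and local_match
assert ci_match.name == local_match.name
assert local_match.ts is not None  # defaulted to utc_now() by the "remove_none" validator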
atlas_init/cli_tf/mock_tf_log.py CHANGED
@@ -1,11 +1,14 @@
  import json
  import logging
  import time
+ from collections.abc import Callable
+ from io import StringIO
  from pathlib import Path
  from typing import Self

  import typer
- from model_lib import Entity, dump
+ import yaml
+ from model_lib import Entity
  from pydantic import Field, model_validator
  from zero_3rdparty import file_utils

@@ -15,8 +18,17 @@ from atlas_init.cli_tf.debug_logs import (
      parse_http_requests,
      parse_test_name,
  )
- from atlas_init.cli_tf.debug_logs_test_data import create_mock_data, default_is_diff
- from atlas_init.repos.go_sdk import api_spec_path_transformed, download_admin_api, parse_api_spec_paths
+ from atlas_init.cli_tf.debug_logs_test_data import (
+     RTModifier,
+     create_mock_data,
+     default_is_diff,
+ )
+ from atlas_init.cli_tf.debug_logs_test_data_package_config import package_modifiers, package_skip_suffixes
+ from atlas_init.repos.go_sdk import (
+     api_spec_path_transformed,
+     download_admin_api,
+     parse_api_spec_paths,
+ )
  from atlas_init.settings.path import DEFAULT_DOWNLOADS_DIR

  logger = logging.getLogger(__name__)
@@ -28,6 +40,10 @@ class MockTFLog(Entity):
      admin_api_path: Path
      diff_skip_suffixes: list[str] = Field(default_factory=list)
      keep_duplicates: bool = False
+     modifiers: list[RTModifier] = Field(default_factory=list)
+     package_name: str = ""
+     log_diff_roundtrips: bool = False
+     skip_default_package_config: bool = False

      @model_validator(mode="after")
      def ensure_paths_exist(self) -> Self:
@@ -38,28 +54,49 @@ class MockTFLog(Entity):
          if not self.output_dir.exists():
              raise ValueError(f"output_dir: '{self.output_dir}' doesn't exist")
          assert self.output_dir.name == "testdata", "output_path should be a directory named testdata"
+         if (package_name := self.package_name) and not self.skip_default_package_config:
+             self.modifiers.extend(package_modifiers(package_name))
+             self.diff_skip_suffixes.extend(package_skip_suffixes(package_name))
          return self

      def differ(self, rt: SDKRoundtrip) -> bool:
          return default_is_diff(rt) and not any(rt.request.path.endswith(suffix) for suffix in self.diff_skip_suffixes)


- def mock_tf_log(req: MockTFLog) -> None:
+ def mock_tf_log(req: MockTFLog) -> Path:
      log_file_text = req.log_path.read_text()
      test_name = parse_test_name(log_file_text)
      roundtrips = parse_http_requests(log_file_text)
+     logger.info(f"Found #{len(roundtrips)} roundtrips")
+     if req.log_diff_roundtrips:
+         log_diff_roundtrips(roundtrips, req.differ)
      api_spec_paths = parse_api_spec_paths(req.admin_api_path)
      data = create_mock_data(
          roundtrips,
          api_spec_paths,
          is_diff=req.differ,
          prune_duplicates=not req.keep_duplicates,
+         modifiers=req.modifiers,
      )
      # avoid anchors
-     data_yaml = dump(json.loads(dump(data, "json")), "yaml")
+     data_json = data.model_dump_json(exclude_none=True)
+     data_parsed = json.loads(data_json)
+     s = StringIO()
+     yaml.safe_dump(
+         data_parsed,
+         s,
+         default_flow_style=False,
+         width=100_000,
+         allow_unicode=True,
+         sort_keys=False,
+     )
+     data_yaml = s.getvalue()
+     test_name = test_name.replace("TestAcc", "TestMock")
      output_path = req.output_dir / f"{test_name}.yaml"
+     logger.info(f"Variables found {data.variables}")
      logger.info(f"Writing to {output_path}")
      file_utils.ensure_parents_write_text(output_path, data_yaml)
+     return output_path


  def mock_tf_log_cmd(
@@ -68,7 +105,7 @@ def mock_tf_log_cmd(
          "",
          "-o",
          "--output-testdir",
-         help="the path to the output test directory, for example: internal/service/advancedclustertpf/testdata/, uses cwd/testdata by default",
+         help="the path to the output test directory, for example: internal/service/advancedclustertpf/testdata/, uses $(cwd)/testdata by default",
      ),
      sdk_repo_path_str: str = option_sdk_repo_path,
      sdk_branch: str = typer.Option("main", "-b", "--branch", help="the branch for downloading openapi spec"),
@@ -77,6 +114,9 @@ def mock_tf_log_cmd(
      ),
      diff_skip_suffixes: list[str] = typer.Option(..., "-s", "--skip-suffixes", default_factory=list),
      keep_duplicates: bool = typer.Option(False, "-keep", "--keep-duplicates", help="keep duplicate requests"),
+     log_diff_roundtrips: bool = typer.Option(
+         False, "-l", "--log-diff-roundtrips", help="print out the roundtrips used in diffs"
+     ),
  ):
      cwd = Path.cwd()
      default_testdir = cwd / "testdata"
@@ -87,6 +127,7 @@ def mock_tf_log_cmd(
          admin_api_path=resolved_admin_api_path,
          diff_skip_suffixes=diff_skip_suffixes,
          keep_duplicates=keep_duplicates,
+         log_diff_roundtrips=log_diff_roundtrips,
      )
      mock_tf_log(event_in)

@@ -116,3 +157,20 @@ def resolve_admin_api_path(sdk_repo_path_str: str, sdk_branch: str, admin_api_pa
      assert resolved_admin_api_path.exists(), f"unable to resolve admin_api_path={resolved_admin_api_path}"
      assert resolved_admin_api_path.is_file(), f"not a file admin_api_path={resolved_admin_api_path}"
      return resolved_admin_api_path
+
+
+ def log_diff_roundtrips(roundtrips: list[SDKRoundtrip], differ: Callable[[SDKRoundtrip], bool] | None = None):
+     differ = differ or default_is_diff
+     diff_count = 0
+     step_nr = 0
+     for rt in roundtrips:
+         if not differ(rt):
+             continue
+         if rt.step_number != step_nr:
+             logger.info(f"{'-' * 80}\nStep {rt.step_number}")
+             step_nr = rt.step_number
+         diff_count += 1
+         logger.info(
+             f"\n{rt.request.method} {rt.request.path}\n{rt.request.text}\n{rt.response.status}-{rt.response.status_text}\n{rt.response.text}"
+         )
+     logger.info(f"Diffable requests: {diff_count}")
atlas_init/settings/env_vars.py CHANGED
@@ -170,9 +170,12 @@ class AtlasInitPaths(BaseSettings):
          assert env_path.exists(), f"no env-vars exist {env_path} have you forgotten apply?"
          return load_dotenv(env_path)

-     def load_profile_manual_env_vars(self) -> dict[str, str]:
+     def load_profile_manual_env_vars(self, *, skip_os_update: bool = False) -> dict[str, str]:
+         # sourcery skip: dict-assign-update-to-union
          manual_env_vars = self.manual_env_vars
          if manual_env_vars:
+             if skip_os_update:
+                 return manual_env_vars
              logger.warning(f"loading manual env-vars from {self.env_file_manual}")
              os.environ.update(manual_env_vars)
          else:
atlas_init/typer_app.py CHANGED
@@ -27,9 +27,10 @@ app_command = partial(


  def extra_root_commands():
-     from atlas_init.cli_root import trigger
+     from atlas_init.cli_root import go_test, trigger

      assert trigger
+     assert go_test


  @app.callback(invoke_without_command=True)
atlas_init-0.3.1.dist-info/METADATA → atlas_init-0.3.3.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: atlas-init
- Version: 0.3.1
+ Version: 0.3.3
  Project-URL: Documentation, https://github.com/EspenAlbert/atlas-init#readme
  Project-URL: Issues, https://github.com/EspenAlbert/atlas-init/issues
  Project-URL: Source, https://github.com/EspenAlbert/atlas-init
atlas_init-0.3.1.dist-info/RECORD → atlas_init-0.3.3.dist-info/RECORD RENAMED
@@ -1,11 +1,11 @@
- atlas_init/__init__.py,sha256=Mja1wzBho0kdXUYEG2JEXu72cDv6N_Uf0xLaUN7aQsA,372
+ atlas_init/__init__.py,sha256=x8cFU0CpX_K9Ha_EhynKLP9QaJvcEH7phPh9svYKiZI,372
  atlas_init/__main__.py,sha256=dY1dWWvwxRZMmnOFla6RSfti-hMeLeKdoXP7SVYqMUc,52
- atlas_init/atlas_init.yaml,sha256=GMyJVhKKRc7WzEu7fafmWgeTsDaExTLv7QvXOmE_Brg,1907
- atlas_init/cli.py,sha256=IiOEC_Jry6vrSDH3_OvsU50F-_3iVIS4tV6-R7659fY,9642
+ atlas_init/atlas_init.yaml,sha256=6UC10VjXpn80RMtn-LsuJ-Ghoql5KBWKgzs3BixHfIo,2040
+ atlas_init/cli.py,sha256=xOnAOUccHDLkivICdF0GsLhccr_IxvnTKTbe1KGW7kU,8971
  atlas_init/cli_args.py,sha256=tiwUYAE0JBSl9lHV6VJ41vFCU90ChBZ4mKvi-YoF_HY,541
  atlas_init/humps.py,sha256=l0ZXXuI34wwd9TskXhCjULfGbUyK-qNmiyC6_2ow6kU,7339
  atlas_init/terraform.yaml,sha256=qPrnbzBEP-JAQVkYadHsggRnDmshrOJyiv0ckyZCxwY,2734
- atlas_init/typer_app.py,sha256=zbvYUlZrF4TZEPEwpa33fVSLVKcxRamuXCgF1FCUhCU,2068
+ atlas_init/typer_app.py,sha256=nJbSuaAVhVzbxqNup1m5Nmszl2X3FOtZyI7Rjmz4JSs,2096
  atlas_init/cli_cfn/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  atlas_init/cli_cfn/app.py,sha256=iMukpUDgsAgZh_U_APGZB3gmewOo-3vtFK0byJuDz9w,6649
  atlas_init/cli_cfn/aws.py,sha256=GbohR7uczSGwQjLEYozCmlxbeIHo1uwQIJMwsh7kF7M,17894
@@ -13,23 +13,25 @@ atlas_init/cli_cfn/cfn_parameter_finder.py,sha256=tAadNF1M_U2BTY-m9fXVXFXNQRvfud
  atlas_init/cli_cfn/example.py,sha256=pQNpFreuv58O3lanLy5Kunp8GxG8i9PWjuWsYlpv2tg,8320
  atlas_init/cli_cfn/files.py,sha256=vjop9G8rGMgyRe4fX5eWNX5H-YGAmk-fNUqUGErI7xg,1720
  atlas_init/cli_helper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- atlas_init/cli_helper/go.py,sha256=LVMXTWvEYOHOul_UG5uQT3QAoL0H1f1O7e4d-11kPrw,1232
- atlas_init/cli_helper/run.py,sha256=LvzOXFBotGZQ7KZjivr4e7Aq8AUnxy7vGpH1rluSAN8,2988
+ atlas_init/cli_helper/go.py,sha256=ulRjwQ9JVXAd0FUgZJ1gPzeINx0B9FH9jUZ1O7Skilk,8033
+ atlas_init/cli_helper/run.py,sha256=njE_ua8x_glOo6eOGa4NgZqpLcqOo3eALydrZ0bCXW4,3486
  atlas_init/cli_helper/sdk.py,sha256=exh58-VZwxtosaxM269C62EEy1VnpJPOVziPDPkGsmE,2983
  atlas_init/cli_helper/sdk_auto_changes.py,sha256=oWyXw7P0PdO28hclRvza_RcIVXAyzu0lCYTJTNBDMeo,189
  atlas_init/cli_helper/tf_runner.py,sha256=OYdC-Y6i-xRh8_LCudKdtP7CEYEO9e67nVhholN29eg,3636
  atlas_init/cli_root/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ atlas_init/cli_root/go_test.py,sha256=K8N-EIgry21ILYGJqj5Y7kjPXPl4UTOS9WDk3m3nhjY,3878
  atlas_init/cli_root/trigger.py,sha256=oEgqb_l25tyYgUaFHEuChcOCJA7k3mnRa4D-Myz-Igs,5789
  atlas_init/cli_tf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  atlas_init/cli_tf/app.py,sha256=0Y5c-Pc9ibOz6kXvFlL-yhH_fx1nHLgBgK9OAVqjX9s,11390
  atlas_init/cli_tf/changelog.py,sha256=biWYKf1pZvXZ-jEgcZ5q9sY7nTGrL2PuI0h9mCILf_g,3181
- atlas_init/cli_tf/debug_logs.py,sha256=lnB5BpcEooVzGd2RLxbwAVQs0ZYXzRKy5sHa0hftHI8,8799
- atlas_init/cli_tf/debug_logs_test_data.py,sha256=bv4gqhHSNEnQqIijrcjvEUA0M6S-aeo73V4mji0pKCM,9435
+ atlas_init/cli_tf/debug_logs.py,sha256=q71ZNOnQOz1ikPCyqUz_6zyd4Bm1QVkCkTcixJBZ1xI,8988
+ atlas_init/cli_tf/debug_logs_test_data.py,sha256=G4pnuWJ7PAQd3NXRKAtwAPC6Ne-PgpzaTZHQ9waqxZI,9565
+ atlas_init/cli_tf/debug_logs_test_data_package_config.py,sha256=0GB-m8l9TWL4vstnFVO2jw5Jvtlz9WfHTp-9RmaPugw,1473
  atlas_init/cli_tf/github_logs.py,sha256=VD7qhlXNuG21eTuJ5VI7rsflp5WHSodfngkRVgQlumw,8114
- atlas_init/cli_tf/go_test_run.py,sha256=ZoQSvIasmWauFxZJrWL0ObFX-P0k-D3c_ep3OnPY4zs,5842
+ atlas_init/cli_tf/go_test_run.py,sha256=LQUQ-3zJ8EUCixwu33QTAzUns3um793osst8tE0UKjk,6792
  atlas_init/cli_tf/go_test_run_format.py,sha256=OUd6QPHDeTzbwVuh6MhP-xXgjOOGP9W_sCLJ8KylBTs,1201
  atlas_init/cli_tf/go_test_summary.py,sha256=agr4SITgxchjgOzRpScoTUk-iG38QDLkpnsMtTW9GTY,5382
- atlas_init/cli_tf/mock_tf_log.py,sha256=c0geBR74UkHiyElnV0R_yTuXUgP4F_H53rbGj6D99yc,4958
+ atlas_init/cli_tf/mock_tf_log.py,sha256=311sUVpxbvcz6Qdpz2Z1kFQm67zfjs4aUsQOKrJ2LrY,6988
  atlas_init/cli_tf/schema.py,sha256=iwvb4wD2Wba0MMu7ooTNAIi1jHbpLiXGPOT51_o_YW8,12431
  atlas_init/cli_tf/schema_go_parser.py,sha256=PiRfFFVnkhltxcGFfOCgH53wwzIEynw2BXmSfaINLL8,8294
  atlas_init/cli_tf/schema_inspection.py,sha256=ujLvGfg3baByND4nRD0drZoI45STxo3VfYvim-PfVOc,1764
@@ -54,7 +56,7 @@ atlas_init/repos/go_sdk.py,sha256=1OzM9DjHEAzAAuI9ygoRRuhUK2gqpOhXExXRqhqa0tg,17
  atlas_init/repos/path.py,sha256=wrT8e01OBoAHj8iMrxqutgqWu-BHPe9-bEWtcZRu238,4187
  atlas_init/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  atlas_init/settings/config.py,sha256=HIytZom8RRvpLGy6u8CpZ83tmFXI6v1tO3iSiuo08kc,6259
- atlas_init/settings/env_vars.py,sha256=q8Hj2LPJIg-PK0fCjrEigoPwTGIEbqjLEZckwgnkG8s,9688
+ atlas_init/settings/env_vars.py,sha256=mfpQp5Ja5tdW_mUg5Nqneb294JehQAheWNOpHvDLbg0,9844
  atlas_init/settings/interactive.py,sha256=Xy1Z5WMAOSaJ-vQI_4xjAbSR92rWQgnffwVoDT27L68,340
  atlas_init/settings/path.py,sha256=KkXysu6-0AuSjsvYGknYGJX1hL2j1RD-Fpf8KsVYpkE,2618
  atlas_init/settings/rich_utils.py,sha256=5LgJUmc9wyJTsoS6xWKadrT0MoQREDaKvEOCuBLDXRg,1704
@@ -86,7 +88,7 @@ atlas_init/tf/modules/vpc_peering/vpc_peering.tf,sha256=hJ3KJdGbLpOQednUpVuiJ0Cq
  atlas_init/tf/modules/vpc_privatelink/atlas-privatelink.tf,sha256=FloaaX1MNDvoMZxBnEopeLKyfIlq6kaX2dmx8WWlXNU,1298
  atlas_init/tf/modules/vpc_privatelink/variables.tf,sha256=gktHCDYD4rz6CEpLg5aiXcFbugw4L5S2Fqc52QYdJyc,255
  atlas_init/tf/modules/vpc_privatelink/versions.tf,sha256=G0u5V_Hvvrkux_tqfOY05pA-GzSp_qILpfx1dZaTGDc,237
- atlas_init-0.3.1.dist-info/METADATA,sha256=vb-qsxAC6t4yI4ec_x1L-nYaGT2sGSt0LCIRHBPSgl8,5650
- atlas_init-0.3.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
- atlas_init-0.3.1.dist-info/entry_points.txt,sha256=oSNFIEAS9nUZyyZ8Fc-0F0U5j-NErygy01LpJVSHapQ,57
- atlas_init-0.3.1.dist-info/RECORD,,
+ atlas_init-0.3.3.dist-info/METADATA,sha256=0BLRaxcC3IeB3umJ-rcNN3xRRrkspbSVMPOrLbmozhk,5650
+ atlas_init-0.3.3.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+ atlas_init-0.3.3.dist-info/entry_points.txt,sha256=oSNFIEAS9nUZyyZ8Fc-0F0U5j-NErygy01LpJVSHapQ,57
+ atlas_init-0.3.3.dist-info/RECORD,,