atlas-init 0.4.0__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. atlas_init/__init__.py +1 -1
  2. atlas_init/atlas_init.yaml +3 -0
  3. atlas_init/cli.py +1 -1
  4. atlas_init/cli_cfn/aws.py +2 -2
  5. atlas_init/cli_helper/go.py +104 -58
  6. atlas_init/cli_helper/run.py +3 -3
  7. atlas_init/cli_helper/run_manager.py +3 -3
  8. atlas_init/cli_root/go_test.py +13 -10
  9. atlas_init/cli_tf/app.py +4 -0
  10. atlas_init/cli_tf/debug_logs.py +3 -3
  11. atlas_init/cli_tf/example_update.py +142 -0
  12. atlas_init/cli_tf/example_update_test/test_update_example.tf +23 -0
  13. atlas_init/cli_tf/example_update_test.py +96 -0
  14. atlas_init/cli_tf/github_logs.py +6 -3
  15. atlas_init/cli_tf/go_test_run.py +24 -1
  16. atlas_init/cli_tf/go_test_summary.py +7 -1
  17. atlas_init/cli_tf/hcl/modifier.py +144 -0
  18. atlas_init/cli_tf/hcl/modifier_test/test_process_variables_output_.tf +25 -0
  19. atlas_init/cli_tf/hcl/modifier_test/test_process_variables_variable_.tf +24 -0
  20. atlas_init/cli_tf/hcl/modifier_test.py +95 -0
  21. atlas_init/cli_tf/hcl/parser.py +1 -1
  22. atlas_init/cli_tf/log_clean.py +29 -0
  23. atlas_init/cli_tf/schema_table.py +1 -3
  24. atlas_init/cli_tf/schema_v3.py +1 -1
  25. atlas_init/repos/path.py +14 -0
  26. atlas_init/settings/config.py +24 -13
  27. atlas_init/settings/env_vars.py +1 -1
  28. atlas_init/settings/env_vars_generated.py +1 -1
  29. atlas_init/settings/rich_utils.py +1 -1
  30. atlas_init/tf/.terraform.lock.hcl +16 -16
  31. atlas_init/tf/main.tf +25 -1
  32. atlas_init/tf/modules/aws_kms/aws_kms.tf +100 -0
  33. atlas_init/tf/modules/aws_kms/provider.tf +7 -0
  34. atlas_init/tf/modules/cfn/cfn.tf +1 -1
  35. atlas_init/tf/modules/cloud_provider/cloud_provider.tf +9 -2
  36. atlas_init/tf/modules/encryption_at_rest/main.tf +29 -0
  37. atlas_init/tf/modules/encryption_at_rest/provider.tf +9 -0
  38. atlas_init/tf/variables.tf +5 -0
  39. {atlas_init-0.4.0.dist-info → atlas_init-0.4.2.dist-info}/METADATA +13 -11
  40. {atlas_init-0.4.0.dist-info → atlas_init-0.4.2.dist-info}/RECORD +42 -31
  41. atlas_init/cli_tf/go_test_run_format.py +0 -31
  42. {atlas_init-0.4.0.dist-info → atlas_init-0.4.2.dist-info}/WHEEL +0 -0
  43. {atlas_init-0.4.0.dist-info → atlas_init-0.4.2.dist-info}/entry_points.txt +0 -0
atlas_init/__init__.py CHANGED
@@ -1,6 +1,6 @@
  from pathlib import Path

- VERSION = "0.4.0"
+ VERSION = "0.4.2"


  def running_in_repo() -> bool:
atlas_init/atlas_init.yaml CHANGED
@@ -13,6 +13,9 @@ test_suites:
  - name: clusterm10
    vars:
      cluster_info_m10: true
+ - name: encryption_at_rest
+   vars:
+     use_encryption_at_rest: true
  - name: federated
    repo_go_packages:
      tf:
atlas_init/cli.py CHANGED
@@ -78,7 +78,7 @@ def apply(context: typer.Context, *, skip_outputs: bool = False):
          hook_func() # type: ignore


- def _plan_or_apply(extra_args: list[str], command: Literal["plan", "apply"], *, skip_outputs: bool) -> list[TestSuite]:
+ def _plan_or_apply(extra_args: list[str], command: Literal["plan", "apply"], *, skip_outputs: bool) -> list[TestSuite]: # type: ignore
      settings = init_settings()
      logger.info(f"using the '{command}' command, extra args: {extra_args}")
      try:
atlas_init/cli_cfn/aws.py CHANGED
@@ -50,7 +50,7 @@ def deregister_cfn_resource_type(type_name: str, deregister: bool, region_filter
          logger.info(f"deregistering: {arn}")
          client.deregister_type(Arn=arn)
          if default_version_arn is not None:
-             logger.info(f"deregistering default-arn: {arn}")
+             logger.info(f"deregistering default-arn: {default_version_arn}")
              client.deregister_type(Arn=default_version_arn)
      except Exception as e:
          if "The type does not exist" in repr(e):
@@ -336,7 +336,7 @@ def get_last_cfn_type(
          "Filters": {"Category": category, "TypeNamePrefix": prefix},
          "MaxResults": 100,
      }
-     next_token = ""
+     next_token = "" # nosec
      for _ in range(100):
          types_response: ListTypesOutputTypeDef = client.list_types(**kwargs) # type: ignore
          next_token = types_response.get("NextToken", "")
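Note (illustration only, not part of the released package): the hunk above sits inside a standard NextToken pagination loop over CloudFormation's list_types. A minimal stand-alone sketch of that pattern, with assumed filter values and region:

import boto3

# Keep feeding NextToken back into list_types until CloudFormation stops returning one.
client = boto3.client("cloudformation", region_name="us-east-1")
kwargs = {
    "Filters": {"Category": "THIRD_PARTY", "TypeNamePrefix": "MongoDB::Atlas::"},  # assumed values
    "MaxResults": 100,
}
summaries = []
for _ in range(100):  # hard upper bound, mirroring the loop above
    response = client.list_types(**kwargs)
    summaries.extend(response.get("TypeSummaries", []))
    next_token = response.get("NextToken", "")
    if not next_token:
        break
    kwargs["NextToken"] = next_token
print(f"found {len(summaries)} resource types")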
atlas_init/cli_helper/go.py CHANGED
@@ -24,6 +24,7 @@ logger = logging.getLogger(__name__)
  class GoTestMode(StrEnum):
      package = "package"
      individual = "individual"
+     regex = "regex"


  class GoEnvVars(StrEnum):
@@ -31,6 +32,24 @@ class GoEnvVars(StrEnum):
      vscode = "vscode"


+ class GoTestCaptureMode(StrEnum):
+     capture = "capture"
+     replay = "replay"
+     replay_and_update = "replay-and-update"
+     no_capture = "no-capture"
+
+
+ def env_vars_for_capture(mode: GoTestCaptureMode) -> dict[str, str]:
+     env = {}
+     if mode == GoTestCaptureMode.capture:
+         env["HTTP_MOCKER_CAPTURE"] = "true"
+     if mode in {GoTestCaptureMode.replay, GoTestCaptureMode.replay_and_update}:
+         env["HTTP_MOCKER_REPLAY"] = "true"
+     if mode == GoTestCaptureMode.replay_and_update:
+         env["HTTP_MOCKER_DATA_UPDATE"] = "true"
+     return env
+
+
  class GoTestResult(Entity):
      runs: dict[str, list[GoTestRun]] = Field(default_factory=dict)
      failure_names: set[str] = Field(default_factory=set)
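Note (illustration only, not part of the released package): the mapping encoded by env_vars_for_capture above, spelled out per mode:

from atlas_init.cli_helper.go import GoTestCaptureMode, env_vars_for_capture

# Each capture mode toggles the HTTP mocker env vars as defined above.
assert env_vars_for_capture(GoTestCaptureMode.capture) == {"HTTP_MOCKER_CAPTURE": "true"}
assert env_vars_for_capture(GoTestCaptureMode.replay) == {"HTTP_MOCKER_REPLAY": "true"}
assert env_vars_for_capture(GoTestCaptureMode.replay_and_update) == {
    "HTTP_MOCKER_REPLAY": "true",
    "HTTP_MOCKER_DATA_UPDATE": "true",
}
assert env_vars_for_capture(GoTestCaptureMode.no_capture) == {}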
@@ -55,11 +74,9 @@ class GoTestResult(Entity):

  def run_go_tests(
      repo_path: Path,
-     repo_alias: str,
-     package_prefix: str,
      settings: AtlasInitSettings,
-     groups: list[TestSuite],
-     mode: GoTestMode = GoTestMode.package,
+     groups: list[TestSuite], # type: ignore
+     mode: GoTestMode | str = GoTestMode.package,
      *,
      dry_run: bool = False,
      timeout_minutes: int = 300,
@@ -67,37 +84,30 @@ def run_go_tests(
      re_run: bool = False,
      env_vars: GoEnvVars = GoEnvVars.vscode,
      names: set[str] | None = None,
-     use_replay_mode: bool = False,
+     capture_mode: GoTestCaptureMode = GoTestCaptureMode.capture,
+     use_old_schema: bool = False,
  ) -> GoTestResult:
-     test_env = _resolve_env_vars(settings, env_vars, use_replay_mode=use_replay_mode)
+     test_env = resolve_env_vars(
+         settings,
+         env_vars,
+         capture_mode=capture_mode,
+         use_old_schema=use_old_schema,
+     )
      if ci_value := test_env.pop("CI", None):
-         logger.warning(f"pooped CI={ci_value}")
+         logger.warning(f"popped CI={ci_value}")
      results = GoTestResult()
      commands_to_run: dict[str, str] = {}
      for group in groups:
-         package_paths = group.repo_go_packages.get(repo_alias, [])
-         packages = ",".join(f"{package_prefix}/{pkg}" for pkg in package_paths)
-         if not packages:
-             logger.warning(f"no go packages for suite: {group}")
+         if group.sequential_tests:
+             logger.info(f"running individual tests sequentially as {group.name} is set to sequential_tests")
+             concurrent_runs = 1
+         group_commands_to_run = group_commands_for_mode(
+             repo_path, mode, concurrent_runs, timeout_minutes, names, results, group
+         )
+         if not group_commands_to_run:
+             logger.warning(f"no tests for suite: {group.name}")
              continue
-         if mode == GoTestMode.individual:
-             if group.sequential_tests:
-                 logger.info(f"running individual tests sequentially as {group.name} is set to sequential_tests")
-                 concurrent_runs = 1
-             test_names = find_individual_tests(repo_path, package_paths)
-             for name, pkg_path in test_names.items():
-                 if names and name not in names:
-                     continue
-                 results.add_test_package_path(name, pkg_path)
-                 commands_to_run[name] = f"go test {packages} -v -run ^{name}$ -timeout {timeout_minutes}m"
-         elif mode == GoTestMode.package:
-             name_regex = f'^({"|".join(names)})$' if names else "^TestAcc*"
-             command = f"go test {packages} -v -run {name_regex} -timeout {timeout_minutes}m"
-             if not group.sequential_tests:
-                 command = f"{command} -parallel {concurrent_runs}"
-             commands_to_run[group.name] = command
-         else:
-             raise NotImplementedError(f"mode={mode}")
+         commands_to_run |= group_commands_to_run
      commands_str = "\n".join(f"'{name}': '{command}'" for name, command in sorted(commands_to_run.items()))
      logger.info(f"will run the following commands:\n{commands_str}")
      if dry_run:
@@ -116,31 +126,63 @@ def run_go_tests(
      )


- def _resolve_env_vars(settings: AtlasInitSettings, env_vars: GoEnvVars, *, use_replay_mode: bool) -> dict[str, str]:
+ def group_commands_for_mode(
+     repo_path: Path,
+     mode: GoTestMode | str,
+     concurrent_runs: int,
+     timeout_minutes: int,
+     names: set[str] | None,
+     results: GoTestResult,
+     group: TestSuite, # type: ignore
+ ) -> dict[str, str]:
+     commands_to_run: dict[str, str] = {}
+     if mode == GoTestMode.package:
+         name_regex = f'^({"|".join(names)})$' if names else "^TestAcc*"
+         for pkg_url in group.package_url_tests(repo_path):
+             command = f"go test {pkg_url} -v -run {name_regex} -timeout {timeout_minutes}m"
+             if not group.sequential_tests:
+                 command = f"{command} -parallel {concurrent_runs}"
+             pkg_name = pkg_url.rsplit("/")[-1]
+             commands_to_run[f"{group.name}-{pkg_name}"] = command
+         return commands_to_run
+     if mode == GoTestMode.individual:
+         prefix = "TestAcc"
+     else:
+         logger.info(f"using {GoTestMode.regex} with {mode}")
+         prefix = mode
+     for pkg_url, tests in group.package_url_tests(repo_path, prefix=prefix).items():
+         for name, pkg_path in tests.items():
+             if names and name not in names:
+                 continue
+             results.add_test_package_path(name, pkg_path)
+             commands_to_run[name] = f"go test {pkg_url} -v -run ^{name}$ -timeout {timeout_minutes}m"
+     return commands_to_run
+
+
+ def resolve_env_vars(
+     settings: AtlasInitSettings,
+     env_vars: GoEnvVars,
+     *,
+     capture_mode: GoTestCaptureMode,
+     use_old_schema: bool,
+     skip_os: bool = False,
+ ) -> dict[str, str]:
      if env_vars == GoEnvVars.manual:
-         extra_vars = settings.load_profile_manual_env_vars(skip_os_update=True)
+         test_env_vars = settings.load_profile_manual_env_vars(skip_os_update=True)
      elif env_vars == GoEnvVars.vscode:
-         extra_vars = settings.load_env_vars(settings.env_vars_vs_code)
+         test_env_vars = settings.load_env_vars(settings.env_vars_vs_code)
      else:
          raise NotImplementedError(f"don't know how to load env_vars={env_vars}")
-     mocker_env_name = "HTTP_MOCKER_REPLAY" if use_replay_mode else "HTTP_MOCKER_CAPTURE"
-     extra_vars |= {"TF_ACC": "1", "TF_LOG": "DEBUG", mocker_env_name: "true"}
-     test_env = os.environ | extra_vars
-     logger.info(f"go test env-vars-extra: {sorted(extra_vars)}")
-     return test_env
-
-
- def find_individual_tests(repo_path: Path, package_paths: list[str]) -> dict[str, Path]:
-     tests = {}
-     for package_path in package_paths:
-         package_abs_path = repo_path / package_path.lstrip(".").lstrip("/")
-         for go_file in package_abs_path.glob("*.go"):
-             with go_file.open() as f:
-                 for line in f:
-                     if line.startswith("func TestAcc"):
-                         test_name = line.split("(")[0].strip().removeprefix("func ")
-                         tests[test_name] = package_abs_path
-     return tests
+     test_env_vars |= {
+         "TF_ACC": "1",
+         "TF_LOG": "DEBUG",
+         "MONGODB_ATLAS_PREVIEW_PROVIDER_V2_ADVANCED_CLUSTER": "false" if use_old_schema else "true",
+     }
+     test_env_vars |= env_vars_for_capture(capture_mode)
+     logger.info(f"go test env-vars-extra: {sorted(test_env_vars)}")
+     if not skip_os:
+         test_env_vars = os.environ | test_env_vars # os.environ on the left side, prefer explicit args
+     return test_env_vars


  def _run_tests(
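Note (illustration only, not part of the released package): the shape of the commands group_commands_for_mode builds, assuming a suite whose package_url_tests(...) resolves to a single hypothetical package URL; TestSuite and its config come from atlas_init's settings and are not shown in this diff.

pkg_url = "github.com/example/terraform-provider-example/internal/service/cluster"  # hypothetical
timeout_minutes, concurrent_runs = 300, 20

# GoTestMode.package: one command per package, parallel unless sequential_tests is set
package_command = (
    f"go test {pkg_url} -v -run ^TestAcc* -timeout {timeout_minutes}m -parallel {concurrent_runs}"
)

# GoTestMode.individual (or any prefix via the regex mode): one command per test name
individual_command = f"go test {pkg_url} -v -run ^TestAccCluster_basic$ -timeout {timeout_minutes}m"
print(package_command)
print(individual_command)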
@@ -158,9 +200,13 @@ def _run_tests(
      with ThreadPoolExecutor(max_workers=actual_workers) as pool:
          for name, command in sorted(commands_to_run.items()):
              log_path = _log_path(name)
-             if log_path.exists() and log_path.read_text() and not re_run:
-                 logger.info(f"skipping {name} because log exists")
-                 continue
+             if log_path.exists() and log_path.read_text():
+                 if re_run:
+                     logger.info(f"moving existing logs of {name} to old dir")
+                     move_logs_to_dir({name}, dir_name="old")
+                 else:
+                     logger.info(f"skipping {name} because log exists")
+                     continue
              command_env = {**test_env, "TF_LOG_PATH": str(log_path)}
              future = pool.submit(
                  run_command_is_ok_output,
@@ -204,20 +250,20 @@ def _run_tests(
          if not results.add_test_results_all_pass(name, parsed_tests):
              results.failure_names.add(name)
      if failure_names := results.failure_names:
-         move_failed_logs_to_error_dir(failure_names)
+         move_logs_to_dir(failure_names)
          logger.error(f"failed to run tests: {sorted(failure_names)}")
      return results


- def move_failed_logs_to_error_dir(failures: set[str]):
-     error_dir = DEFAULT_DOWNLOADS_DIR / "failures"
+ def move_logs_to_dir(names: set[str], dir_name: str = "failures"):
+     new_dir = DEFAULT_DOWNLOADS_DIR / dir_name
      for log in DEFAULT_DOWNLOADS_DIR.glob("*.log"):
-         if log.stem in failures:
+         if log.stem in names:
              text = log.read_text()
              assert "\n" in text
              first_line = text.split("\n", maxsplit=1)[0]
              ts = first_line.split(" ")[0]
-             log.rename(error_dir / f"{ts}.{log.name}")
+             log.rename(new_dir / f"{ts}.{log.name}")


  def _log_path(name: str) -> Path:
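Note (illustration only, not part of the released package): a stand-alone sketch of the rename scheme move_logs_to_dir applies, assuming a local downloads directory of *.log files whose first line begins with a timestamp.

from pathlib import Path

downloads = Path("downloads")      # stands in for DEFAULT_DOWNLOADS_DIR
new_dir = downloads / "failures"   # or "old" when re-running
new_dir.mkdir(parents=True, exist_ok=True)
for log in downloads.glob("*.log"):
    first_line = log.read_text().split("\n", maxsplit=1)[0]
    ts = first_line.split(" ")[0]  # e.g. a leading 2025-01-31T12:00:00.000Z timestamp
    log.rename(new_dir / f"{ts}.{log.name}")  # downloads/failures/<ts>.<name>.log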
atlas_init/cli_helper/run.py CHANGED
@@ -1,5 +1,5 @@
  import os
- import subprocess
+ import subprocess # nosec
  import sys
  from logging import Logger
  from pathlib import Path
@@ -34,7 +34,7 @@ def run_command_is_ok(
          stdout=output,
          cwd=cwd,
          env=env,
-         shell=True, # noqa: S602 # We control the calls to this function and don't suspect any shell injection
+         shell=True, # noqa: S602 # We control the calls to this function and don't suspect any shell injection #nosec
      )
      is_ok = exit_code == 0
      if is_ok:
@@ -104,6 +104,6 @@ def run_command_is_ok_output(command: str, cwd: Path, logger: Logger, env: dict

  def add_to_clipboard(clipboard_content: str, logger: Logger):
      if pb_binary := find_binary_on_path("pbcopy", logger, allow_missing=True):
-         subprocess.run(pb_binary, text=True, input=clipboard_content, check=True)
+         subprocess.run(pb_binary, text=True, input=clipboard_content, check=True) # nosec
      else:
          logger.warning("pbcopy not found on $PATH")
atlas_init/cli_helper/run_manager.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
  import logging
  import os
  import signal
- import subprocess
+ import subprocess # nosec
  import sys
  import threading
  import time
@@ -194,7 +194,7 @@ class RunManager:
          sys_stderr = sys.stderr

          def read_stderr(process: subprocess.Popen):
-             for line in process.stderr:
+             for line in process.stderr: # type: ignore
                  sys_stderr.write(line)
                  result._add_line(line)

@@ -211,7 +211,7 @@ class RunManager:
              stderr=subprocess.PIPE,
              stdin=sys.stdin,
              start_new_session=True,
-             shell=True, # noqa: S602 # We control the calls to this function and don't suspect any shell injection
+             shell=True, # noqa: S602 # We control the calls to this function and don't suspect any shell injection #nosec
              bufsize=0,
              text=True, # This makes it return strings instead of bytes
          ) as process:
atlas_init/cli_root/go_test.py CHANGED
@@ -2,7 +2,7 @@ import logging

  import typer

- from atlas_init.cli_helper.go import GoEnvVars, GoTestMode, GoTestResult, run_go_tests
+ from atlas_init.cli_helper.go import GoEnvVars, GoTestCaptureMode, GoTestMode, GoTestResult, run_go_tests
  from atlas_init.cli_tf.mock_tf_log import MockTFLog, mock_tf_log, resolve_admin_api_path
  from atlas_init.repos.path import Repo, current_repo, current_repo_path
  from atlas_init.settings.env_vars import active_suites, init_settings
@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)

  @app_command()
  def go_test(
-     mode: GoTestMode = typer.Option("package", "-m", "--mode", help="package|individual"),
+     mode: str = typer.Option("package", "-m", "--mode", help="package|individual or a prefix"),
      dry_run: bool = typer.Option(False, help="only log out the commands to be run"),
      timeout_minutes: int = typer.Option(300, "-t", "--timeout", help="timeout in minutes"),
      concurrent_runs: int = typer.Option(20, "-c", "--concurrent", help="number of concurrent runs"),
@@ -22,11 +22,16 @@ def go_test(
      export_mock_tf_log_verbose: bool = typer.Option(
          False, "--export-verbose", help="log roundtrips when exporting the mock-tf-log"
      ),
-     env_method: GoEnvVars = typer.Option(GoEnvVars.manual, "--env", help="|".join(list(GoEnvVars))),
+     env_method: GoEnvVars = typer.Option(GoEnvVars.manual, "--env"),
      names: list[str] = typer.Option(
-         ..., "-n", "--names", default_factory=list, help="run only the tests with these names"
+         ...,
+         "-n",
+         "--names",
+         default_factory=list,
+         help="run only the tests with these names",
      ),
-     use_replay_mode: bool = typer.Option(False, "--replay", help="use replay mode and stored responses"),
+     capture_mode: GoTestCaptureMode = typer.Option(GoTestCaptureMode.capture, "--capture"),
+     use_old_schema: bool = typer.Option(False, "--old-schema", help="use the old schema for the tests"),
  ):
      if export_mock_tf_log and mode != GoTestMode.individual:
          err_msg = "exporting mock-tf-log is only supported for individual tests"
@@ -36,16 +41,13 @@ def go_test(
      sorted_suites = sorted(suite.name for suite in suites)
      logger.info(f"running go tests for {len(suites)} test-suites: {sorted_suites}")
      results: GoTestResult | None = None
-     match repo_alias := current_repo():
+     match current_repo():
          case Repo.CFN:
              raise NotImplementedError
          case Repo.TF:
              repo_path = current_repo_path()
-             package_prefix = settings.config.go_package_prefix(repo_alias)
              results = run_go_tests(
                  repo_path,
-                 repo_alias,
-                 package_prefix,
                  settings,
                  suites,
                  mode,
@@ -55,7 +57,8 @@ def go_test(
                  re_run=re_run,
                  env_vars=env_method,
                  names=set(names),
-                 use_replay_mode=use_replay_mode,
+                 capture_mode=capture_mode,
+                 use_old_schema=use_old_schema,
              )
          case _:
              raise NotImplementedError
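Note (illustration only, not part of the released package): the new --capture and --old-schema flags rely on typer's enum/bool option handling; a minimal self-contained sketch of that pattern with a stand-in enum:

from enum import StrEnum

import typer

app = typer.Typer()


class CaptureMode(StrEnum):  # stand-in for GoTestCaptureMode
    capture = "capture"
    replay = "replay"
    replay_and_update = "replay-and-update"
    no_capture = "no-capture"


@app.command()
def demo(
    capture_mode: CaptureMode = typer.Option(CaptureMode.capture, "--capture"),
    use_old_schema: bool = typer.Option(False, "--old-schema"),
):
    # e.g. `--capture replay-and-update --old-schema` parses into the enum member and a bool
    typer.echo(f"capture_mode={capture_mode.value} use_old_schema={use_old_schema}")


if __name__ == "__main__":
    app()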
atlas_init/cli_tf/app.py CHANGED
@@ -17,6 +17,7 @@ from atlas_init.cli_helper.run import (
      run_command_receive_result,
  )
  from atlas_init.cli_tf.changelog import convert_to_changelog
+ from atlas_init.cli_tf.example_update import update_example_cmd
  from atlas_init.cli_tf.github_logs import (
      GH_TOKEN_ENV_NAME,
      find_test_runs,
@@ -28,6 +29,7 @@ from atlas_init.cli_tf.go_test_summary import (
      create_detailed_summary,
      create_short_summary,
  )
+ from atlas_init.cli_tf.log_clean import log_clean
  from atlas_init.cli_tf.mock_tf_log import mock_tf_log_cmd
  from atlas_init.cli_tf.schema import (
      dump_generator_config,
@@ -48,6 +50,8 @@ from atlas_init.settings.interactive import confirm

  app = typer.Typer(no_args_is_help=True)
  app.command(name="mock-tf-log")(mock_tf_log_cmd)
+ app.command(name="example-update")(update_example_cmd)
+ app.command(name="log-clean")(log_clean)
  logger = logging.getLogger(__name__)


atlas_init/cli_tf/debug_logs.py CHANGED
@@ -154,7 +154,7 @@ MARKER_TEST = "Starting TestSteps: "


  class FileRef(NamedTuple):
-     index: int
+     request_index: int
      line_start: int
      line_end: int

@@ -249,12 +249,12 @@ def parse_raw_req_responses(
              in_response = True
              current_start = i + 1
          if in_request and line.startswith(MARKER_END):
-             key = FileRef(index=request_count, line_start=current_start, line_end=i)
+             key = FileRef(request_index=request_count, line_start=current_start, line_end=i)
              requests[key] = log_lines[current_start:i]
              request_count += 1
              in_request = False
          if in_response and line.startswith(MARKER_END):
-             key = FileRef(index=request_count, line_start=current_start, line_end=i)
+             key = FileRef(request_index=request_count, line_start=current_start, line_end=i)
              responses[key] = log_lines[current_start:i]
              response_count += 1
              in_response = False
atlas_init/cli_tf/example_update.py ADDED
@@ -0,0 +1,142 @@
+ import logging
+ from collections import defaultdict
+ from functools import total_ordering
+ from pathlib import Path
+
+ import typer
+ from model_lib import Entity, Event, dump, parse_payload
+ from pydantic import Field
+
+ from atlas_init.cli_helper.run import run_binary_command_is_ok
+ from atlas_init.cli_tf.hcl.modifier import (
+     BLOCK_TYPE_OUTPUT,
+     BLOCK_TYPE_VARIABLE,
+     update_descriptions,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ class UpdateExamples(Entity):
+     examples_base_dir: Path
+     var_descriptions: dict[str, str]
+     output_descriptions: dict[str, str] = Field(default_factory=dict)
+     skip_tf_fmt: bool = False
+
+
+ @total_ordering
+ class TFConfigDescriptionChange(Event):
+     path: Path
+     name: str
+     before: str
+     after: str
+     block_type: str
+
+     @property
+     def changed(self) -> bool:
+         return self.after not in ("", self.before)
+
+     def __lt__(self, other) -> bool:
+         if not isinstance(other, TFConfigDescriptionChange):
+             raise TypeError
+         return (self.path, self.name) < (other.path, other.name)
+
+
+ class UpdateExamplesOutput(Entity):
+     before_var_descriptions: dict[str, str] = Field(default_factory=dict)
+     before_output_descriptions: dict[str, str] = Field(default_factory=dict)
+     changes: list[TFConfigDescriptionChange] = Field(default_factory=list)
+
+
+ def update_examples(event_in: UpdateExamples) -> UpdateExamplesOutput:
+     changes = []
+     existing_var_descriptions = update_block_descriptions(
+         event_in.examples_base_dir,
+         changes,
+         event_in.var_descriptions,
+         BLOCK_TYPE_VARIABLE,
+     )
+     existing_output_descriptions = update_block_descriptions(
+         event_in.examples_base_dir,
+         changes,
+         event_in.output_descriptions,
+         BLOCK_TYPE_OUTPUT,
+     )
+     if event_in.skip_tf_fmt:
+         logger.info("skipping terraform fmt")
+     else:
+         assert run_binary_command_is_ok(
+             "terraform", "fmt -recursive", cwd=event_in.examples_base_dir, logger=logger
+         ), "terraform fmt failed"
+     return UpdateExamplesOutput(
+         before_var_descriptions=flatten_descriptions(existing_var_descriptions),
+         before_output_descriptions=flatten_descriptions(existing_output_descriptions),
+         changes=sorted(changes),
+     )
+
+
+ def flatten_descriptions(descriptions: dict[str, list[str]]) -> dict[str, str]:
+     return {
+         key: "\n".join(desc for desc in sorted(set(descriptions)) if desc != "")
+         for key, descriptions in descriptions.items()
+     }
+
+
+ def update_block_descriptions(
+     base_dir: Path,
+     changes: list[TFConfigDescriptionChange],
+     new_names: dict[str, str],
+     block_type: str,
+ ):
+     all_existing_descriptions = defaultdict(list)
+     in_files = sorted(base_dir.rglob("*.tf"))
+     for tf_file in in_files:
+         logger.info(f"looking for {block_type} in {tf_file}")
+         new_tf, existing_descriptions = update_descriptions(tf_file, new_names, block_type=block_type)
+         if not existing_descriptions: # probably no variables in the file
+             continue
+         for name, descriptions in existing_descriptions.items():
+             changes.extend(
+                 TFConfigDescriptionChange(
+                     path=tf_file,
+                     name=name,
+                     before=description,
+                     after=new_names.get(name, ""),
+                     block_type=block_type,
+                 )
+                 for description in descriptions
+             )
+             all_existing_descriptions[name].extend(descriptions)
+         if tf_file.read_text() == new_tf:
+             logger.debug(f"no {block_type} changes for {tf_file}")
+             continue
+         tf_file.write_text(new_tf)
+     return all_existing_descriptions
+
+
+ def update_example_cmd(
+     examples_base_dir: Path = typer.Argument(
+         ..., help="Directory containing *.tf files (can have many subdirectories)"
+     ),
+     var_descriptions: Path = typer.Option("", "--vars", help="Path to a JSON/yaml file with variable descriptions"),
+     output_descriptions: Path = typer.Option("", "--outputs", help="Path to a JSON/yaml file with output descriptions"),
+     skip_log_existing: bool = typer.Option(False, help="Log existing descriptions"),
+     skip_log_changes: bool = typer.Option(False, help="Log variable updates"),
+ ):
+     var_descriptions_dict = parse_payload(var_descriptions) if var_descriptions else {}
+     output_descriptions_dict = parse_payload(output_descriptions) if output_descriptions else {}
+     event = UpdateExamples(
+         examples_base_dir=examples_base_dir,
+         var_descriptions=var_descriptions_dict, # type: ignore
+         output_descriptions=output_descriptions_dict, # type: ignore
+     )
+     output = update_examples(event)
+     if not skip_log_changes:
+         for change in output.changes:
+             if change.changed:
+                 logger.info(f"{change.path}({change.block_type}) {change.name}: {change.before} -> {change.after}")
+     if not skip_log_existing:
+         existing_var_yaml = dump(output.before_var_descriptions, "yaml")
+         logger.info(f"Existing Variables:\n{existing_var_yaml}")
+         existing_output_yaml = dump(output.before_output_descriptions, "yaml")
+         logger.info(f"Existing Outputs:\n{existing_output_yaml}")
atlas_init/cli_tf/example_update_test/test_update_example.tf ADDED
@@ -0,0 +1,23 @@
+ variable "cluster_name" {
+   description = "description of cluster name"
+   type = string
+ }
+ variable "replication_specs" {
+   description = "Updated description"
+   default = []
+   type = list(object({
+     num_shards = number
+     zone_name = string
+     regions_config = set(object({
+       region_name = string
+       electable_nodes = number
+       priority = number
+       read_only_nodes = optional(number, 0)
+     }))
+   }))
+ }
+
+ variable "provider_name" {
+   type = string
+   default = "" # optional in v3
+ }
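Note (illustration only, not part of the released package): this fixture is exercised by example_update_test.py, which is not shown in the diff; a hypothetical call against it using the new hcl modifier could look like this.

from pathlib import Path

from atlas_init.cli_tf.hcl.modifier import BLOCK_TYPE_VARIABLE, update_descriptions

tf_file = Path("atlas_init/cli_tf/example_update_test/test_update_example.tf")
new_tf, existing = update_descriptions(
    tf_file,
    {"provider_name": "Name of the cloud provider"},  # hypothetical new description
    block_type=BLOCK_TYPE_VARIABLE,
)
print(existing)  # e.g. {"cluster_name": ["description of cluster name"], ...}
print(new_tf)    # updated HCL text; writing it back to disk is left to the caller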