atlas-init 0.1.1__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. atlas_init/__init__.py +3 -3
  2. atlas_init/atlas_init.yaml +18 -1
  3. atlas_init/cli.py +62 -70
  4. atlas_init/cli_cfn/app.py +40 -117
  5. atlas_init/cli_cfn/{cfn.py → aws.py} +129 -14
  6. atlas_init/cli_cfn/cfn_parameter_finder.py +89 -6
  7. atlas_init/cli_cfn/example.py +203 -0
  8. atlas_init/cli_cfn/files.py +63 -0
  9. atlas_init/cli_helper/run.py +18 -2
  10. atlas_init/cli_helper/tf_runner.py +4 -6
  11. atlas_init/cli_root/__init__.py +0 -0
  12. atlas_init/cli_root/trigger.py +153 -0
  13. atlas_init/cli_tf/app.py +211 -4
  14. atlas_init/cli_tf/changelog.py +103 -0
  15. atlas_init/cli_tf/debug_logs.py +221 -0
  16. atlas_init/cli_tf/debug_logs_test_data.py +253 -0
  17. atlas_init/cli_tf/github_logs.py +229 -0
  18. atlas_init/cli_tf/go_test_run.py +194 -0
  19. atlas_init/cli_tf/go_test_run_format.py +31 -0
  20. atlas_init/cli_tf/go_test_summary.py +144 -0
  21. atlas_init/cli_tf/hcl/__init__.py +0 -0
  22. atlas_init/cli_tf/hcl/cli.py +161 -0
  23. atlas_init/cli_tf/hcl/cluster_mig.py +348 -0
  24. atlas_init/cli_tf/hcl/parser.py +140 -0
  25. atlas_init/cli_tf/schema.py +222 -18
  26. atlas_init/cli_tf/schema_go_parser.py +236 -0
  27. atlas_init/cli_tf/schema_table.py +150 -0
  28. atlas_init/cli_tf/schema_table_models.py +155 -0
  29. atlas_init/cli_tf/schema_v2.py +599 -0
  30. atlas_init/cli_tf/schema_v2_api_parsing.py +298 -0
  31. atlas_init/cli_tf/schema_v2_sdk.py +361 -0
  32. atlas_init/cli_tf/schema_v3.py +222 -0
  33. atlas_init/cli_tf/schema_v3_sdk.py +279 -0
  34. atlas_init/cli_tf/schema_v3_sdk_base.py +68 -0
  35. atlas_init/cli_tf/schema_v3_sdk_create.py +216 -0
  36. atlas_init/humps.py +253 -0
  37. atlas_init/repos/cfn.py +6 -1
  38. atlas_init/repos/path.py +3 -3
  39. atlas_init/settings/config.py +14 -4
  40. atlas_init/settings/env_vars.py +16 -1
  41. atlas_init/settings/path.py +12 -1
  42. atlas_init/settings/rich_utils.py +2 -0
  43. atlas_init/terraform.yaml +77 -1
  44. atlas_init/tf/.terraform.lock.hcl +59 -83
  45. atlas_init/tf/always.tf +7 -0
  46. atlas_init/tf/main.tf +3 -0
  47. atlas_init/tf/modules/aws_s3/provider.tf +1 -1
  48. atlas_init/tf/modules/aws_vars/aws_vars.tf +2 -0
  49. atlas_init/tf/modules/aws_vpc/provider.tf +4 -1
  50. atlas_init/tf/modules/cfn/cfn.tf +47 -33
  51. atlas_init/tf/modules/cfn/kms.tf +54 -0
  52. atlas_init/tf/modules/cfn/resource_actions.yaml +1 -0
  53. atlas_init/tf/modules/cfn/variables.tf +31 -0
  54. atlas_init/tf/modules/cloud_provider/cloud_provider.tf +1 -0
  55. atlas_init/tf/modules/cloud_provider/provider.tf +1 -1
  56. atlas_init/tf/modules/cluster/cluster.tf +34 -24
  57. atlas_init/tf/modules/cluster/provider.tf +1 -1
  58. atlas_init/tf/modules/federated_vars/federated_vars.tf +3 -0
  59. atlas_init/tf/modules/federated_vars/provider.tf +1 -1
  60. atlas_init/tf/modules/project_extra/project_extra.tf +15 -1
  61. atlas_init/tf/modules/stream_instance/stream_instance.tf +1 -1
  62. atlas_init/tf/modules/vpc_peering/vpc_peering.tf +1 -1
  63. atlas_init/tf/modules/vpc_privatelink/versions.tf +1 -1
  64. atlas_init/tf/outputs.tf +11 -3
  65. atlas_init/tf/providers.tf +2 -1
  66. atlas_init/tf/variables.tf +12 -0
  67. atlas_init/typer_app.py +76 -0
  68. {atlas_init-0.1.1.dist-info → atlas_init-0.1.4.dist-info}/METADATA +36 -18
  69. atlas_init-0.1.4.dist-info/RECORD +91 -0
  70. {atlas_init-0.1.1.dist-info → atlas_init-0.1.4.dist-info}/WHEEL +1 -1
  71. atlas_init-0.1.1.dist-info/RECORD +0 -62
  72. /atlas_init/tf/modules/aws_vpc/{aws-vpc.tf → aws_vpc.tf} +0 -0
  73. {atlas_init-0.1.1.dist-info → atlas_init-0.1.4.dist-info}/entry_points.txt +0 -0
atlas_init/cli_tf/go_test_run.py
@@ -0,0 +1,194 @@
+ from __future__ import annotations
+
+ import logging
+ import re
+ from collections.abc import Iterable
+ from enum import StrEnum
+ from functools import total_ordering
+
+ import humanize
+ from github.WorkflowJob import WorkflowJob
+ from model_lib import Entity, Event, utc_datetime
+ from pydantic import Field
+
+ logger = logging.getLogger(__name__)
+
+
+ class GoTestStatus(StrEnum):
+     RUN = "RUN"
+     PASS = "PASS"  # noqa: S105
+     FAIL = "FAIL"
+     SKIP = "SKIP"
+
+
+ class Classification(StrEnum):
+     OUT_OF_CAPACITY = "OUT_OF_CAPACITY"
+     # DANGLING_RESOURCES = "DANGLING_RESOURCES"
+     # PERFORMANCE_REGRESSION = "PERFORMANCE_REGRESSION"
+     FIRST_TIME_ERROR = "FIRST_TIME_ERROR"
+     LEGIT_ERROR = "LEGIT_ERROR"
+     PANIC = "PANIC"
+
+
+ class LineInfo(Event):
+     number: int
+     text: str
+
+
+ @total_ordering
+ class GoTestRun(Entity):
+     name: str
+     status: GoTestStatus = GoTestStatus.RUN
+     start_line: LineInfo
+     ts: utc_datetime
+     finish_ts: utc_datetime | None = None
+     job: WorkflowJob
+     test_step: int
+
+     finish_line: LineInfo | None = None
+     context_lines: list[str] = Field(default_factory=list)
+     run_seconds: float | None = None
+
+     classifications: set[Classification] = Field(default_factory=set)
+
+     def finish_summary(self) -> str:
+         finish_line = self.finish_line
+         lines = [
+             self.start_line.text if finish_line is None else finish_line.text,
+             self.url,
+         ]
+         return "\n".join(lines + self.context_lines)
+
+     def __lt__(self, other) -> bool:
+         if not isinstance(other, GoTestRun):
+             raise TypeError
+         return (self.ts, self.name) < (other.ts, other.name)
+
+     @property
+     def when(self) -> str:
+         return humanize.naturaltime(self.ts)
+
+     @property
+     def runtime_human(self) -> str:
+         if seconds := self.run_seconds:
+             return humanize.naturaldelta(seconds)
+         return "unknown"
+
+     @property
+     def context_lines_str(self) -> str:
+         return "\n".join(self.context_lines)
+
+     @property
+     def url(self) -> str:
+         line = self.finish_line or self.start_line
+         return f"{self.job.html_url}#step:{self.test_step}:{line.number}"
+
+     @property
+     def is_failure(self) -> bool:
+         return self.status == GoTestStatus.FAIL
+
+     def add_line_match(self, match: LineMatch, line: str, line_number: int) -> None:
+         self.run_seconds = match.run_seconds or self.run_seconds
+         self.finish_line = LineInfo(number=line_number, text=line)
+         self.status = match.status
+         self.finish_ts = match.ts
+
+     @classmethod
+     def from_line_match(
+         cls,
+         match: LineMatch,
+         line: str,
+         line_number: int,
+         job: WorkflowJob,
+         test_step_nr: int,
+     ) -> GoTestRun:
+         start_line = LineInfo(number=line_number, text=line)
+         return cls(
+             name=match.name,
+             status=match.status,
+             ts=match.ts,
+             run_seconds=match.run_seconds,
+             start_line=start_line,
+             job=job,
+             test_step=test_step_nr,
+         )
+
+
+ class LineMatch(Event):
+     ts: utc_datetime
+     status: GoTestStatus
+     name: str
+     run_seconds: float | None = None
+
+
+ _status_options = "|".join(list(GoTestStatus))
+ line_result = re.compile(
+     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)\s[-=]+\s"
+     + r"(?P<status>%s):?\s+" % _status_options  # noqa: UP031
+     + r"(?P<name>[\w_]+)"
+     + r"\s*\(?(?P<run_seconds>[\d\.]+)?s?\)?"
+ )
+
+
+ def _test_name_is_nested(name: str, line: str) -> bool:
+     return f"{name}/" in line
+
+
+ def match_line(line: str) -> LineMatch | None:
+     """
+     2024-06-26T04:41:47.7209465Z === RUN TestAccNetworkDSPrivateLinkEndpoint_basic
+     2024-06-26T04:41:47.7228652Z --- PASS: TestAccNetworkRSPrivateLinkEndpointGCP_basic (424.50s)
+     """
+     if match := line_result.match(line):
+         line_match = LineMatch(**match.groupdict())
+         return None if _test_name_is_nested(line_match.name, line) else line_match
+     return None
+
+
+ context_start_pattern = re.compile(
+     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)\s[-=]+\s" r"NAME\s+" r"(?P<name>[\w_]+)"
+ )
+
+
+ def context_start_match(line: str) -> str:
+     if match := context_start_pattern.match(line):
+         return match.groupdict()["name"]
+     return ""
+
+
+ context_line_pattern = re.compile(
+     r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)" r"\s{5}" r"(?P<indent>\s*)" r"(?P<relevant_line>.*)"
+ )
+
+
+ def extract_context(line: str) -> str:
+     if match := context_line_pattern.match(line):
+         match_vars = match.groupdict()
+         return match_vars["indent"] + match_vars["relevant_line"].strip()
+     return ""
+
+
+ def parse(test_lines: list[str], job: WorkflowJob, test_step_nr: int) -> Iterable[GoTestRun]:
+     tests: dict[str, GoTestRun] = {}
+     context_lines: list[str] = []
+     current_context_test = ""
+     for line_nr, line in enumerate(test_lines, start=0):  # possibly an extra line in the log files we download
+         if current_context_test:
+             if more_context := extract_context(line):
+                 context_lines.append(more_context)
+                 continue
+             else:
+                 tests[current_context_test].context_lines.extend(context_lines)
+                 context_lines.clear()
+                 current_context_test = ""
+         if new_context_test := context_start_match(line):
+             current_context_test = new_context_test
+             continue
+         if line_match := match_line(line):
+             if existing := tests.pop(line_match.name, None):
+                 existing.add_line_match(line_match, line, line_nr)
+                 yield existing
+             else:
+                 tests[line_match.name] = GoTestRun.from_line_match(line_match, line, line_nr, job, test_step_nr)
+     if tests:
+         logger.warning(f"unfinished tests: {sorted(tests.keys())}")
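
The `line_result` regex above does the heavy lifting of `match_line`. A minimal standalone sketch (plain `re` only, with the status alternation expanded by hand) shows what it extracts from the sample line in the `match_line` docstring:

```python
import re

# Pattern copied from go_test_run.py above, with GoTestStatus expanded inline.
line_result = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z?)\s[-=]+\s"
    r"(?P<status>RUN|PASS|FAIL|SKIP):?\s+"
    r"(?P<name>[\w_]+)"
    r"\s*\(?(?P<run_seconds>[\d\.]+)?s?\)?"
)

sample = "2024-06-26T04:41:47.7228652Z --- PASS: TestAccNetworkRSPrivateLinkEndpointGCP_basic (424.50s)"
match = line_result.match(sample)
assert match is not None
print(match.groupdict())
# {'ts': '2024-06-26T04:41:47.7228652Z', 'status': 'PASS',
#  'name': 'TestAccNetworkRSPrivateLinkEndpointGCP_basic', 'run_seconds': '424.50'}
```

In the module itself, this `groupdict()` is fed straight into the `LineMatch` model, and `_test_name_is_nested` filters out subtest lines (`TestFoo/subtest`).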
atlas_init/cli_tf/go_test_run_format.py
@@ -0,0 +1,31 @@
+ from collections import Counter
+
+ from github.WorkflowJob import WorkflowJob
+ from zero_3rdparty import datetime_utils
+
+ from atlas_init.cli_tf.go_test_run import GoTestRun, GoTestStatus
+
+
+ def format_job(job: WorkflowJob) -> str:
+     date = datetime_utils.date_filename(job.created_at)
+     exec_time = "0s"
+     if complete_ts := job.completed_at:
+         exec_time = f"{(complete_ts - job.created_at).total_seconds()}s"
+     return f"{date}_{job.workflow_name}_attempt{job.run_attempt}_ ({exec_time})"
+
+
+ def job_summary(runs: list[GoTestRun]) -> tuple[WorkflowJob, str]:
+     status_counts: dict[GoTestStatus, int] = Counter()
+     for run in runs:
+         status_counts[run.status] += 1
+     line = [f"{key}={status_counts[key]}" for key in sorted(status_counts.keys())]
+     job = runs[0].job
+     return job, f"{format_job(job)}:" + ",".join(line)
+
+
+ def fail_test_summary(runs: list[GoTestRun]) -> str:
+     failed_runs = [r for r in runs if r.is_failure]
+     failed_details: list[str] = [run.finish_summary() for run in failed_runs]
+     failed_names = [f"- {run.name}" for run in failed_runs]
+     delimiter = "\n" + "-" * 40 + "\n"
+     return "\n".join(failed_details) + delimiter + "\n".join(failed_names)
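
A minimal sketch of the `Counter`-based status line that `job_summary` builds, using plain status strings in place of `GoTestRun` objects (in the module the keys are `GoTestStatus` members and the line is prefixed with `format_job(job)`):

```python
from collections import Counter

# One count per test status, rendered as "STATUS=count" pairs in sorted key order.
statuses = ["PASS", "PASS", "FAIL", "SKIP"]
status_counts = Counter(statuses)
line = [f"{key}={status_counts[key]}" for key in sorted(status_counts)]
print(",".join(line))  # FAIL=1,PASS=2,SKIP=1
```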
atlas_init/cli_tf/go_test_summary.py
@@ -0,0 +1,144 @@
+ import logging
+ from datetime import date, datetime, timedelta
+ from functools import total_ordering
+
+ from model_lib import Entity
+ from pydantic import Field, model_validator
+ from zero_3rdparty import datetime_utils, file_utils
+
+ from atlas_init.cli_tf.github_logs import summary_dir
+ from atlas_init.cli_tf.go_test_run import GoTestRun, GoTestStatus
+
+ logger = logging.getLogger(__name__)
+ _COMPLETE_STATUSES = {GoTestStatus.PASS, GoTestStatus.FAIL}
+
+
+ @total_ordering
+ class GoTestSummary(Entity):
+     name: str
+     results: list[GoTestRun] = Field(default_factory=list)
+
+     @model_validator(mode="after")
+     def sort_results(self):
+         self.results.sort()
+         return self
+
+     @property
+     def total_completed(self) -> int:
+         return sum((r.status in _COMPLETE_STATUSES for r in self.results), 0)
+
+     @property
+     def success_rate(self) -> float:
+         total = self.total_completed
+         if total == 0:
+             logger.warning(f"No results to calculate success rate for {self.name}")
+             return 0
+         return sum(r.status == "PASS" for r in self.results) / total
+
+     @property
+     def is_skipped(self) -> bool:
+         return all(r.status == GoTestStatus.SKIP for r in self.results)
+
+     @property
+     def success_rate_human(self) -> str:
+         return f"{self.success_rate:.2%}"
+
+     def last_pass_human(self) -> str:
+         return next(
+             (f"Passed {test.when}" for test in reversed(self.results) if test.status == GoTestStatus.PASS),
+             "never passed",
+         )
+
+     def __lt__(self, other) -> bool:
+         if not isinstance(other, GoTestSummary):
+             raise TypeError
+         return (self.success_rate, self.name) < (other.success_rate, other.name)
+
+     def select_tests(self, date: date) -> list[GoTestRun]:
+         return [r for r in self.results if r.ts.date() == date]
+
+
+ def summary_str(summary: GoTestSummary, start_date: datetime, end_date: datetime) -> str:
+     return "\n".join(
+         [
+             f"## {summary.name}",
+             f"Success rate: {summary.success_rate_human}",
+             "",
+             "### Timeline",
+             *timeline_lines(summary, start_date, end_date),
+             "",
+             *failure_details(summary),
+         ]
+     )
+
+
+ def timeline_lines(summary: GoTestSummary, start_date: datetime, end_date: datetime) -> list[str]:
+     lines = []
+     one_day = timedelta(days=1)
+     for active_date in datetime_utils.day_range(start_date.date(), (end_date + one_day).date(), one_day):
+         active_tests = summary.select_tests(active_date)
+         if not active_tests:
+             lines.append(f"{active_date:%Y-%m-%d}: MISSING")
+             continue
+
+         tests_str = ", ".join(format_test_oneline(t) for t in active_tests)
+         lines.append(f"{active_date:%Y-%m-%d}: {tests_str}")
+     return lines
+
+
+ def failure_details(summary: GoTestSummary) -> list[str]:
+     lines = ["## Failures"]
+     for test in summary.results:
+         if test.status == GoTestStatus.FAIL:
+             lines.extend(
+                 (
+                     f"### {test.when} {format_test_oneline(test)}",
+                     test.finish_summary(),
+                     "",
+                 )
+             )
+     return lines
+
+
+ def format_test_oneline(test: GoTestRun) -> str:
+     return f"[{test.status} {test.runtime_human}]({test.url})"
+
+
+ def create_detailed_summary(
+     summary_name: str,
+     end_test_date: datetime,
+     start_test_date: datetime,
+     test_results: dict[str, list[GoTestRun]],
+     expected_names: set[str] | None = None,
+ ) -> list[str]:
+     summary_dir_path = summary_dir(summary_name)
+     if summary_dir_path.exists():
+         file_utils.clean_dir(summary_dir_path)
+     summaries = [GoTestSummary(name=name, results=runs) for name, runs in test_results.items()]
+     top_level_summary = ["# SUMMARY OF ALL TESTS name (success rate)"]
+     summaries = [summary for summary in summaries if summary.results and not summary.is_skipped]
+     if expected_names and (skipped_names := expected_names - {summary.name for summary in summaries}):
+         logger.warning(f"skipped test names: {'\n'.join(skipped_names)}")
+         top_level_summary.append(f"Skipped tests: {', '.join(skipped_names)}")
+     for summary in sorted(summaries):
+         test_summary_path = summary_dir_path / f"{summary.success_rate_human}_{summary.name}.md"
+         test_summary_md = summary_str(summary, start_test_date, end_test_date)
+         file_utils.ensure_parents_write_text(test_summary_path, test_summary_md)
+         top_level_summary.append(f"- {summary.name} ({summary.success_rate_human}) ({summary.last_pass_human()})")
+     return top_level_summary
+
+
+ def create_short_summary(test_results: dict[str, list[GoTestRun]], failing_names: list[str]) -> list[str]:
+     summary = ["# SUMMARY OF FAILING TESTS"]
+     summary_fail_details: list[str] = ["# FAIL DETAILS"]
+
+     for fail_name in failing_names:
+         fail_tests = test_results[fail_name]
+         summary.append(f"- {fail_name} has {len(fail_tests)} failures:")
+         summary.extend(
+             f"  - [{fail_run.when} failed in {fail_run.runtime_human}]({fail_run.url})" for fail_run in fail_tests
+         )
+         summary_fail_details.append(f"\n\n ## {fail_name} details:")
+         summary_fail_details.extend(f"```\n{fail_run.finish_summary()}\n```" for fail_run in fail_tests)
+     logger.info("\n".join(summary_fail_details))
+     return summary
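
A standalone sketch of the success-rate arithmetic in `GoTestSummary` (mirroring `total_completed`, `success_rate`, and `success_rate_human`): only `PASS`/`FAIL` runs count toward the denominator, so skipped runs never dilute the rate:

```python
# Plain strings stand in for GoTestRun.status values.
statuses = ["PASS", "PASS", "PASS", "FAIL", "SKIP", "SKIP"]
completed = [s for s in statuses if s in {"PASS", "FAIL"}]  # _COMPLETE_STATUSES
success_rate = sum(s == "PASS" for s in completed) / len(completed)
print(f"{success_rate:.2%}")  # 75.00%, i.e. 3 passes out of 4 completed runs
```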
atlas_init/cli_tf/hcl/__init__.py — file without changes
atlas_init/cli_tf/hcl/cli.py
@@ -0,0 +1,161 @@
+ from __future__ import annotations
+
+ import json
+ import logging
+ import os
+ import subprocess
+ import sys
+ from collections.abc import Iterable
+ from pathlib import Path
+ from tempfile import TemporaryDirectory
+
+ from atlas_init.cli_tf.hcl.cluster_mig import (
+     LEGACY_CLUSTER_TYPE,
+     NEW_CLUSTER_TYPE,
+     convert_clusters,
+ )
+ from atlas_init.cli_tf.hcl.parser import (
+     ResourceBlock,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ def should_continue(is_interactive: bool, question: str):
+     for h in logger.handlers:
+         h.flush()
+     return input(f"{question} [y/N]") == "y" if is_interactive else True
+
+
+ def tf(cmd: str, tf_dir: Path, err_msg: str, err_msg_code_2: str = "") -> str:
+     err_codes = {
+         1: err_msg,
+         2: err_msg_code_2,
+     }
+     with TemporaryDirectory() as temp_dir:
+         result_file = Path(temp_dir) / "file"
+         with open(result_file, "w") as file:
+             exit_code = subprocess.call(
+                 f"terraform {cmd}".split(),
+                 stdin=sys.stdin,
+                 stderr=sys.stderr,
+                 stdout=file,
+                 cwd=tf_dir,
+             )
+         cmd_output = result_file.read_text().strip()
+     if exit_code == 0:
+         return cmd_output
+     err_msg = err_codes.get(exit_code, err_msg) or err_msg
+     logger.error(cmd_output)
+     logger.error(err_msg)
+     sys.exit(exit_code)
+
+
+ def convert_and_validate(tf_dir: Path, *, is_interactive: bool = False):
+     out_path = tf_dir / "conversion_cluster_adv_cluster.tf"
+     if out_path.exists() and should_continue(is_interactive, f"{out_path} already exists, should it be overwritten?"):
+         logger.info(f"removing existing conversion file @ {out_path}")
+         out_path.unlink()
+     if adv_clusters := read_import_id_addresses(tf_dir, NEW_CLUSTER_TYPE):
+         existing_addresses = ", ".join(adv_clusters.values())
+         if should_continue(
+             is_interactive,
+             f"found existing advanced clusters: {existing_addresses}, should they be removed?",
+         ):
+             remove_from_state(tf_dir, adv_clusters.values())
+     ensure_no_plan_changes(tf_dir)
+     new_clusters_path = convert_clusters(tf_dir, out_path)
+     logger.info(
+         f"found a total of {len(new_clusters_path)} clusters to convert and generated their config to {out_path}"
+     )
+     if should_continue(is_interactive, f"should import the new clusters in {out_path}?"):
+         import_new_clusters(tf_dir)
+         ensure_no_plan_changes(tf_dir)
+     else:
+         logger.info("skipping import")
+     if should_continue(is_interactive, "should replace the old cluster resources with the new ones?"):
+         replace_old_clusters(tf_dir, out_path, new_clusters_path)
+         ensure_no_plan_changes(tf_dir)
+         logger.info(f"migration successful, migrated {len(new_clusters_path)} clusters!")
+     else:
+         logger.info("skipping replacement")
+
+
+ def remove_from_state(tf_dir, addresses: Iterable[str]) -> None:
+     for address in addresses:
+         logger.info(f"removing {address} from state")
+         tf(f"state rm {address}", tf_dir, f"failed to remove {address}")
+
+
+ def ensure_no_plan_changes(tf_dir):
+     logger.info("running plan to ensure there are no changes")
+     tf(
+         "plan -detailed-exitcode",
+         tf_dir,
+         "error running terraform plan",
+         "plan had changes",
+     )
+
+
+ def import_new_clusters(tf_dir: Path) -> None:
+     cluster_import_ids = read_import_id_addresses(tf_dir)
+     for import_id, resource_address in cluster_import_ids.items():
+         new_resource_address = resource_address.replace(LEGACY_CLUSTER_TYPE, NEW_CLUSTER_TYPE)
+         logger.info(f"importing {import_id} to {new_resource_address}")
+         tf(
+             f"import {new_resource_address} {import_id}",
+             tf_dir,
+             f"failed to import {new_resource_address}",
+         )
+
+
+ def read_import_id_addresses(tf_dir: Path, resource_type: str = "") -> dict[str, str]:
+     current_state = tf("show -json", tf_dir, "failed to read terraform state")
+     return read_cluster_import_ids(current_state, resource_type)
+
+
+ def replace_old_clusters(
+     tf_dir: Path,
+     out_path: Path,
+     new_clusters_path: dict[tuple[Path, ResourceBlock], str],
+ ) -> None:
+     out_path.unlink()
+     for (path, block), new_config in new_clusters_path.items():
+         old_resource_id = block.resource_id
+         logger.info(f"replacing {old_resource_id} @ {path}")
+         old_text = path.read_text()
+         new_text = old_text.replace(block.hcl, new_config)
+         path.write_text(new_text)
+     remove_from_state(tf_dir, read_import_id_addresses(tf_dir).values())
+
+
+ def read_cluster_import_ids(state: str, resource_type: str = "") -> dict[str, str]:
+     resource_type = resource_type or LEGACY_CLUSTER_TYPE
+     try:
+         json_state = json.loads(state)
+     except json.JSONDecodeError:
+         logger.exception("unable to decode state")
+         sys.exit(1)
+     resources = json_state["values"]["root_module"]["resources"]
+     assert isinstance(resources, list)
+     import_ids = {}
+     for resource in resources:
+         if resource["type"] == resource_type:
+             project_id = resource["values"]["project_id"]
+             name = resource["values"]["name"]
+             import_id = f"{project_id}-{name}"
+             import_ids[import_id] = resource["address"]
+     return import_ids
+
+
+ if __name__ == "__main__":
+     logging.basicConfig(level=logging.INFO)
+     *_, tf_dir_str = sys.argv
+     tf_dir_path = Path(tf_dir_str)
+     assert tf_dir_path.is_dir(), f"not a directory: {tf_dir_path}"
+     fast_forward = os.environ.get("FAST_FORWARD", "false").lower() in {
+         "yes",
+         "true",
+         "1",
+     }
+     convert_and_validate(tf_dir_path, is_interactive=not fast_forward)
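
To make the state-parsing step in `read_cluster_import_ids` concrete, here is a self-contained sketch against a hand-built stand-in for `terraform show -json` output. The `mongodbatlas_cluster` type string is an assumption for what `LEGACY_CLUSTER_TYPE` resolves to, and the project id is a made-up placeholder:

```python
import json

# Stand-in for the JSON that `terraform show -json` prints.
state = json.dumps(
    {
        "values": {
            "root_module": {
                "resources": [
                    {
                        "type": "mongodbatlas_cluster",  # assumed value of LEGACY_CLUSTER_TYPE
                        "address": "mongodbatlas_cluster.this",
                        "values": {"project_id": "5f1f2e", "name": "cluster-a"},
                    }
                ]
            }
        }
    }
)

# Same mapping the function builds: "{project_id}-{name}" import id -> resource address.
resources = json.loads(state)["values"]["root_module"]["resources"]
import_ids = {
    f'{r["values"]["project_id"]}-{r["values"]["name"]}': r["address"]
    for r in resources
    if r["type"] == "mongodbatlas_cluster"
}
print(import_ids)  # {'5f1f2e-cluster-a': 'mongodbatlas_cluster.this'}
```

These import ids are exactly what `import_new_clusters` later passes to `terraform import` for the corresponding `mongodbatlas_advanced_cluster` addresses.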