atlas-init 0.1.0-py3-none-any.whl → 0.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. atlas_init/__init__.py +3 -3
  2. atlas_init/atlas_init.yaml +51 -34
  3. atlas_init/cli.py +76 -72
  4. atlas_init/cli_cfn/app.py +40 -117
  5. atlas_init/cli_cfn/{cfn.py → aws.py} +129 -14
  6. atlas_init/cli_cfn/cfn_parameter_finder.py +89 -6
  7. atlas_init/cli_cfn/example.py +203 -0
  8. atlas_init/cli_cfn/files.py +63 -0
  9. atlas_init/cli_helper/go.py +6 -3
  10. atlas_init/cli_helper/run.py +18 -2
  11. atlas_init/cli_helper/tf_runner.py +12 -21
  12. atlas_init/cli_root/__init__.py +0 -0
  13. atlas_init/cli_root/trigger.py +153 -0
  14. atlas_init/cli_tf/app.py +211 -4
  15. atlas_init/cli_tf/changelog.py +103 -0
  16. atlas_init/cli_tf/debug_logs.py +221 -0
  17. atlas_init/cli_tf/debug_logs_test_data.py +253 -0
  18. atlas_init/cli_tf/github_logs.py +229 -0
  19. atlas_init/cli_tf/go_test_run.py +194 -0
  20. atlas_init/cli_tf/go_test_run_format.py +31 -0
  21. atlas_init/cli_tf/go_test_summary.py +144 -0
  22. atlas_init/cli_tf/hcl/__init__.py +0 -0
  23. atlas_init/cli_tf/hcl/cli.py +161 -0
  24. atlas_init/cli_tf/hcl/cluster_mig.py +348 -0
  25. atlas_init/cli_tf/hcl/parser.py +140 -0
  26. atlas_init/cli_tf/schema.py +222 -18
  27. atlas_init/cli_tf/schema_go_parser.py +236 -0
  28. atlas_init/cli_tf/schema_table.py +150 -0
  29. atlas_init/cli_tf/schema_table_models.py +155 -0
  30. atlas_init/cli_tf/schema_v2.py +599 -0
  31. atlas_init/cli_tf/schema_v2_api_parsing.py +298 -0
  32. atlas_init/cli_tf/schema_v2_sdk.py +361 -0
  33. atlas_init/cli_tf/schema_v3.py +222 -0
  34. atlas_init/cli_tf/schema_v3_sdk.py +279 -0
  35. atlas_init/cli_tf/schema_v3_sdk_base.py +68 -0
  36. atlas_init/cli_tf/schema_v3_sdk_create.py +216 -0
  37. atlas_init/humps.py +253 -0
  38. atlas_init/repos/cfn.py +6 -1
  39. atlas_init/repos/path.py +3 -3
  40. atlas_init/settings/config.py +30 -11
  41. atlas_init/settings/env_vars.py +29 -3
  42. atlas_init/settings/path.py +12 -1
  43. atlas_init/settings/rich_utils.py +39 -2
  44. atlas_init/terraform.yaml +77 -1
  45. atlas_init/tf/.terraform.lock.hcl +125 -0
  46. atlas_init/tf/always.tf +11 -2
  47. atlas_init/tf/main.tf +3 -0
  48. atlas_init/tf/modules/aws_s3/provider.tf +1 -1
  49. atlas_init/tf/modules/aws_vars/aws_vars.tf +2 -0
  50. atlas_init/tf/modules/aws_vpc/provider.tf +4 -1
  51. atlas_init/tf/modules/cfn/cfn.tf +47 -33
  52. atlas_init/tf/modules/cfn/kms.tf +54 -0
  53. atlas_init/tf/modules/cfn/resource_actions.yaml +1 -0
  54. atlas_init/tf/modules/cfn/variables.tf +31 -0
  55. atlas_init/tf/modules/cloud_provider/cloud_provider.tf +1 -0
  56. atlas_init/tf/modules/cloud_provider/provider.tf +1 -1
  57. atlas_init/tf/modules/cluster/cluster.tf +34 -24
  58. atlas_init/tf/modules/cluster/provider.tf +1 -1
  59. atlas_init/tf/modules/federated_vars/federated_vars.tf +3 -0
  60. atlas_init/tf/modules/federated_vars/provider.tf +1 -1
  61. atlas_init/tf/modules/project_extra/project_extra.tf +15 -1
  62. atlas_init/tf/modules/stream_instance/stream_instance.tf +1 -1
  63. atlas_init/tf/modules/vpc_peering/vpc_peering.tf +1 -1
  64. atlas_init/tf/modules/vpc_privatelink/versions.tf +1 -1
  65. atlas_init/tf/outputs.tf +11 -3
  66. atlas_init/tf/providers.tf +2 -1
  67. atlas_init/tf/variables.tf +17 -0
  68. atlas_init/typer_app.py +76 -0
  69. {atlas_init-0.1.0.dist-info → atlas_init-0.1.4.dist-info}/METADATA +58 -21
  70. atlas_init-0.1.4.dist-info/RECORD +91 -0
  71. {atlas_init-0.1.0.dist-info → atlas_init-0.1.4.dist-info}/WHEEL +1 -1
  72. atlas_init-0.1.0.dist-info/RECORD +0 -61
  73. atlas_init/tf/modules/aws_vpc/{aws-vpc.tf → aws_vpc.tf} +0 -0
  74. {atlas_init-0.1.0.dist-info → atlas_init-0.1.4.dist-info}/entry_points.txt +0 -0
atlas_init/cli_tf/hcl/cluster_mig.py (new file)
@@ -0,0 +1,348 @@
from __future__ import annotations

import logging
import re
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Literal, Self

from atlas_init.cli_tf.hcl.parser import (
    Block,
    ResourceBlock,
    hcl_attrs,
    iter_blocks,
    iter_resource_blocks,
)

logger = logging.getLogger(__name__)
INDENT = " "
LEGACY_CLUSTER_TYPE = "mongodbatlas_cluster"
NEW_CLUSTER_TYPE = "mongodbatlas_advanced_cluster"


def indent(level: int, line: str) -> str:
    return INDENT * level + line


def convert_clusters(tf_dir: Path, out_path: Path | None = None) -> dict[tuple[Path, ResourceBlock], str]:
    new_filepath = out_path or tf_dir / "conversion_cluster_adv_cluster.tf"
    new_content: list[str] = []
    all_blocks = {}
    for path in tf_dir.glob("*.tf"):
        legacy = path.read_text()
        logger.info(f"checking for cluster blocks in {path}")
        new_blocks = parse_and_convert_cluster_blocks(legacy)
        if not new_blocks:
            continue
        all_blocks |= {(path, block): new_config for block, new_config in new_blocks.items()}
        new_content.append(f"# file @ {path}")
        for cluster, new_config in new_blocks.items():
            logger.info(f"found {cluster} to migrate in {path}")
            new_content.extend((f"# {cluster}", new_config))
    assert new_content, "unable to find any cluster resources"
    new_filepath.write_text("\n".join(new_content))
    return all_blocks
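For orientation, a minimal usage sketch, assuming the module above is importable; the `infra` directory is a hypothetical example:

# Hypothetical usage: scan ./infra for legacy clusters and write the converted
# resources to infra/conversion_cluster_adv_cluster.tf (the default out_path).
from pathlib import Path

converted = convert_clusters(Path("infra"))
for (path, block), _new_config in converted.items():
    print(f"converted {block.resource_id} from {path}")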


def convert_cluster_config(hcl_config: str) -> str:
    """
    For the given HCL config, convert every `mongodbatlas_cluster` resource to `mongodbatlas_advanced_cluster`.

    Strategy:
    - Find all resource blocks, e.g. `resource "mongodbatlas_cluster" "project_cluster_free"`.
    - For each resource block:
      - Rename the resource type to `mongodbatlas_advanced_cluster`.
      - Iterate through all root attributes and copy them to the new resource.
      - Iterate through all the block attributes:
        - If the block attribute is `replication_specs`:
          - Iterate through all its attributes and copy them.
          - Iterate through all its nested blocks and copy them.
        - Otherwise, copy the block attribute as is.
    - For every copy, look up whether the key has been renamed in the new resource.

    Special attributes:
    - disk_size_gb (only set on the root in legacy, but in electable_specs in new)
    - provider_name (only set on the root in legacy, but in replication_specs in new)
    - node counts become *_specs blocks in new
    - auto_scaling_* attributes move into an auto_scaling block
    """
    converted_blocks = parse_and_convert_cluster_blocks(hcl_config)
    logger.info(f"found {len(converted_blocks)} blocks to replace")
    for block, new_block in converted_blocks.items():
        hcl_config = hcl_config.replace(block.hcl, new_block)
    return hcl_config
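A hedged end-to-end sketch of the entry point; the cluster below is made up, and the output shape follows from the strategy above and the attribute tables that follow:

legacy = """\
resource "mongodbatlas_cluster" "this" {
  project_id                  = var.project_id
  name                        = "legacy-cluster"
  cloud_backup                = true
  provider_name               = "AWS"
  provider_region_name        = "US_EAST_1"
  provider_instance_size_name = "M10"
}
"""
# The result renames the resource type, renames cloud_backup to backup_enabled,
# drops the provider_* root attributes, and synthesizes a replication_specs
# block whose region_configs carries provider_name, region_name, and
# electable_specs derived from the legacy root attributes.
print(convert_cluster_config(legacy))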


def parse_and_convert_cluster_blocks(hcl_config: str) -> dict[ResourceBlock, str]:
    return {
        block: convert_cluster_block(block)
        for block in iter_resource_blocks(hcl_config)
        if block.type == LEGACY_CLUSTER_TYPE
    }


_removed_attributes_root = {
    "provider_name",
    "provider_instance_size_name",
    "auto_scaling_disk_gb_enabled",
    "auto_scaling_compute_enabled",
    "provider_auto_scaling_compute_min_instance_size",
    "provider_auto_scaling_compute_max_instance_size",
    "auto_scaling_compute_scale_down_enabled",
    "backing_provider_name",
    "provider_disk_iops",
    "provider_encrypt_ebs_volume",
    "provider_volume_type",
    "provider_region_name",
    "replication_factor",
    "num_shards",
}
_removed_attributes_region_config = {
    "electable_nodes",
    "read_only_nodes",
    "analytics_nodes",
}

_renamed_attributes = {
    "cloud_backup": "backup_enabled",
}


def attribute_migration(
    block_name: Literal["root", "", "region_config"], key: str, value: str
) -> tuple[str, str] | None:
    if block_name == "root":
        if key in _removed_attributes_root:
            return None
        key = _renamed_attributes.get(key, key)
        return key, value
    if block_name == "region_config":
        if key in _removed_attributes_region_config:
            return None
        return key, value
    return key, value


def write_attributes(
    level: int,
    attributes: dict[str, str],
    block_name: Literal["root", "", "region_config"] = "",
) -> list[str]:
    lines = []
    for key, value in attributes.items():
        migrated_key_value = attribute_migration(block_name, key, value)
        if not migrated_key_value:
            continue
        new_key, new_value = migrated_key_value
        lines.append(f"{' ' * level}{new_key} = {new_value}")
    return lines
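How the tables above play out per attribute (values are raw HCL fragments):

assert attribute_migration("root", "provider_name", '"AWS"') is None                      # dropped
assert attribute_migration("root", "cloud_backup", "true") == ("backup_enabled", "true")  # renamed
assert attribute_migration("root", "disk_size_gb", "10") == ("disk_size_gb", "10")        # kept
assert attribute_migration("region_config", "electable_nodes", "3") is None               # becomes a *_specs block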


@dataclass
class ClusterMigContext:
    # root level
    provider_name: str = ""
    provider_instance_size_name: str = ""
    auto_scaling_disk_gb_enabled: str = ""
    auto_scaling_compute_enabled: str = ""
    provider_auto_scaling_compute_min_instance_size: str = ""
    provider_auto_scaling_compute_max_instance_size: str = ""
    auto_scaling_compute_scale_down_enabled: str = ""
    backing_provider_name: str = ""
    provider_disk_iops: str = ""
    provider_encrypt_ebs_volume: str = ""
    provider_volume_type: str = ""
    provider_region_name: str = ""

    # region_config
    electable_nodes: str = ""
    read_only_nodes: str = ""
    analytics_nodes: str = ""

    def add_region_config(self, region_config: dict[str, str]) -> Self:
        kwargs = asdict(self) | as_mig_context_kwargs(region_config)
        return type(self)(**kwargs)

    @property
    def auto_scaling_lines(self) -> list[str]:
        auto_scaling_block = {}
        if self.auto_scaling_disk_gb_enabled:
            auto_scaling_block["disk_gb_enabled"] = self.auto_scaling_disk_gb_enabled
        if self.auto_scaling_compute_enabled:
            auto_scaling_block["compute_enabled"] = self.auto_scaling_compute_enabled
        if self.provider_auto_scaling_compute_min_instance_size:
            auto_scaling_block["compute_min_instance_size"] = self.provider_auto_scaling_compute_min_instance_size
        if self.provider_auto_scaling_compute_max_instance_size:
            auto_scaling_block["compute_max_instance_size"] = self.provider_auto_scaling_compute_max_instance_size
        if self.auto_scaling_compute_scale_down_enabled:
            auto_scaling_block["compute_scale_down_enabled"] = self.auto_scaling_compute_scale_down_enabled
        if not auto_scaling_block:
            return []
        return [
            indent(3, "auto_scaling {"),
            *write_attributes(4, auto_scaling_block),
            indent(3, "}"),
        ]

    def hardware_spec(self, node_count: str) -> dict[str, str]:
        hardware_spec = {}
        if node_count:
            hardware_spec["node_count"] = node_count
        if self.provider_instance_size_name:
            hardware_spec["instance_size"] = self.provider_instance_size_name
        if self.provider_disk_iops:
            hardware_spec["disk_iops"] = self.provider_disk_iops
        if self.provider_volume_type:
            hardware_spec["ebs_volume_type"] = self.provider_volume_type
        elif "AWS" in self.provider_name:
            hardware_spec["ebs_volume_type"] = '"STANDARD"'
        return hardware_spec

    @property
    def electable_spec_lines(self) -> list[str]:
        if not self.electable_nodes and not self.provider_instance_size_name:
            return []
        electable_block = self.hardware_spec(self.electable_nodes)
        return [
            indent(3, "electable_specs {"),
            *write_attributes(4, electable_block),
            indent(3, "}"),
        ]

    @property
    def analytics_spec_lines(self) -> list[str]:
        if not self.analytics_nodes:
            return []
        analytics_block = self.hardware_spec(self.analytics_nodes)
        return [
            indent(3, "analytics_specs {"),
            *write_attributes(4, analytics_block),
            indent(3, "}"),
        ]

    @property
    def read_only_spec_lines(self) -> list[str]:
        if not self.read_only_nodes:
            return []
        read_only_block = self.hardware_spec(self.read_only_nodes)
        return [
            indent(3, "read_only_specs {"),
            *write_attributes(4, read_only_block),
            indent(3, "}"),
        ]

    def region_config_lines(self, attributes: dict[str, str]) -> list[str]:
        if self.provider_region_name:
            attributes.setdefault("region_name", self.provider_region_name)
        if self.provider_name:
            attributes.setdefault("provider_name", self.provider_name)
        if self.backing_provider_name:
            attributes.setdefault("backing_provider_name", self.backing_provider_name)
        return [
            indent(2, "region_configs {"),
            *write_attributes(3, attributes, "region_config"),
            *self.auto_scaling_lines,
            *self.electable_spec_lines,
            *self.analytics_spec_lines,
            *self.read_only_spec_lines,
            indent(2, "}"),
        ]
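A small illustration of the context object with made-up values; note that attribute values are raw HCL fragments, hence the nested quotes:

ctx = ClusterMigContext(
    provider_name='"AWS"',
    provider_instance_size_name='"M10"',
    provider_region_name='"US_EAST_1"',
)
ctx = ctx.add_region_config({"electable_nodes": "3"})
print("\n".join(ctx.region_config_lines({"priority": "7"})))
# region_configs { priority = 7, region_name = "US_EAST_1", provider_name = "AWS",
#   electable_specs { node_count = 3, instance_size = "M10", ebs_volume_type = "STANDARD" } }
# (ebs_volume_type falls back to "STANDARD" because provider_name contains "AWS"
# and no provider_volume_type was set.)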


_mig_context_fields = set(asdict(ClusterMigContext()))


def as_mig_context_kwargs(attributes: dict[str, str]) -> dict[str, str]:
    return {k: v for k, v in attributes.items() if k in _mig_context_fields}


_default_replication_spec_legacy = """\
replication_specs {
  regions_config {
    priority = 7
  }
}"""


def default_replication_spec(line_start: int) -> Block:
    hcl = _default_replication_spec_legacy
    return Block(
        name="replication_specs",
        level=1,
        line_start=line_start,
        line_end=line_start + len(hcl.split("\n")),
        hcl=hcl,
    )


_dynamic_pattern = re.compile(r"dynamic\s+\"[^\"]+\"\s+{")


def convert_cluster_block(root_block: ResourceBlock) -> str:
    if _dynamic_pattern.search(root_block.hcl):
        err_msg = f"dynamic block found for {root_block}, currently dynamic blocks are not supported"
        raise ValueError(err_msg)
    root_blocks = list(iter_blocks(root_block))
    attributes_root = hcl_attrs(root_block)
    attributes_root.setdefault("cluster_type", '"REPLICASET"')
    cluster_content = [
        f'resource "{NEW_CLUSTER_TYPE}" "{root_block.name}" {{',
    ]
    cluster_content.extend(write_attributes(1, attributes_root, "root"))
    mig_context = ClusterMigContext(**as_mig_context_kwargs(attributes_root))
    replication_spec_blocks = get_replication_specs(root_block)
    if not replication_spec_blocks:
        line_start = len(root_block.content_lines())
        root_blocks.append(default_replication_spec(line_start))
    for block in root_blocks:
        if block.name == "replication_specs":
            cluster_content.extend(write_replication_spec(block, mig_context))
        elif block.name == "lifecycle":
            cluster_content.extend(write_lifecycle(block))
        else:
            cluster_content.append(block.hcl)
    cluster_content.append("}")
    return "\n".join(cluster_content)


def write_replication_spec(block: Block, mig_context: ClusterMigContext) -> list[str]:
    nested_blocks = list(iter_blocks(block))
    attributes = hcl_attrs(block)
    lines = [
        " replication_specs {",
        *write_attributes(2, attributes),
    ]
    for block in nested_blocks:
        if block.name == "regions_config":
            lines.extend(write_region_config(block, mig_context))
        else:
            lines.append(block.hcl)
    lines.append(" }")
    return lines


def write_region_config(block: Block, mig_context: ClusterMigContext) -> list[str]:
    attributes = hcl_attrs(block)
    region_config_mig = mig_context.add_region_config(attributes)
    return region_config_mig.region_config_lines(attributes)


def get_replication_specs(resource: Block) -> list[Block]:
    return [block for block in iter_blocks(resource) if block.name == "replication_specs"]


def write_lifecycle(lifecycle: Block) -> list[str]:
    attributes = hcl_attrs(lifecycle)
    if ignore_changes := attributes.get("ignore_changes", ""):
        for ignored_name in _removed_attributes_root:
            regex = re.compile(f"{ignored_name},?")
            ignore_changes = regex.sub("", ignore_changes)
        attributes["ignore_changes"] = ignore_changes
    blocks = list(iter_blocks(lifecycle))
    return [
        indent(1, "lifecycle {"),
        *write_attributes(2, attributes),
        *(block.hcl for block in blocks),
        indent(1, "}"),
    ]
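One detail worth calling out: write_lifecycle scrubs every dropped root attribute out of the raw ignore_changes value with an optional-trailing-comma regex, as in this small sketch:

value = "[provider_name, disk_size_gb]"
value = re.compile("provider_name,?").sub("", value)
print(value)  # "[ disk_size_gb]" -- leftover whitespace is legal in HCL lists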
atlas_init/cli_tf/hcl/parser.py (new file)
@@ -0,0 +1,140 @@
import re
from collections import defaultdict
from collections.abc import Iterable
from dataclasses import dataclass


@dataclass(eq=True, frozen=False)
class Block:
    name: str
    line_start: int
    level: int
    hcl: str = ""
    line_end: int = -1

    def __post_init__(self):
        self.name = self.name.strip()

    def end_block(self, line_end: int, hcl: str):
        self.line_end = line_end
        self.hcl = hcl

    @property
    def _lines(self) -> list[str]:
        return self.hcl.splitlines()

    def content_lines(self) -> list[tuple[int, str]]:
        return list(enumerate(self._lines[1:-1], start=1))

    def read_lines(self, start: int, end: int) -> list[str]:
        return self._lines[start : end + 1]

    def __hash__(self) -> int:
        return hash((self.name, self.line_start, self.line_end))
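The line-numbering convention is easy to trip over, so here is a tiny sketch: hcl line 0 is the block opener, the last line is the closer, and content_lines() numbers the body starting at 1.

b = Block(name="example", line_start=0, level=0)
b.end_block(line_end=2, hcl='example {\n  key = "value"\n}')
print(b.content_lines())  # [(1, '  key = "value"')]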


@dataclass
class ResourceBlock(Block):
    type: str = ""

    def __post_init__(self):
        self.name = self.name.strip()
        self.type = self.type.strip()

    def __hash__(self) -> int:
        return hash((self.name, self.type))

    def __str__(self) -> str:
        return f"{self.resource_id} @ L{self.line_start}-{self.line_end}"

    @property
    def resource_id(self) -> str:
        return f"{self.type}.{self.name}"


_resource_pattern = re.compile(r"resource\s+\"(?P<type>[^\"]+)\"\s+\"(?P<name>[^\"]+)\"\s+\{")


def iter_resource_blocks(hcl_config: str) -> Iterable[ResourceBlock]:
    # prepend an empty entry so list indexes match 1-based line numbers
    lines = ["", *hcl_config.splitlines()]
    current_block = None
    for i, line in enumerate(lines):
        if current_block is not None:
            if line.rstrip() == "}":
                current_block.end_block(i, "\n".join(lines[current_block.line_start : i + 1]))
                yield current_block
                current_block = None
            continue
        if match := _resource_pattern.match(line):
            assert current_block is None, "nested resource blocks are not supported"
            current_block = ResourceBlock(
                name=match.group("name"),
                type=match.group("type"),
                line_start=i,
                level=0,
            )
    if current_block is not None:
        err_msg = "Final resource block not closed"
        raise ValueError(err_msg)
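Usage sketch with a minimal made-up config:

config = """\
resource "mongodbatlas_cluster" "free" {
  name = "demo"
}
"""
for rb in iter_resource_blocks(config):
    print(rb.resource_id, rb.line_start, rb.line_end)  # mongodbatlas_cluster.free 1 3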


_block_pattern = re.compile(r"(?P<name>[^\{]+)[\s=]+\{")


def iter_blocks(block: Block, level: int | None = None) -> Iterable[Block]:
    level = level or block.level + 1
    line_level_start_names: dict[int, tuple[int, str]] = {}
    current_level = level
    for line_nr, line in block.content_lines():
        if match := _block_pattern.match(line):
            line_level_start_names[current_level] = (line_nr, match.group("name"))
            current_level += 1
        if line.strip() == "}":
            current_level -= 1
            start_line_nr_name = line_level_start_names.pop(current_level, None)
            if start_line_nr_name is None:
                raise ValueError(f"Unbalanced block @ {line_nr} in {block.name}")
            start_line_nr, name = start_line_nr_name
            if level == current_level:
                block_lines: list[str] = block.read_lines(start_line_nr, line_nr)
                if "=" in block_lines[0]:
                    continue
                yield Block(
                    name=name,
                    line_start=start_line_nr,
                    level=level,
                    line_end=line_nr,
                    hcl="\n".join(block_lines),
                )
    if line_level_start_names.get(level) is not None:
        raise ValueError(f"Unfinished block @ {line_nr} in {block.name} at level {level}")
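A sketch of nested-block iteration; note the `"=" in` guard skips map assignments like `tags = { ... }`:

rb = next(iter_resource_blocks("""\
resource "mongodbatlas_cluster" "demo" {
  name = "demo"
  replication_specs {
    num_shards = 1
  }
}
"""))
print([child.name for child in iter_blocks(rb)])  # ['replication_specs']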


def hcl_attrs(block: Block) -> dict[str, str]:
    nested_blocks = list(iter_blocks(block, level=block.level + 1))
    block_lines = as_block_lines(nested_blocks)
    return _hcl_attrs(block, block_lines)


def _hcl_attrs(block: Block, block_lines: set[int]) -> dict[str, str]:
    attrs = defaultdict(list)
    attr_name: str | None = None
    for line_nr, line in block.content_lines():
        if line_nr in block_lines:
            continue
        if "=" in line:
            assert attr_name is None, f"unfinished attribute {attr_name}, new attribute at {line_nr}"
            attr_name, attr_value = line.split("=", 1)
            attrs[attr_name.strip()] = [attr_value.strip()]
            if line.rstrip().endswith(("{", "[", ",")):
                raise ValueError(f"unsupported nested attribute assignment on {line_nr} in block: {block.name}")
            attr_name = None
    return {k: "\n".join(v) for k, v in attrs.items()}


def as_block_lines(blocks: list[Block]) -> set[int]:
    block_lines = set()
    for block in blocks:
        block_lines.update(set(range(block.line_start, block.line_end)))
    return block_lines
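Putting the three helpers together on a made-up block: hcl_attrs returns only the block's own attributes, masking out nested block bodies via their line ranges.

rb = next(iter_resource_blocks('resource "t" "n" {\n  name = "demo"\n  nested {\n    x = 1\n  }\n}'))
print(hcl_attrs(rb))  # {'name': '"demo"'} -- the nested block's x = 1 is masked out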