atlas-init 0.4.4-py3-none-any.whl → 0.6.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (66)
  1. atlas_init/__init__.py +1 -1
  2. atlas_init/cli.py +2 -0
  3. atlas_init/cli_cfn/app.py +3 -4
  4. atlas_init/cli_cfn/cfn_parameter_finder.py +61 -53
  5. atlas_init/cli_cfn/contract.py +4 -7
  6. atlas_init/cli_cfn/example.py +8 -18
  7. atlas_init/cli_helper/go.py +7 -11
  8. atlas_init/cli_root/mms_released.py +46 -0
  9. atlas_init/cli_root/trigger.py +6 -6
  10. atlas_init/cli_tf/app.py +3 -84
  11. atlas_init/cli_tf/ci_tests.py +493 -0
  12. atlas_init/cli_tf/codegen/__init__.py +0 -0
  13. atlas_init/cli_tf/codegen/models.py +97 -0
  14. atlas_init/cli_tf/codegen/openapi_minimal.py +74 -0
  15. atlas_init/cli_tf/github_logs.py +7 -94
  16. atlas_init/cli_tf/go_test_run.py +385 -132
  17. atlas_init/cli_tf/go_test_summary.py +331 -4
  18. atlas_init/cli_tf/go_test_tf_error.py +380 -0
  19. atlas_init/cli_tf/hcl/modifier.py +14 -12
  20. atlas_init/cli_tf/hcl/modifier2.py +87 -0
  21. atlas_init/cli_tf/mock_tf_log.py +1 -1
  22. atlas_init/cli_tf/{schema_v2_api_parsing.py → openapi.py} +95 -17
  23. atlas_init/cli_tf/schema_v2.py +43 -1
  24. atlas_init/crud/__init__.py +0 -0
  25. atlas_init/crud/mongo_client.py +115 -0
  26. atlas_init/crud/mongo_dao.py +296 -0
  27. atlas_init/crud/mongo_utils.py +239 -0
  28. atlas_init/repos/go_sdk.py +12 -3
  29. atlas_init/repos/path.py +110 -7
  30. atlas_init/settings/config.py +3 -6
  31. atlas_init/settings/env_vars.py +22 -31
  32. atlas_init/settings/interactive2.py +134 -0
  33. atlas_init/tf/.terraform.lock.hcl +59 -59
  34. atlas_init/tf/always.tf +5 -5
  35. atlas_init/tf/main.tf +3 -3
  36. atlas_init/tf/modules/aws_kms/aws_kms.tf +1 -1
  37. atlas_init/tf/modules/aws_s3/provider.tf +2 -1
  38. atlas_init/tf/modules/aws_vpc/provider.tf +2 -1
  39. atlas_init/tf/modules/cfn/cfn.tf +0 -8
  40. atlas_init/tf/modules/cfn/kms.tf +5 -5
  41. atlas_init/tf/modules/cfn/provider.tf +7 -0
  42. atlas_init/tf/modules/cfn/variables.tf +1 -1
  43. atlas_init/tf/modules/cloud_provider/cloud_provider.tf +1 -1
  44. atlas_init/tf/modules/cloud_provider/provider.tf +2 -1
  45. atlas_init/tf/modules/cluster/cluster.tf +31 -31
  46. atlas_init/tf/modules/cluster/provider.tf +2 -1
  47. atlas_init/tf/modules/encryption_at_rest/provider.tf +2 -1
  48. atlas_init/tf/modules/federated_vars/federated_vars.tf +1 -1
  49. atlas_init/tf/modules/federated_vars/provider.tf +2 -1
  50. atlas_init/tf/modules/project_extra/project_extra.tf +1 -10
  51. atlas_init/tf/modules/project_extra/provider.tf +8 -0
  52. atlas_init/tf/modules/stream_instance/provider.tf +8 -0
  53. atlas_init/tf/modules/stream_instance/stream_instance.tf +0 -9
  54. atlas_init/tf/modules/vpc_peering/provider.tf +10 -0
  55. atlas_init/tf/modules/vpc_peering/vpc_peering.tf +0 -10
  56. atlas_init/tf/modules/vpc_privatelink/versions.tf +2 -1
  57. atlas_init/tf/outputs.tf +1 -0
  58. atlas_init/tf/providers.tf +1 -1
  59. atlas_init/tf/variables.tf +7 -7
  60. atlas_init/typer_app.py +4 -8
  61. {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/METADATA +7 -4
  62. atlas_init-0.6.0.dist-info/RECORD +121 -0
  63. atlas_init-0.4.4.dist-info/RECORD +0 -105
  64. {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/WHEEL +0 -0
  65. {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/entry_points.txt +0 -0
  66. {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/licenses/LICENSE +0 -0
atlas_init/cli_tf/ci_tests.py
@@ -0,0 +1,493 @@
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ import os
+ import re
+ from concurrent.futures import Future, ThreadPoolExecutor, wait
+ from datetime import date, datetime, timedelta
+ from pathlib import Path
+
+ import typer
+ from ask_shell import confirm, select_list, print_to_live, new_task, run_and_wait
+ from model_lib import Entity, Event
+ from pydantic import Field, ValidationError, field_validator, model_validator
+ from pydantic_core import Url
+ from rich.markdown import Markdown
+ from zero_3rdparty import file_utils, str_utils
+ from zero_3rdparty.datetime_utils import utc_now
+
+ from atlas_init.cli_helper.run import add_to_clipboard, run_command_receive_result
+ from atlas_init.cli_tf.github_logs import (
+     GH_TOKEN_ENV_NAME,
+     download_job_safely,
+     is_test_job,
+     tf_repo,
+ )
+ from atlas_init.cli_tf.go_test_run import GoTestRun, GoTestStatus, parse_tests
+ from atlas_init.cli_tf.go_test_summary import DailyReportIn, TFCITestOutput, create_daily_report
+ from atlas_init.cli_tf.go_test_tf_error import (
+     DetailsInfo,
+     ErrorClassAuthor,
+     GoTestError,
+     GoTestErrorClass,
+     GoTestErrorClassification,
+     parse_error_details,
+ )
+ from atlas_init.cli_tf.mock_tf_log import resolve_admin_api_path
+ from atlas_init.crud.mongo_dao import (
+     TFResources,
+     init_mongo_dao,
+     read_tf_resources,
+ )
+ from atlas_init.repos.go_sdk import ApiSpecPaths, parse_api_spec_paths
+ from atlas_init.repos.path import Repo, current_repo_path
+ from atlas_init.settings.env_vars import AtlasInitSettings, init_settings
+
+ logger = logging.getLogger(__name__)
+
+
+ class TFCITestInput(Event):
+     settings: AtlasInitSettings = Field(default_factory=init_settings)
+     repo_path: Path = Field(default_factory=lambda: current_repo_path(Repo.TF))
+     test_group_name: str = ""
+     max_days_ago: int = 1
+     branch: str = "master"
+     workflow_file_stems: set[str] = Field(default_factory=lambda: set(_TEST_STEMS))
+     names: set[str] = Field(default_factory=set)
+     skip_log_download: bool = False
+     skip_error_parsing: bool = False
+     summary_name: str = ""
+     report_date: datetime = Field(default_factory=utc_now)
+
+     @field_validator("report_date", mode="before")
+     def support_today(cls, value: str | datetime) -> datetime | str:
+         return utc_now() if isinstance(value, str) and value == "today" else value
+
+     @model_validator(mode="after")
+     def set_workflow_file_stems(self) -> TFCITestInput:
+         if not self.workflow_file_stems:
+             self.workflow_file_stems = set(_TEST_STEMS)
+         return self
+
+
+ def ci_tests(
+     test_group_name: str = typer.Option("", "-g"),
+     max_days_ago: int = typer.Option(1, "-d", "--days"),
+     branch: str = typer.Option("master", "-b", "--branch"),
+     workflow_file_stems: str = typer.Option("test-suite,terraform-compatibility-matrix", "-w", "--workflow"),
+     names: str = typer.Option(
+         "",
+         "-n",
+         "--test-names",
+         help="comma separated list of test names to filter, e.g., TestAccCloudProviderAccessAuthorizationAzure_basic,TestAccBackupSnapshotExportBucket_basicAzure",
+     ),
+     summary_name: str = typer.Option(
+         "",
+         "-s",
+         "--summary",
+         help="the name of the summary directory to store detailed test results",
+     ),
+     summary_env_name: str = typer.Option("", "--env", help="filter summary based on tests/errors only in dev/qa"),
+     skip_log_download: bool = typer.Option(False, "-sld", "--skip-log-download", help="skip downloading logs"),
+     skip_error_parsing: bool = typer.Option(
+         False, "-sep", "--skip-error-parsing", help="skip parsing errors, usually together with --skip-log-download"
+     ),
+     copy_to_clipboard: bool = typer.Option(
+         False,
+         "--copy",
+         help="copy the summary to clipboard",
+     ),
+     report_date: str = typer.Option(
+         "today",
+         "-rd",
+         "--report-day",
+         help="the day to generate the report for, defaults to today, format=YYYY-MM-DD",
+     ),
+ ):
+     names_set: set[str] = set()
+     if names:
+         names_set.update(names.split(","))
+         logger.info(f"filtering tests by names: {names_set} (todo: support this)")
+     if test_group_name:
+         logger.warning(f"test_group_name is not supported yet: {test_group_name}")
+     if summary_name:
+         logger.warning(f"summary_name is not supported yet: {summary_name}")
+     event = TFCITestInput(
+         test_group_name=test_group_name,
+         max_days_ago=max_days_ago,
+         report_date=report_date,  # type: ignore
+         branch=branch,
+         workflow_file_stems=set(workflow_file_stems.split(",")),
+         names=names_set,
+         summary_name=summary_name,
+         skip_log_download=skip_log_download,
+         skip_error_parsing=skip_error_parsing,
+     )
+     out = asyncio.run(ci_tests_pipeline(event))
+     settings = event.settings
+     manual_classification(out.classified_errors, settings)
+     daily_in = DailyReportIn(
+         report_date=event.report_date,
+         run_history_start=event.report_date - timedelta(days=event.max_days_ago),
+         run_history_end=event.report_date,
+         env_filter=[summary_env_name] if summary_env_name else [],
+     )
+     daily_out = create_daily_report(out, settings, daily_in)
+     print_to_live(Markdown(daily_out.summary_md))
+     if copy_to_clipboard:
+         add_to_clipboard(daily_out.summary_md, logger=logger)
+     if summary_name:
+         summary_path = settings.github_ci_summary_dir / str_utils.ensure_suffix(summary_name, ".md")
+         file_utils.ensure_parents_write_text(summary_path, daily_out.summary_md)
+         logger.info(f"summary written to {summary_path}")
+         if report_details_md := daily_out.details_md:
+             details_path = summary_path.with_name(f"{summary_path.stem}_details.md")
+             file_utils.ensure_parents_write_text(details_path, report_details_md)
+             logger.info(f"summary details written to {details_path}")
+         if confirm(f"do you want to open the summary file? {summary_path}", default=False):
+             run_command_receive_result(f'code "{summary_path}"', cwd=event.repo_path, logger=logger)
+
+
+ async def ci_tests_pipeline(event: TFCITestInput) -> TFCITestOutput:
+     repo_path = event.repo_path
+     branch = event.branch
+     settings = event.settings
+     download_input = DownloadJobLogsInput(
+         branch=branch,
+         max_days_ago=event.max_days_ago,
+         end_date=event.report_date,
+         workflow_file_stems=event.workflow_file_stems,
+         repo_path=repo_path,
+     )
+     dao = await init_mongo_dao(settings)
+     if event.skip_log_download:
+         logger.info("skipping log download, reading existing instead")
+         log_paths = []
+     else:
+         log_paths = download_logs(download_input, settings)
+     resources = read_tf_resources(settings, repo_path, branch)
+     with new_task(f"parse job logs from {len(log_paths)} files"):
+         parse_job_output = parse_job_tf_test_logs(
+             ParseJobLogsInput(
+                 settings=settings,
+                 log_paths=log_paths,
+                 resources=resources,
+                 branch=branch,
+             )
+         )
+     await dao.store_tf_test_runs(parse_job_output.test_runs)
+     report_date = event.report_date
+     with new_task(f"reading test runs from storage for {report_date.date().isoformat()}"):
+         report_tests = await dao.read_tf_tests_for_day(event.branch, report_date)
+     with new_task("parsing test errors"):
+         report_errors = parse_test_errors(report_tests)
+     with new_task("classifying errors"):
+         error_run_ids = [error.run_id for error in report_errors]
+         existing_classifications = await dao.read_error_classifications(error_run_ids)
+         classified_errors = classify_errors(existing_classifications, report_errors)
+     return TFCITestOutput(
+         log_paths=log_paths, found_tests=report_tests, classified_errors=classified_errors, found_errors=report_errors
+     )
+
+
+ def parse_test_errors(found_tests: list[GoTestRun]) -> list[GoTestError]:
+     admin_api_path = resolve_admin_api_path(sdk_branch="main")
+     spec_paths = ApiSpecPaths(method_paths=parse_api_spec_paths(admin_api_path))
+     error_tests = [test for test in found_tests if test.is_failure]
+     test_errors: list[GoTestError] = []
+     for test in error_tests:
+         test_error_input = ParseTestErrorInput(test=test, api_spec_paths=spec_paths)
+         test_errors.append(parse_test_error(test_error_input))
+     return test_errors
+
+
+ def classify_errors(
+     existing: dict[str, GoTestErrorClassification], errors: list[GoTestError]
+ ) -> list[GoTestErrorClassification]:
+     needs_classification: list[GoTestError] = []
+     classified_errors: list[GoTestErrorClassification] = []
+     for error in errors:
+         if prev_classification := existing.get(error.run_id):
+ logger.info(f"found existing classification{error.run_name}: {prev_classification}")
+             classified_errors.append(prev_classification)
+             continue
+         if auto_class := GoTestErrorClass.auto_classification(error.run.output_lines_str):
+             logger.info(f"auto class for {error.run_name}: {auto_class}")
+             classified_errors.append(
+                 GoTestErrorClassification(
+                     error_class=auto_class,
+                     confidence=1.0,
+                     details=error.details,
+                     test_output=error.run.output_lines_str,
+                     run_id=error.run_id,
+                     author=ErrorClassAuthor.AUTO,
+                     test_name=error.run_name,
+                 )
+             )
+         else:
+             needs_classification.append(error)
+     return classified_errors + add_llm_classifications(needs_classification)
+
+
+ def manual_classification(
+     classifications: list[GoTestErrorClassification], settings: AtlasInitSettings, confidence_threshold: float = 1.0
+ ):
+     needs_classification = [cls for cls in classifications if cls.needs_classification(confidence_threshold)]
+     with new_task("Manual Classification", total=len(needs_classification) + 1, log_updates=True) as task:
+         asyncio.run(classify(needs_classification, settings, task))
+
+
+ async def classify(
+     needs_classification: list[GoTestErrorClassification], settings: AtlasInitSettings, task: new_task
+ ) -> None:
+     dao = await init_mongo_dao(settings)
+
+     async def add_classification(
+         cls: GoTestErrorClassification, new_class: GoTestErrorClass, new_author: ErrorClassAuthor, confidence: float
+     ):
+         cls.error_class = new_class
+         cls.author = new_author
+         cls.confidence = confidence
+         is_new = await dao.add_classification(cls)
+         if not is_new:
+             logger.debug("replaced existing class")
+
+     for cls in needs_classification:
+         task.update(advance=1)
+         similars = await dao.read_similar_error_classifications(cls.details, author_filter=ErrorClassAuthor.HUMAN)
+         if (existing := similars.get(cls.run_id)) and not existing.needs_classification():
+             logger.debug(f"found existing classification: {existing}")
+             continue
+         if similars and len({similar.error_class for similar in similars.values()}) == 1:
+             _, similar = similars.popitem()
+             if not similar.needs_classification(0.0):
+                 logger.info(f"using similar classification: {similar}")
+                 await add_classification(cls, similar.error_class, ErrorClassAuthor.SIMILAR, 1.0)
+                 continue
+         test = await dao.read_tf_test_run(cls.run_id)
+         if new_class := ask_user_to_classify_error(cls, test):
+             await add_classification(cls, new_class, ErrorClassAuthor.HUMAN, 1.0)
+         elif confirm("do you want to stop classifying errors?", default=True):
+             logger.info("stopping classification")
+             return
+
+
+ def add_llm_classifications(needs_classification_errors: list[GoTestError]) -> list[GoTestErrorClassification]:
+     """Todo: Use LLM and support reading existing classifications, for example matching on the details"""
+     return [
+         GoTestErrorClassification(
+             ts=utc_now(),
+             error_class=GoTestErrorClass.UNKNOWN,
+             confidence=0.0,
+             details=error.details,
+             test_output=error.run.output_lines_str,
+             run_id=error.run_id,
+             author=ErrorClassAuthor.LLM,
+             test_name=error.run_name,
+         )
+         for error in needs_classification_errors
+     ]
+
+
+ class DownloadJobLogsInput(Event):
+     branch: str = "master"
+     workflow_file_stems: set[str] = Field(default_factory=lambda: set(_TEST_STEMS))
+     max_days_ago: int = 1
+     end_date: datetime = Field(default_factory=utc_now)
+     repo_path: Path
+
+     @property
+     def start_date(self) -> datetime:
+         return self.end_date - timedelta(days=self.max_days_ago)
+
+
+ def download_logs(event: DownloadJobLogsInput, settings: AtlasInitSettings) -> list[Path]:
+     token = run_and_wait("gh auth token", cwd=event.repo_path).stdout
+     assert token, "expected token, but got empty string"
+     os.environ[GH_TOKEN_ENV_NAME] = token
+     end_test_date = event.end_date
+     start_test_date = event.start_date
+     log_paths = []
+     with new_task(
+         f"downloading logs for {event.branch} from {start_test_date.date()} to {end_test_date.date()}",
+         total=(end_test_date - start_test_date).days,
+     ) as task:
+         while start_test_date <= end_test_date:
+             event_out = download_gh_job_logs(
+                 settings,
+                 DownloadJobRunsInput(branch=event.branch, run_date=start_test_date.date()),
+             )
+             log_paths.extend(event_out.log_paths)
+             if errors := event_out.log_errors():
+                 logger.warning(errors)
+             start_test_date += timedelta(days=1)
+             task.update(advance=1)
+     return log_paths
+
+
+ _TEST_STEMS = {
+     "test-suite",
+     "terraform-compatibility-matrix",
+     "acceptance-tests",
+ }
+
+
+ class DownloadJobRunsInput(Event):
+     branch: str = "master"
+     run_date: date
+     workflow_file_stems: set[str] = Field(default_factory=lambda: set(_TEST_STEMS))
+     worker_count: int = 10
+     max_wait_seconds: int = 300
+
+
+ class DownloadJobRunsOutput(Entity):
+     job_download_timeouts: int = 0
+     job_download_empty: int = 0
+     job_download_errors: int = 0
+     log_paths: list[Path] = Field(default_factory=list)
+
+     def log_errors(self) -> str:
+         if not (self.job_download_timeouts or self.job_download_empty or self.job_download_errors):
+             return ""
+         return f"job_download_timeouts: {self.job_download_timeouts}, job_download_empty: {self.job_download_empty}, job_download_errors: {self.job_download_errors}"
+
+
+ def created_on_day(create: date) -> str:
+     date_fmt = year_month_day(create)
+     return f"{date_fmt}T00:00:00Z..{date_fmt}T23:59:59Z"
+
+
+ def year_month_day(create: date) -> str:
+     return create.strftime("%Y-%m-%d")
+
+
+ def download_gh_job_logs(settings: AtlasInitSettings, event: DownloadJobRunsInput) -> DownloadJobRunsOutput:
+     repository = tf_repo()
+     branch = event.branch
+     futures: list[Future[Path | None]] = []
+     run_date = event.run_date
+     out = DownloadJobRunsOutput()
+     with ThreadPoolExecutor(max_workers=event.worker_count) as pool:
+         for workflow in repository.get_workflow_runs(
+             created=created_on_day(run_date),
+             branch=branch,  # type: ignore
+         ):
+             workflow_stem = Path(workflow.path).stem
+             if workflow_stem not in event.workflow_file_stems:
+                 continue
+             workflow_dir = (
+                 settings.github_ci_run_logs / branch / year_month_day(run_date) / f"{workflow.id}_{workflow_stem}"
+             )
+             logger.info(f"workflow dir for {workflow_stem} @ {workflow.created_at.isoformat()}: {workflow_dir}")
+             if workflow_dir.exists():
+                 paths = list(workflow_dir.rglob("*.log"))
+                 logger.info(f"found {len(paths)} logs in existing workflow dir: {workflow_dir}")
+                 out.log_paths.extend(paths)
+                 continue
+             futures.extend(
+                 pool.submit(download_job_safely, workflow_dir, job)
+                 for job in workflow.jobs("all")
+                 if is_test_job(job.name)
+             )
+         done, not_done = wait(futures, timeout=event.max_wait_seconds)
+         out.job_download_timeouts = len(not_done)
+         for future in done:
+             try:
+                 if log_path := future.result():
+                     out.log_paths.append(log_path)
+                 else:
+                     out.job_download_empty += 1
+             except Exception as e:
+                 logger.error(f"failed to download job logs: {e}")
+                 out.job_download_errors += 1
+     return out
+
+
+ class ParseJobLogsInput(Event):
+     settings: AtlasInitSettings
+     log_paths: list[Path]
+     resources: TFResources
+     branch: str
+
+
+ class ParseJobLogsOutput(Event):
+     test_runs: list[GoTestRun] = Field(default_factory=list)
+
+     def tests_with_status(self, status: GoTestStatus) -> list[GoTestRun]:
+         return [test for test in self.test_runs if test.status == status]
+
+
+ def parse_job_tf_test_logs(
+     event: ParseJobLogsInput,
+ ) -> ParseJobLogsOutput:
+     out = ParseJobLogsOutput()
+     for log_path in event.log_paths:
+         log_text = log_path.read_text()
+         env = find_env_of_mongodb_base_url(log_text)
+         try:
+             result = parse_tests(log_text.splitlines())
+         except ValidationError as e:
+             logger.warning(f"failed to parse tests from {log_path}: {e}")
+             continue
+         for test in result:
+             test.log_path = log_path
+             test.env = env or "unknown"
+             test.resources = event.resources.find_test_resources(test)
+             test.branch = event.branch
+         out.test_runs.extend(result)
+     return out
+
+
+ def find_env_of_mongodb_base_url(log_text: str) -> str:
+     for match in re.finditer(r"MONGODB_ATLAS_BASE_URL: (.*)$", log_text, re.MULTILINE):
+         full_url = match.group(1)
+         parsed = BaseURLEnvironment(url=Url(full_url))
+         return parsed.env
+     return ""
+
+
+ class BaseURLEnvironment(Entity):
+     """
+     >>> BaseURLEnvironment(url="https://cloud-dev.mongodb.com/").env
+     'dev'
+     """
+
+     url: Url
+     env: str = ""
+
+     @model_validator(mode="after")
+     def set_env(self) -> BaseURLEnvironment:
+         host = self.url.host
+         assert host, f"host not found in url: {self.url}"
+         cloud_env = host.split(".")[0]
+         self.env = cloud_env.removeprefix("cloud-")
+         return self
+
+
+ class ParseTestErrorInput(Event):
+     test: GoTestRun
+     api_spec_paths: ApiSpecPaths | None = None
+
+
+ def parse_test_error(event: ParseTestErrorInput) -> GoTestError:
+     run = event.test
+     assert run.is_failure, f"test is not failed: {run.name}"
+     details = parse_error_details(run)
+     info = DetailsInfo(run=run, paths=event.api_spec_paths)
+     details.add_info_fields(info)
+     return GoTestError(details=details, run=run)
+
+
+ def ask_user_to_classify_error(cls: GoTestErrorClassification, test: GoTestRun) -> GoTestErrorClass | None:
+     details = cls.details
+     try:
+         print_to_live(test.output_lines_str)
+         print_to_live(f"error details: {details}")
+         return select_list(
+             f"choose classification for test='{test.name_with_package}' in {test.env}",
+             choices=list(GoTestErrorClass),
+             default=cls.error_class,
+         )  # type: ignore
+     except KeyboardInterrupt:
+         return None
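
The new ci_tests.py above wires the whole pipeline together: download GitHub Actions job logs, parse the Go test runs, persist them through the MongoDB DAO, classify failures (auto-rules first, an LLM placeholder, then interactive prompts), and render a daily Markdown report. As a rough illustration, the pipeline can also be driven programmatically; this sketch is hypothetical and assumes init_settings() can resolve an atlas-init environment and that the MongoDB backing store is reachable:

    # Hypothetical usage sketch, not part of the package: rebuild a report
    # from logs already on disk instead of re-downloading them.
    import asyncio

    from atlas_init.cli_tf.ci_tests import TFCITestInput, ci_tests_pipeline

    event = TFCITestInput(
        branch="master",
        max_days_ago=3,          # look three days back in CI history
        skip_log_download=True,  # reuse previously downloaded logs
    )
    out = asyncio.run(ci_tests_pipeline(event))
    for error in out.found_errors:
        print(error.run_name, error.details)
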
atlas_init/cli_tf/codegen/__init__.py (file without changes)
atlas_init/cli_tf/codegen/models.py
@@ -0,0 +1,97 @@
+ from pydantic import BaseModel, Field
+ from typing import Dict, List, Optional
+ from enum import Enum
+
+
+ class HttpMethod(str, Enum):
+     """HTTP methods enum"""
+
+     GET = "GET"
+     POST = "POST"
+     PATCH = "PATCH"
+     DELETE = "DELETE"
+     PUT = "PUT"
+
+
+ class WaitConfig(BaseModel):
+     """Configuration for waiting/polling operations"""
+
+     state_property: str = Field(..., description="Property to check for state")
+     pending_states: List[str] = Field(..., description="States that indicate operation is still in progress")
+     target_states: List[str] = Field(..., description="States that indicate operation is complete")
+     timeout_seconds: int = Field(..., description="Maximum time to wait in seconds")
+     min_timeout_seconds: int = Field(..., description="Minimum timeout in seconds")
+     delay_seconds: int = Field(..., description="Delay between polling attempts in seconds")
+
+
+ class OperationConfig(BaseModel):
+     """Configuration for a single API operation (read, create, update, delete)"""
+
+     path: str = Field(..., description="API endpoint path with path parameters")
+     method: HttpMethod = Field(..., description="HTTP method for the operation")
+     wait: Optional[WaitConfig] = Field(None, description="Wait configuration for async operations")
+
+
+ class SchemaOverrides(BaseModel):
+     """Schema overrides for specific fields"""
+
+     sensitive: Optional[bool] = Field(None, description="Mark field as sensitive")
+     # Add other override properties as needed
+
+
+ class SchemaConfig(BaseModel):
+     """Schema configuration for the resource"""
+
+     aliases: Optional[Dict[str, str]] = Field(None, description="Field name aliases mapping")
+     overrides: Optional[Dict[str, SchemaOverrides]] = Field(None, description="Field-specific overrides")
+     ignores: Optional[List[str]] = Field(None, description="Fields to ignore")
+     timeouts: Optional[List[str]] = Field(None, description="Operations that support timeouts")
+
+
+ class ResourceConfig(BaseModel):
+     """Configuration for a single API resource"""
+
+     read: Optional[OperationConfig] = Field(None, description="Read operation configuration")
+     create: Optional[OperationConfig] = Field(None, description="Create operation configuration")
+     update: Optional[OperationConfig] = Field(None, description="Update operation configuration")
+     delete: Optional[OperationConfig] = Field(None, description="Delete operation configuration")
+     version_header: Optional[str] = Field(None, description="API version header value")
+     custom_schema: Optional[SchemaConfig] = Field(None, description="Schema configuration", alias="schema")
+
+     @property
+     def paths(self) -> list[str]:
+         return [
+             operation.path
+             for operation in [self.read, self.create, self.update, self.delete]
+             if operation and operation.path
+         ]
+
+
+ class ApiResourcesConfig(BaseModel):
+     """Root configuration model containing all API resources"""
+
+     resources: Dict[str, ResourceConfig] = Field(..., description="Dictionary of resource configurations")
+
+     class Config:
+         extra = "allow"  # Allow additional fields not explicitly defined
+
+     def get_resource(self, name: str) -> ResourceConfig:
+         """Get a specific resource configuration by name"""
+         resource = self.resources.get(name)
+         if not resource:
+             raise ValueError(f"Resource '{name}' not found in configuration")
+         return resource
+
+     def list_resources(self) -> List[str]:
+         """Get list of all resource names"""
+         return list(self.resources.keys())
+
+     def get_resources_with_operation(self, operation: str) -> List[str]:
+         """Get list of resources that support a specific operation"""
+         result = []
+         result.extend(
+             name
+             for name, config in self.resources.items()
+             if hasattr(config, operation) and getattr(config, operation) is not None
+         )
+         return result
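
models.py above is a plain pydantic description of the codegen resource configuration. A minimal sketch of loading such a config, assuming the input has already been parsed from JSON/YAML into a dict; the resource name, paths, and states are invented for illustration, and note that the "schema" key populates custom_schema through its alias:

    # Illustrative only: the field names come from models.py, but the
    # resource name, endpoint paths, and states are made up.
    from atlas_init.cli_tf.codegen.models import ApiResourcesConfig

    raw = {
        "resources": {
            "example_resource": {
                "read": {"path": "/api/v1/things/{thingId}", "method": "GET"},
                "create": {
                    "path": "/api/v1/things",
                    "method": "POST",
                    "wait": {
                        "state_property": "state",
                        "pending_states": ["PENDING"],
                        "target_states": ["IDLE"],
                        "timeout_seconds": 300,
                        "min_timeout_seconds": 10,
                        "delay_seconds": 30,
                    },
                },
                "schema": {"ignores": ["links"]},  # parsed into custom_schema via the alias
            }
        }
    }
    config = ApiResourcesConfig(**raw)
    assert config.list_resources() == ["example_resource"]
    assert config.get_resources_with_operation("create") == ["example_resource"]
    print(config.get_resource("example_resource").paths)  # read + create paths
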
atlas_init/cli_tf/codegen/openapi_minimal.py
@@ -0,0 +1,74 @@
+ from queue import Queue
+ from atlas_init.cli_tf.codegen.models import ResourceConfig
+ from atlas_init.cli_tf.openapi import OpenapiSchema
+ from atlas_init.cli_tf.schema_v2 import SchemaResource
+
+
+ def minimal_api_spec_simplified(resource: ResourceConfig, full_spec: OpenapiSchema) -> OpenapiSchema:
+     minimal_spec = OpenapiSchema(
+         openapi=full_spec.openapi,
+         info={"description": "minimal spec", "version": full_spec.info["version"], "title": full_spec.info["title"]},
+         paths={},
+         components={"schemas": {}, "parameters": {}},
+     )
+     include_refs: Queue[str] = Queue()
+     seen_refs: set[str] = set()
+
+     for path in resource.paths:
+         path_dict = minimal_spec.paths[path] = full_spec.paths[path]
+         remove_non_2xx_responses(path_dict)
+         for ref in full_spec.method_refs(path):
+             minimal_spec.add_schema_ref(ref, full_spec.resolve_ref(ref))
+             include_refs.put(ref)
+         for ref in full_spec.parameter_refs(path):
+             minimal_spec.add_schema_ref(ref, full_spec.resolve_ref(ref))
+             include_refs.put(ref)
+
+     def add_from_resource_ref(ref_resource: SchemaResource) -> None:
+         for attribute in ref_resource.attributes.values():
+             if attribute.schema_ref:
+                 minimal_spec.add_schema_ref(attribute.schema_ref, full_spec.resolve_ref(attribute.schema_ref))
+                 include_refs.put(attribute.schema_ref)
+             if attribute.parameter_ref:
+                 minimal_spec.add_schema_ref(
+                     attribute.parameter_ref,
+                     full_spec.resolve_ref(attribute.parameter_ref),
+                 )
+             if ref := attribute.additional_properties_ref:
+                 minimal_spec.add_schema_ref(ref, full_spec.resolve_ref(ref))
+                 include_refs.put(ref)
+         for ref in ref_resource.extra_refs():
+             minimal_spec.add_schema_ref(ref, full_spec.resolve_ref(ref))
+             include_refs.put(ref)
+
+     while not include_refs.empty():
+         ref = include_refs.get()
+         if ref in seen_refs:
+             continue
+         seen_refs.add(ref)
+         if ref.startswith(full_spec.SCHEMAS_PREFIX):
+             ref_resource = full_spec.schema_ref_component(ref, set())
+             add_from_resource_ref(ref_resource)
+         else:
+             param_name = ref.split("/")[-1]
+             minimal_spec.components["parameters"][param_name] = full_spec.resolve_ref(ref)
+     sorted_components = sorted(minimal_spec.components["schemas"].items())
+     sorted_parameters = sorted(minimal_spec.components["parameters"].items())
+     modify_schema_properties(sorted_components)
+     minimal_spec.components["schemas"] = dict(sorted_components)
+     minimal_spec.components["parameters"] = dict(sorted_parameters)
+     return minimal_spec
+
+
+ def modify_schema_properties(schema_properties: list[tuple[str, dict]]):
+     for _, schema_values in schema_properties:
+         properties = schema_values.get("properties", {})
+         for _, prop_values in properties.items():
+             prop_values.pop("name", None)  # Remove 'name' field if it exists
+
+
+ def remove_non_2xx_responses(path_dict: dict) -> None:
+     for method_dict in path_dict.values():
+         method_dict["responses"] = {
+             code: response for code, response in method_dict.get("responses", {}).items() if code.startswith("2")
+         }
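
openapi_minimal.py above prunes a full Admin API spec down to just the paths a ResourceConfig references: it seeds a Queue with the $refs used by those paths, walks them breadth-first until the transitive closure of schemas and parameters is copied over, then drops non-2xx responses and sorts the components. A hedged wiring sketch follows; loading the spec with yaml.safe_load into OpenapiSchema is an assumption (only minimal_api_spec_simplified and get_resource come from this diff):

    # Sketch, not from the package. Assumption: OpenapiSchema accepts the
    # parsed spec dict as keyword fields, matching the constructor call
    # inside minimal_api_spec_simplified.
    from pathlib import Path

    import yaml

    from atlas_init.cli_tf.codegen.models import ApiResourcesConfig
    from atlas_init.cli_tf.codegen.openapi_minimal import minimal_api_spec_simplified
    from atlas_init.cli_tf.openapi import OpenapiSchema

    full_spec = OpenapiSchema(**yaml.safe_load(Path("admin_api.yaml").read_text()))
    config = ApiResourcesConfig(**yaml.safe_load(Path("resources.yaml").read_text()))
    resource = config.get_resource("example_resource")  # illustrative name
    minimal = minimal_api_spec_simplified(resource, full_spec)
    print(sorted(minimal.paths))  # only the paths the resource config references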