atlas-init 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. atlas_init/__init__.py +1 -1
  2. atlas_init/cli_args.py +19 -1
  3. atlas_init/cli_tf/ci_tests.py +116 -24
  4. atlas_init/cli_tf/go_test_run.py +14 -2
  5. atlas_init/cli_tf/go_test_summary.py +334 -82
  6. atlas_init/cli_tf/go_test_tf_error.py +20 -12
  7. atlas_init/cli_tf/hcl/modifier2.py +120 -0
  8. atlas_init/cli_tf/openapi.py +10 -6
  9. atlas_init/html_out/__init__.py +0 -0
  10. atlas_init/html_out/md_export.py +143 -0
  11. atlas_init/sdk_ext/__init__.py +0 -0
  12. atlas_init/sdk_ext/go.py +102 -0
  13. atlas_init/sdk_ext/typer_app.py +18 -0
  14. atlas_init/settings/env_vars.py +13 -1
  15. atlas_init/settings/env_vars_generated.py +2 -0
  16. atlas_init/tf/.terraform.lock.hcl +33 -33
  17. atlas_init/tf/modules/aws_s3/provider.tf +1 -1
  18. atlas_init/tf/modules/aws_vpc/provider.tf +1 -1
  19. atlas_init/tf/modules/cloud_provider/provider.tf +1 -1
  20. atlas_init/tf/modules/cluster/provider.tf +1 -1
  21. atlas_init/tf/modules/encryption_at_rest/provider.tf +1 -1
  22. atlas_init/tf/modules/federated_vars/federated_vars.tf +1 -2
  23. atlas_init/tf/modules/federated_vars/provider.tf +1 -1
  24. atlas_init/tf/modules/project_extra/provider.tf +1 -1
  25. atlas_init/tf/modules/stream_instance/provider.tf +1 -1
  26. atlas_init/tf/modules/vpc_peering/provider.tf +1 -1
  27. atlas_init/tf/modules/vpc_privatelink/versions.tf +1 -1
  28. atlas_init/tf/providers.tf +1 -1
  29. atlas_init/tf_ext/__init__.py +0 -0
  30. atlas_init/tf_ext/__main__.py +3 -0
  31. atlas_init/tf_ext/api_call.py +325 -0
  32. atlas_init/tf_ext/args.py +17 -0
  33. atlas_init/tf_ext/constants.py +3 -0
  34. atlas_init/tf_ext/models.py +106 -0
  35. atlas_init/tf_ext/paths.py +126 -0
  36. atlas_init/tf_ext/settings.py +39 -0
  37. atlas_init/tf_ext/tf_dep.py +324 -0
  38. atlas_init/tf_ext/tf_modules.py +394 -0
  39. atlas_init/tf_ext/tf_vars.py +173 -0
  40. atlas_init/tf_ext/typer_app.py +24 -0
  41. {atlas_init-0.6.0.dist-info → atlas_init-0.7.0.dist-info}/METADATA +3 -2
  42. {atlas_init-0.6.0.dist-info → atlas_init-0.7.0.dist-info}/RECORD +45 -28
  43. atlas_init-0.7.0.dist-info/entry_points.txt +5 -0
  44. atlas_init-0.6.0.dist-info/entry_points.txt +0 -2
  45. {atlas_init-0.6.0.dist-info → atlas_init-0.7.0.dist-info}/WHEEL +0 -0
  46. {atlas_init-0.6.0.dist-info → atlas_init-0.7.0.dist-info}/licenses/LICENSE +0 -0
atlas_init/cli_tf/go_test_summary.py
@@ -6,9 +6,10 @@ from collections import Counter
  from dataclasses import dataclass, field
  from datetime import date, datetime, timedelta
  from enum import StrEnum
- from functools import total_ordering
+ from functools import reduce, total_ordering
  from pathlib import Path
- from typing import ClassVar
+ import re
+ from typing import Callable, ClassVar, TypeVar

  from ask_shell.rich_progress import new_task
  from model_lib import Entity
@@ -18,8 +19,15 @@ from zero_3rdparty.iter_utils import group_by_once

  from atlas_init.cli_tf.github_logs import summary_dir
  from atlas_init.cli_tf.go_test_run import GoTestRun, GoTestStatus
- from atlas_init.cli_tf.go_test_tf_error import GoTestError, GoTestErrorClass, GoTestErrorClassification
- from atlas_init.crud.mongo_dao import init_mongo_dao
+ from atlas_init.cli_tf.go_test_tf_error import (
+     GoTestError,
+     GoTestErrorClass,
+     GoTestErrorClassification,
+     details_short_description,
+     parse_error_details,
+ )
+ from atlas_init.crud.mongo_dao import MongoDao, init_mongo_dao
+ from atlas_init.html_out.md_export import MonthlyReportPaths
  from atlas_init.settings.env_vars import AtlasInitSettings

  logger = logging.getLogger(__name__)
@@ -30,6 +38,7 @@ _COMPLETE_STATUSES = {GoTestStatus.PASS, GoTestStatus.FAIL}
  class GoTestSummary(Entity):
      name: str
      results: list[GoTestRun] = Field(default_factory=list)
+     classifications: dict[str, GoTestErrorClassification] = Field(default_factory=dict)

      @model_validator(mode="after")
      def sort_results(self):
@@ -44,7 +53,8 @@ class GoTestSummary(Entity):
      def success_rate(self) -> float:
          total = self.total_completed
          if total == 0:
-             logger.warning(f"No results to calculate success rate for {self.name}")
+             if not self.is_skipped:
+                 logger.warning(f"No results to calculate success rate for {self.name}")
              return 0
          return sum(r.status == "PASS" for r in self.results) / total

@@ -89,20 +99,67 @@ def summary_str(summary: GoTestSummary, start_date: datetime, end_date: datetime
      )


+ def test_detail_md(summary: GoTestSummary, start_date: datetime, end_date: datetime) -> str:
+     return "\n".join(
+         [
+             f"# {summary.name} Test Details",
+             summary_line(summary.results),
+             f"Success rate: {summary.success_rate_human}",
+             "",
+             *error_table(summary),
+             "## Timeline",
+             *timeline_lines(summary, start_date, end_date),
+         ]
+     )
+
+
  def timeline_lines(summary: GoTestSummary, start_date: datetime, end_date: datetime) -> list[str]:
      lines = []
      one_day = timedelta(days=1)
      for active_date in datetime_utils.day_range(start_date.date(), (end_date + one_day).date(), one_day):
          active_tests = summary.select_tests(active_date)
          if not active_tests:
-             lines.append(f"{active_date:%Y-%m-%d}: MISSING")
+             lines.append(f"- {active_date:%Y-%m-%d}: MISSING")
              continue
-
-         tests_str = ", ".join(format_test_oneline(t) for t in active_tests)
-         lines.append(f"{active_date:%Y-%m-%d}: {tests_str}")
+         lines.append(f"- {active_date:%Y-%m-%d}")
+         if len(active_tests) == 1:
+             test = active_tests[0]
+             if test.is_failure:
+                 lines.extend(_extract_error_lines(test, summary))
+             else:
+                 lines[-1] += f" {format_test_oneline(test)}"
+         if len(active_tests) > 1:
+             for test in active_tests:
+                 error_lines = _extract_error_lines(test, summary)
+                 lines.extend(
+                     [
+                         f" - {format_test_oneline(test)}",
+                         *error_lines,
+                     ]
+                 )
      return lines


+ def _error_header(test: GoTestRun) -> str:
+     return f"Error {test.ts.isoformat('T', timespec='seconds')}"
+
+
+ def _extract_error_lines(test: GoTestRun, summary: GoTestSummary) -> list[str]:
+     if not test.is_failure:
+         return []
+     error_classification = summary.classifications.get(test.id)
+     classification_lines = [str(error_classification)] if error_classification else []
+     details_lines = [details_short_description(error_classification.details)] if error_classification else []
+     return [
+         "",
+         f"### {_error_header(test)}",
+         *classification_lines,
+         *details_lines,
+         f"```\n{test.output_lines_str}\n```",
+         "",
+     ]
+
+
  def failure_details(summary: GoTestSummary) -> list[str]:
      lines = ["## Failures"]
      for test in summary.results:
@@ -117,8 +174,47 @@ def failure_details(summary: GoTestSummary) -> list[str]:
      return lines


+ def error_table(summary: GoTestSummary) -> list[str]:
+     error_rows: list[dict] = []
+     for test in summary.results:
+         if test.is_failure:
+             anchor = header_to_markdown_link(_error_header(test))
+             row = {
+                 "Date": f"[{test.ts.strftime('%Y-%m-%d %H:%M')}]({anchor})",
+                 "Env": test.env,
+                 "Runtime": f"{test.run_seconds:.2f}s",
+             }
+             error_rows.append(row)
+             if error_cls := summary.classifications.get(test.id):
+                 row["Error Class"] = error_cls.error_class
+                 row["Details"] = details_short_description(error_cls.details)
+             elif auto_class := GoTestErrorClass.auto_classification(test.output_lines_str):
+                 row["Error Class"] = auto_class
+             if "Details" not in row:
+                 row["Details"] = details_short_description(parse_error_details(test))
+     if not error_rows:
+         return []
+     headers = sorted(reduce(lambda x, y: x.union(y.keys()), error_rows, set()))
+     return markdown_table_lines("Error Table", error_rows, headers, lambda row: [row.get(key, "") for key in headers])
+
+
  def format_test_oneline(test: GoTestRun) -> str:
-     return f"[{test.status} {test.runtime_human}]({test.url})"  # type: ignore
+     if job_url := test.job_url:
+         return f"[{test.status} {test.runtime_human}]({job_url})"
+     return f"{test.status} {test.runtime_human}"  # type: ignore
+
+
+ def header_to_markdown_link(header: str) -> str:
+     """
+     Converts a markdown header to a markdown link anchor.
+     Example:
+     'Error 2025-05-23T00:28:50+00:00' -> '#error-2025-05-23t0028500000'
+     """
+     anchor = header.strip().lower()
+     # Remove all characters except alphanumerics, spaces, and hyphens
+     anchor = re.sub(r"[^a-z0-9 \-]", "", anchor)
+     anchor = anchor.replace(" ", "-")
+     return f"#{anchor}"


  def create_detailed_summary(
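Note: the anchor format produced by the new `header_to_markdown_link` helper can be checked in isolation; the sketch below re-implements the same three steps shown in the hunk above, and the expected value is taken from the helper's own docstring.

```python
import re


def header_to_markdown_link(header: str) -> str:
    # Same steps as in the diff: lowercase, drop chars outside [a-z0-9 -], spaces -> hyphens.
    anchor = header.strip().lower()
    anchor = re.sub(r"[^a-z0-9 \-]", "", anchor)
    return f"#{anchor.replace(' ', '-')}"


if __name__ == "__main__":
    # Expected value comes from the docstring example in the diff.
    assert header_to_markdown_link("Error 2025-05-23T00:28:50+00:00") == "#error-2025-05-23t0028500000"
```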
@@ -188,7 +284,7 @@ def create_test_report(
              error_details="",
          )
      envs = {run.env for run in runs if run.env}
-     lines = [summary_line(runs, errors)]
+     lines = [summary_line(runs)]
      if errors:
          env_name_str = f" in {env_name}" if env_name else ""
          lines.append(f"\n\n## Errors Overview{env_name_str}")
@@ -210,18 +306,25 @@ def create_test_report(
      )


- def summary_line(runs: list[GoTestRun], errors: list[GoTestError]):
+ def run_statuses(runs: list[GoTestRun]) -> str:
+     if counter := Counter([run.status for run in runs]):
+         return " ".join(
+             f"{cls}(x {count})" if count > 1 else cls
+             for cls, count in sorted(counter.items(), key=lambda item: item[1], reverse=True)
+         )
+     return ""
+
+
+ def summary_line(runs: list[GoTestRun]):
      run_delta = GoTestRun.run_delta(runs)
      envs = {run.env for run in runs if run.env}
      pkg_test_names = {run.name_with_package for run in runs}
-     skipped = sum(run.status == GoTestStatus.SKIP for run in runs)
-     passed = sum(run.status == GoTestStatus.PASS for run in runs)
      envs_str = ", ".join(sorted(envs))
      branches = {run.branch for run in runs if run.branch}
      branches_str = (
          "from " + ", ".join(sorted(branches)) + " branches" if len(branches) > 1 else f"from {branches.pop()} branch"
      )
-     return f"# Found {len(runs)} TestRuns in {envs_str} {run_delta} {branches_str}: {len(pkg_test_names)} unique tests, {len(errors)} Errors, {skipped} Skipped, {passed} Passed"
+     return f"# Found {len(runs)} TestRuns in {envs_str} {run_delta} {branches_str}: {len(pkg_test_names)} unique tests, {run_statuses(runs)}"


  def error_overview_lines(errors: list[GoTestError], single_indent: str) -> list[str]:
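Note: `run_statuses` replaces the separate skipped/passed counters in `summary_line`. A minimal sketch of the same aggregation over plain status strings (in the package these are `GoTestStatus` members):

```python
from collections import Counter


def run_statuses(statuses: list[str]) -> str:
    # Most frequent status first; the "(x N)" suffix appears only when a status occurs more than once.
    if counter := Counter(statuses):
        return " ".join(
            f"{cls}(x {count})" if count > 1 else cls
            for cls, count in sorted(counter.items(), key=lambda item: item[1], reverse=True)
        )
    return ""


if __name__ == "__main__":
    print(run_statuses(["PASS", "PASS", "FAIL", "SKIP"]))  # PASS(x 2) FAIL SKIP
```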
@@ -310,45 +413,38 @@ class TFCITestOutput(Entity):
      )


- class DailyReportIn(Entity):
-     report_date: datetime
+ class RunHistoryFilter(Entity):
      run_history_start: datetime
      run_history_end: datetime
-     env_filter: list[str] = field(default_factory=list)
+     env_filter: list[str] = Field(default_factory=list)
      skip_branch_filter: bool = False
-     skip_columns: set[ErrorRowColumns] = field(default_factory=set)


- class DailyReportOut(Entity):
-     summary_md: str
-     details_md: str
+ class MonthlyReportIn(Entity):
+     name: str
+     branch: str
+     history_filter: RunHistoryFilter
+     skip_columns: set[ErrorRowColumns] = Field(default_factory=set)
+     skip_rows: list[Callable[[TestRow], bool]] = Field(default_factory=list)
+     existing_details_md: dict[str, str] = Field(default_factory=dict)
+     report_paths: MonthlyReportPaths

+     @classmethod
+     def skip_skipped(cls, test: TestRow) -> bool:
+         return all(run.is_skipped for runs in test.last_env_runs.values() for run in runs)

- def create_daily_report(output: TFCITestOutput, settings: AtlasInitSettings, event: DailyReportIn) -> DailyReportOut:
-     errors = output.found_errors
-     error_classes = {cls.run_id: cls.error_class for cls in output.classified_errors}
-     one_line_summary = summary_line(output.found_tests, errors)
+     @classmethod
+     def skip_if_no_failures(cls, test: TestRow) -> bool:
+         return not any(run.is_failure for runs in test.last_env_runs.values() for run in runs)

-     with new_task("Daily Report"):
-         with new_task("Collecting error rows") as task:
-             failure_rows = asyncio.run(_collect_error_rows(errors, error_classes, settings, event, task))
-         if not failure_rows:
-             return DailyReportOut(summary_md=f"🎉All tests passed\n{one_line_summary}", details_md="")
-         columns = ErrorRowColumns.column_names(failure_rows, event.skip_columns)
-         summary_md = [
-             "# Daily Report",
-             one_line_summary,
-             "",
-             "## Errors Table",
-             " | ".join(columns),
-             " | ".join("---" for _ in columns),
-             *(" | ".join(row.as_row(columns)) for row in failure_rows),
-         ]
-         return DailyReportOut(summary_md="\n".join(summary_md), details_md="TODO")
+
+ class MonthlyReportOut(Entity):
+     summary_md: str
+     test_details_md: dict[str, str] = Field(default_factory=dict)


  class ErrorRowColumns(StrEnum):
-     GROUP_NAME = "Group or Package"
+     GROUP_NAME = "Group with Package"
      TEST = "Test"
      ERROR_CLASS = "Error Class"
      DETAILS_SUMMARY = "Details Summary"
@@ -358,7 +454,7 @@ class ErrorRowColumns(StrEnum):
      __ENV_BASED__: ClassVar[list[str]] = [PASS_RATE, TIME_SINCE_PASS]

      @classmethod
-     def column_names(cls, rows: list[ErrorRow], skip_columns: set[ErrorRowColumns]) -> list[str]:
+     def column_names(cls, rows: list[TestRow], skip_columns: set[ErrorRowColumns]) -> list[str]:
          if not rows:
              return []
          envs = set()
@@ -371,16 +467,17 @@ class ErrorRowColumns(StrEnum):


  @total_ordering
- class ErrorRow(Entity):
+ class TestRow(Entity):
      group_name: str
      package_url: str
+     full_name: str
      test_name: str
-     error_class: GoTestErrorClass
+     error_classes: list[GoTestErrorClass]
      details_summary: str
      last_env_runs: dict[str, list[GoTestRun]] = field(default_factory=dict)

      def __lt__(self, other) -> bool:
-         if not isinstance(other, ErrorRow):
+         if not isinstance(other, TestRow):
              raise TypeError
          return (self.group_name, self.test_name) < (other.group_name, other.test_name)

@@ -403,10 +500,20 @@ class ErrorRow(Entity):
                  time_since[env] = "never run"
                  continue
              time_since[env] = next(
-                 (run.when for run in sorted(runs, reverse=True) if run.status == GoTestStatus.PASS), "never pass"
+                 (run.ts.strftime("%Y-%m-%d") for run in sorted(runs, reverse=True) if run.status == GoTestStatus.PASS),
+                 "never pass",
              )
          return time_since

+     @property
+     def error_classes_str(self) -> str:
+         if counter := Counter(self.error_classes):
+             return " ".join(
+                 f"{cls}(x {count})" if count > 1 else cls
+                 for cls, count in sorted(counter.items(), key=lambda item: item[1], reverse=True)
+             )
+         return "No error classes"
+
      def as_row(self, columns: list[str]) -> list[str]:
          values = []
          pass_rates = self.pass_rates
@@ -414,18 +521,23 @@ class ErrorRow(Entity):
          for col in columns:
              match col:
                  case ErrorRowColumns.GROUP_NAME:
-                     values.append(self.group_name or self.package_url or "Unknown Group")
+                     group_part = self.full_name.removesuffix(self.test_name).rstrip("/")
+                     values.append(group_part or "Unknown Group")
                  case ErrorRowColumns.TEST:
                      values.append(self.test_name)
                  case ErrorRowColumns.ERROR_CLASS:
-                     values.append(self.error_class)
+                     values.append(self.error_classes_str)
                  case ErrorRowColumns.DETAILS_SUMMARY:
                      values.append(self.details_summary)
                  case s if s.startswith(ErrorRowColumns.PASS_RATE):
                      env = s.split(" (")[-1].rstrip(")")
                      env_pass_rate = pass_rates.get(env, 0.0)
                      env_run_count = len(self.last_env_runs.get(env, []))
-                     values.append(f"{env_pass_rate:.2%} ({env_run_count} runs)" if env in pass_rates else "N/A")
+                     pass_rate_pct = f"{env_pass_rate:.2%} ({env_run_count} runs)" if env in pass_rates else "N/A"
+                     if pass_rate_pct.startswith("100.00%"):
+                         values.append("always")  # use always to avoid sorting errors, 100% showing before 2%
+                     else:
+                         values.append(pass_rate_pct)
                  case s if s.startswith(ErrorRowColumns.TIME_SINCE_PASS):
                      env = s.split(" (")[-1].rstrip(")")
                      values.append(time_since_pass.get(env, "never passed"))
@@ -435,43 +547,183 @@ class ErrorRow(Entity):
          return values


- async def _collect_error_rows(
+ def create_monthly_report(settings: AtlasInitSettings, event: MonthlyReportIn) -> MonthlyReportOut:
+     with new_task(f"Monthly Report for {event.name} on {event.branch}"):
+         test_rows, detail_files_md = asyncio.run(_collect_monthly_test_rows_and_summaries(settings, event))
+         assert test_rows, "No error rows found for monthly report"
+         columns = ErrorRowColumns.column_names(test_rows, event.skip_columns)
+         skip_rows = (
+             []
+             if event.skip_rows == []
+             else [
+                 "",
+                 "## Skip Test Filters",
+                 *[f"- {method.__name__}" for method in event.skip_rows],
+                 "",
+             ]
+         )
+         summary_md = [
+             f"# Monthly Report for {event.name} on {event.branch} from {event.history_filter.run_history_start:%Y-%m-%d} to {event.history_filter.run_history_end:%Y-%m-%d} Found {len(test_rows)} unique Tests",
+             *skip_rows,
+             *markdown_table_lines("Test Run Table", test_rows, columns, lambda row: row.as_row(columns)),
+         ]
+         return MonthlyReportOut(
+             summary_md="\n".join(summary_md),
+             test_details_md=detail_files_md,
+         )
+
+
+ class DailyReportIn(Entity):
+     report_date: datetime
+     history_filter: RunHistoryFilter
+     skip_columns: set[ErrorRowColumns] = Field(default_factory=set)
+     row_modifier: Callable[[TestRow, dict[str, str]], dict[str, str]] | None = Field(
+         default=None, description=f"Use the {ErrorRowColumns} to access column-->value mapping"
+     )
+
+
+ class DailyReportOut(Entity):
+     summary_md: str
+     details_md: str
+
+
+ T = TypeVar("T")
+
+
+ def markdown_table_lines(
+     header: str, rows: list[T], columns: list[str], row_to_line: Callable[[T], list[str]], *, header_level: int = 2
+ ) -> list[str]:
+     if not rows:
+         return []
+     return [
+         f"{'#' * header_level} {header}",
+         "",
+         " | ".join(columns),
+         " | ".join("---" for _ in columns),
+         *(" | ".join(row_to_line(row)) for row in rows),
+         "",
+     ]
+
+
+ def create_daily_report(output: TFCITestOutput, settings: AtlasInitSettings, event: DailyReportIn) -> DailyReportOut:
+     errors = output.found_errors
+     error_classes = {cls.run_id: cls.error_class for cls in output.classified_errors}
+     one_line_summary = summary_line(output.found_tests)
+
+     with new_task("Daily Report"):
+         with new_task("Collecting error rows") as task:
+             failure_rows = asyncio.run(
+                 _collect_daily_error_rows(errors, error_classes, settings, event.history_filter, task)
+             )
+         if not failure_rows:
+             return DailyReportOut(summary_md=f"🎉All tests passed\n{one_line_summary}", details_md="")
+         columns = ErrorRowColumns.column_names(failure_rows, event.skip_columns)
+
+         def as_md_row(row: TestRow) -> list[str]:
+             if row_modifier := event.row_modifier:
+                 row_dict = dict(zip(columns, row.as_row(columns)))
+                 row_dict = row_modifier(row, row_dict)
+                 return [row_dict[col] for col in columns]
+             return row.as_row(columns)
+
+         summary_md = [
+             f"# Daily Report on {event.report_date:%Y-%m-%d}",
+             one_line_summary,
+             "",
+             *markdown_table_lines("Errors Table", failure_rows, columns, as_md_row),
+         ]
+         return DailyReportOut(summary_md="\n".join(summary_md), details_md="TODO")
+
+
+ async def _collect_daily_error_rows(
      errors: list[GoTestError],
      error_classes: dict[str, GoTestErrorClass],
      settings: AtlasInitSettings,
-     event: DailyReportIn,
+     event: RunHistoryFilter,
      task: new_task,
- ) -> list[ErrorRow]:
-     error_rows: list[ErrorRow] = []
+ ) -> list[TestRow]:
+     error_rows: list[TestRow] = []
      dao = await init_mongo_dao(settings)
      for error in errors:
-         package_url = error.run.package_url
-         group_name = error.run.group_name
-         package_url = error.run.package_url or ""
+         test_run = error.run
          error_class = error_classes[error.run_id]
-         branch = error.run.branch
-         branch_filter = []
-         if branch and not event.skip_branch_filter:
-             branch_filter.append(branch)
-         run_history = await dao.read_run_history(
-             test_name=error.run_name,
-             package_url=package_url,
-             group_name=group_name,
-             start_date=event.run_history_start,
-             end_date=event.run_history_end,
-             envs=event.env_filter,
-             branches=branch_filter,
-         )
-         last_env_runs = group_by_once(run_history, key=lambda run: run.env or "unknown-env")
-         error_rows.append(
-             ErrorRow(
-                 group_name=group_name,
-                 package_url=package_url,
-                 test_name=error.run_name,
-                 error_class=error_class,
-                 details_summary=error.short_description,
-                 last_env_runs=last_env_runs,
-             )
-         )
+         summary = error.short_description
+         error_row, _ = await _create_test_row(event, dao, test_run, error_class, summary)
+         error_rows.append(error_row)
          task.update(advance=1)
      return sorted(error_rows)
+
+
+ async def _collect_monthly_test_rows_and_summaries(
+     settings: AtlasInitSettings,
+     event: MonthlyReportIn,
+ ) -> tuple[list[TestRow], dict[str, str]]:
+     dao = await init_mongo_dao(settings)
+     branch = event.branch
+     history_filter = event.history_filter
+     summary_name = event.name
+     skip_rows = event.skip_rows
+     last_day_test_names = await dao.read_tf_tests_for_day(branch, history_filter.run_history_end)
+     test_runs_by_name: dict[str, GoTestRun] = {run.full_name: run for run in last_day_test_names}
+     test_rows = []
+     detail_files_md: dict[str, str] = {}
+     with new_task("Collecting monthly error rows", total=len(last_day_test_names)) as task:
+         for name_with_group, test_run in test_runs_by_name.items():
+             test_row, runs = await _create_test_row(
+                 history_filter,
+                 dao,
+                 test_run,
+             )
+             if any(skip(test_row) for skip in skip_rows):
+                 continue
+             test_rows.append(test_row)
+             run_ids = [run.id for run in runs]
+             classifications = await dao.read_error_classifications(run_ids)
+             test_row.error_classes = [cls.error_class for cls in classifications.values()]
+             test_row.details_summary = (
+                 f"[{run_statuses(runs)}]({settings.github_ci_summary_details_rel_path(summary_name, name_with_group)})"
+             )
+             if name_with_group not in event.existing_details_md:
+                 summary = GoTestSummary(name=name_with_group, results=runs, classifications=classifications)
+                 detail_files_md[name_with_group] = test_detail_md(
+                     summary, history_filter.run_history_start, history_filter.run_history_end
+                 )
+             task.update(advance=1)
+     return sorted(test_rows), detail_files_md
+
+
+ async def _create_test_row(
+     history_filter: RunHistoryFilter,
+     dao: MongoDao,
+     test_run: GoTestRun,
+     error_class: GoTestErrorClass | None = None,
+     summary: str = "",
+ ) -> tuple[TestRow, list[GoTestRun]]:
+     package_url = test_run.package_url
+     group_name = test_run.group_name
+     package_url = test_run.package_url or ""
+     branch = test_run.branch
+     branch_filter = []
+     test_name = test_run.name
+     if branch and not history_filter.skip_branch_filter:
+         branch_filter.append(branch)
+     run_history = await dao.read_run_history(
+         test_name=test_name,
+         package_url=package_url,
+         group_name=group_name,
+         start_date=history_filter.run_history_start,
+         end_date=history_filter.run_history_end,
+         envs=history_filter.env_filter,
+         branches=branch_filter,
+     )
+     last_env_runs = group_by_once(run_history, key=lambda run: run.env or "unknown-env")
+     error_classes = [error_class] if error_class else []
+     return TestRow(
+         full_name=test_run.full_name,
+         group_name=group_name,
+         package_url=package_url,
+         test_name=test_name,
+         error_classes=error_classes,
+         details_summary=summary,
+         last_env_runs=last_env_runs,
+     ), run_history
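Note: `markdown_table_lines` is generic over the row type, so both the daily and monthly reports can reuse it. A minimal usage sketch with dict rows (the test name below is purely illustrative):

```python
from typing import Callable, TypeVar

T = TypeVar("T")


def markdown_table_lines(
    header: str, rows: list[T], columns: list[str], row_to_line: Callable[[T], list[str]], *, header_level: int = 2
) -> list[str]:
    # Same shape as the helper added above: heading, blank line, header row, separator, data rows.
    if not rows:
        return []
    return [
        f"{'#' * header_level} {header}",
        "",
        " | ".join(columns),
        " | ".join("---" for _ in columns),
        *(" | ".join(row_to_line(row)) for row in rows),
        "",
    ]


if __name__ == "__main__":
    columns = ["Test", "Error Class"]
    rows = [{"Test": "TestAccExampleResource_basic", "Error Class": "flaky_500"}]
    print("\n".join(markdown_table_lines("Errors Table", rows, columns, lambda r: [r[c] for c in columns])))
```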
atlas_init/cli_tf/go_test_tf_error.py
@@ -23,6 +23,7 @@ class GoTestErrorClass(StrEnum):
      FLAKY_400 = "flaky_400"
      FLAKY_500 = "flaky_500"
      FLAKY_CHECK = "flaky_check"
+     FLAKY_CLIENT = "flaky_client"
      OUT_OF_CAPACITY = "out_of_capacity"
      PROJECT_LIMIT_EXCEEDED = "project_limit_exceeded"
      DANGLING_RESOURCE = "dangling_resource"
@@ -36,6 +37,7 @@ class GoTestErrorClass(StrEnum):
          FLAKY_400: "retry",
          FLAKY_500: "retry",
          FLAKY_CHECK: "retry",
+         FLAKY_CLIENT: "retry",
          PROVIDER_DOWNLOAD: "retry",
          OUT_OF_CAPACITY: "retry_later",
          PROJECT_LIMIT_EXCEEDED: "clean_project",
@@ -46,7 +48,8 @@ class GoTestErrorClass(StrEnum):
      }
      __CONTAINS_MAPPING__ = {
          OUT_OF_CAPACITY: ("OUT_OF_CAPACITY",),
-         FLAKY_500: ("HTTP 500", "UNEXPECTED_ERROR"),
+         FLAKY_500: ("HTTP 500", "UNEXPECTED_ERROR", "503 Service Unavailable"),
+         FLAKY_CLIENT: ("dial tcp: lookup", "i/o timeout"),
          PROVIDER_DOWNLOAD: [
              "mongodbatlas: failed to retrieve authentication checksums for provider",
              "Error: Failed to install provider github.com: bad response",
@@ -65,7 +68,7 @@ class GoTestErrorClass(StrEnum):
              (
                  error_class
                  for error_class, contains_list in cls.__CONTAINS_MAPPING__.items()
-                 if all(contains(output, contains_part) for contains_part in contains_list)
+                 if any(contains(output, contains_part) for contains_part in contains_list)
              ),
              None,
          )  # type: ignore
@@ -311,16 +314,8 @@ class GoTestError(Entity):

      @property
      def short_description(self) -> str:
-         match self.details:
-             case GoTestGeneralCheckError():
-                 return str(self.details)
-             case GoTestResourceCheckError():
-                 return f"CheckFailure for {self.details.tf_resource_type}.{self.details.tf_resource_name} at Step: {self.details.step_nr} Checks: {self.details.check_numbers_str}"
-             case GoTestAPIError(api_path_normalized=api_path_normalized) if api_path_normalized:
-                 return f"API Error {self.details.api_error_code_str} {api_path_normalized}"
-             case GoTestAPIError(api_path=api_path):
-                 return f"{self.details.api_error_code_str} {api_path}"
-         return ""
+         details = self.details
+         return details_short_description(details) if details else ""

      def header(self, use_ticks: bool = False) -> str:
          name_with_ticks = f"`{self.run.name_with_package}`" if use_ticks else self.run.name_with_package
@@ -329,6 +324,19 @@ class GoTestError(Entity):
          return f"{name_with_ticks}"


+ def details_short_description(details: ErrorDetailsT) -> str:
+     match details:
+         case GoTestGeneralCheckError():
+             return str(details)
+         case GoTestResourceCheckError():
+             return f"CheckFailure for {details.tf_resource_type}.{details.tf_resource_name} at Step: {details.step_nr} Checks: {details.check_numbers_str}"
+         case GoTestAPIError(api_path_normalized=api_path_normalized) if api_path_normalized:
+             return f"API Error {details.api_error_code_str} {api_path_normalized}"
+         case GoTestAPIError(api_path=api_path):
+             return f"{details.api_error_code_str} {api_path}"
+     return ""
+
+
  one_of_methods = "|".join(API_METHODS)

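Note: in `auto_classification`, switching `all(...)` to `any(...)` turns each pattern tuple into a list of alternatives, which is what multi-pattern classes such as the new FLAKY_CLIENT need. A standalone sketch of the matching rule (pattern tuples copied from the hunks above; `contains` is assumed to be a case-insensitive substring check):

```python
CONTAINS_MAPPING = {
    "flaky_500": ("HTTP 500", "UNEXPECTED_ERROR", "503 Service Unavailable"),
    "flaky_client": ("dial tcp: lookup", "i/o timeout"),
}


def contains(output: str, part: str) -> bool:
    # Assumed behavior: case-insensitive substring match.
    return part.lower() in output.lower()


def auto_classification(output: str) -> str | None:
    # With any(), one matching pattern is enough; the old all() required every pattern
    # of a class to appear in the output, so classes listing several alternatives rarely matched.
    return next(
        (
            error_class
            for error_class, contains_list in CONTAINS_MAPPING.items()
            if any(contains(output, part) for part in contains_list)
        ),
        None,
    )


if __name__ == "__main__":
    print(auto_classification("Get https://cloud.mongodb.com: dial tcp: lookup cloud.mongodb.com: i/o timeout"))  # flaky_client
```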