atlas-init 0.4.5__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83) hide show
  1. atlas_init/__init__.py +1 -1
  2. atlas_init/cli.py +2 -0
  3. atlas_init/cli_args.py +19 -1
  4. atlas_init/cli_cfn/cfn_parameter_finder.py +59 -51
  5. atlas_init/cli_cfn/example.py +8 -16
  6. atlas_init/cli_helper/go.py +6 -10
  7. atlas_init/cli_root/mms_released.py +46 -0
  8. atlas_init/cli_tf/app.py +3 -84
  9. atlas_init/cli_tf/ci_tests.py +585 -0
  10. atlas_init/cli_tf/codegen/__init__.py +0 -0
  11. atlas_init/cli_tf/codegen/models.py +97 -0
  12. atlas_init/cli_tf/codegen/openapi_minimal.py +74 -0
  13. atlas_init/cli_tf/github_logs.py +7 -94
  14. atlas_init/cli_tf/go_test_run.py +395 -130
  15. atlas_init/cli_tf/go_test_summary.py +589 -10
  16. atlas_init/cli_tf/go_test_tf_error.py +388 -0
  17. atlas_init/cli_tf/hcl/modifier.py +14 -12
  18. atlas_init/cli_tf/hcl/modifier2.py +207 -0
  19. atlas_init/cli_tf/mock_tf_log.py +1 -1
  20. atlas_init/cli_tf/{schema_v2_api_parsing.py → openapi.py} +101 -19
  21. atlas_init/cli_tf/schema_v2.py +43 -1
  22. atlas_init/crud/__init__.py +0 -0
  23. atlas_init/crud/mongo_client.py +115 -0
  24. atlas_init/crud/mongo_dao.py +296 -0
  25. atlas_init/crud/mongo_utils.py +239 -0
  26. atlas_init/html_out/__init__.py +0 -0
  27. atlas_init/html_out/md_export.py +143 -0
  28. atlas_init/repos/go_sdk.py +12 -3
  29. atlas_init/repos/path.py +110 -7
  30. atlas_init/sdk_ext/__init__.py +0 -0
  31. atlas_init/sdk_ext/go.py +102 -0
  32. atlas_init/sdk_ext/typer_app.py +18 -0
  33. atlas_init/settings/config.py +3 -6
  34. atlas_init/settings/env_vars.py +18 -2
  35. atlas_init/settings/env_vars_generated.py +2 -0
  36. atlas_init/settings/interactive2.py +134 -0
  37. atlas_init/tf/.terraform.lock.hcl +59 -59
  38. atlas_init/tf/always.tf +5 -5
  39. atlas_init/tf/main.tf +3 -3
  40. atlas_init/tf/modules/aws_kms/aws_kms.tf +1 -1
  41. atlas_init/tf/modules/aws_s3/provider.tf +2 -1
  42. atlas_init/tf/modules/aws_vpc/provider.tf +2 -1
  43. atlas_init/tf/modules/cfn/cfn.tf +0 -8
  44. atlas_init/tf/modules/cfn/kms.tf +5 -5
  45. atlas_init/tf/modules/cfn/provider.tf +7 -0
  46. atlas_init/tf/modules/cfn/variables.tf +1 -1
  47. atlas_init/tf/modules/cloud_provider/cloud_provider.tf +1 -1
  48. atlas_init/tf/modules/cloud_provider/provider.tf +2 -1
  49. atlas_init/tf/modules/cluster/cluster.tf +31 -31
  50. atlas_init/tf/modules/cluster/provider.tf +2 -1
  51. atlas_init/tf/modules/encryption_at_rest/provider.tf +2 -1
  52. atlas_init/tf/modules/federated_vars/federated_vars.tf +2 -3
  53. atlas_init/tf/modules/federated_vars/provider.tf +2 -1
  54. atlas_init/tf/modules/project_extra/project_extra.tf +1 -10
  55. atlas_init/tf/modules/project_extra/provider.tf +8 -0
  56. atlas_init/tf/modules/stream_instance/provider.tf +8 -0
  57. atlas_init/tf/modules/stream_instance/stream_instance.tf +0 -9
  58. atlas_init/tf/modules/vpc_peering/provider.tf +10 -0
  59. atlas_init/tf/modules/vpc_peering/vpc_peering.tf +0 -10
  60. atlas_init/tf/modules/vpc_privatelink/versions.tf +2 -1
  61. atlas_init/tf/outputs.tf +1 -0
  62. atlas_init/tf/providers.tf +1 -1
  63. atlas_init/tf/variables.tf +7 -7
  64. atlas_init/tf_ext/__init__.py +0 -0
  65. atlas_init/tf_ext/__main__.py +3 -0
  66. atlas_init/tf_ext/api_call.py +325 -0
  67. atlas_init/tf_ext/args.py +17 -0
  68. atlas_init/tf_ext/constants.py +3 -0
  69. atlas_init/tf_ext/models.py +106 -0
  70. atlas_init/tf_ext/paths.py +126 -0
  71. atlas_init/tf_ext/settings.py +39 -0
  72. atlas_init/tf_ext/tf_dep.py +324 -0
  73. atlas_init/tf_ext/tf_modules.py +394 -0
  74. atlas_init/tf_ext/tf_vars.py +173 -0
  75. atlas_init/tf_ext/typer_app.py +24 -0
  76. atlas_init/typer_app.py +4 -8
  77. {atlas_init-0.4.5.dist-info → atlas_init-0.7.0.dist-info}/METADATA +8 -4
  78. atlas_init-0.7.0.dist-info/RECORD +138 -0
  79. atlas_init-0.7.0.dist-info/entry_points.txt +5 -0
  80. atlas_init-0.4.5.dist-info/RECORD +0 -105
  81. atlas_init-0.4.5.dist-info/entry_points.txt +0 -2
  82. {atlas_init-0.4.5.dist-info → atlas_init-0.7.0.dist-info}/WHEEL +0 -0
  83. {atlas_init-0.4.5.dist-info → atlas_init-0.7.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,13 +1,34 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
1
4
  import logging
5
+ from collections import Counter
6
+ from dataclasses import dataclass, field
2
7
  from datetime import date, datetime, timedelta
3
- from functools import total_ordering
8
+ from enum import StrEnum
9
+ from functools import reduce, total_ordering
10
+ from pathlib import Path
11
+ import re
12
+ from typing import Callable, ClassVar, TypeVar
4
13
 
14
+ from ask_shell.rich_progress import new_task
5
15
  from model_lib import Entity
6
16
  from pydantic import Field, model_validator
7
17
  from zero_3rdparty import datetime_utils, file_utils
18
+ from zero_3rdparty.iter_utils import group_by_once
8
19
 
9
20
  from atlas_init.cli_tf.github_logs import summary_dir
10
21
  from atlas_init.cli_tf.go_test_run import GoTestRun, GoTestStatus
22
+ from atlas_init.cli_tf.go_test_tf_error import (
23
+ GoTestError,
24
+ GoTestErrorClass,
25
+ GoTestErrorClassification,
26
+ details_short_description,
27
+ parse_error_details,
28
+ )
29
+ from atlas_init.crud.mongo_dao import MongoDao, init_mongo_dao
30
+ from atlas_init.html_out.md_export import MonthlyReportPaths
31
+ from atlas_init.settings.env_vars import AtlasInitSettings
11
32
 
12
33
  logger = logging.getLogger(__name__)
13
34
  _COMPLETE_STATUSES = {GoTestStatus.PASS, GoTestStatus.FAIL}
@@ -17,6 +38,7 @@ _COMPLETE_STATUSES = {GoTestStatus.PASS, GoTestStatus.FAIL}
17
38
  class GoTestSummary(Entity):
18
39
  name: str
19
40
  results: list[GoTestRun] = Field(default_factory=list)
41
+ classifications: dict[str, GoTestErrorClassification] = Field(default_factory=dict)
20
42
 
21
43
  @model_validator(mode="after")
22
44
  def sort_results(self):
@@ -31,7 +53,8 @@ class GoTestSummary(Entity):
31
53
  def success_rate(self) -> float:
32
54
  total = self.total_completed
33
55
  if total == 0:
34
- logger.warning(f"No results to calculate success rate for {self.name}")
56
+ if not self.is_skipped:
57
+ logger.warning(f"No results to calculate success rate for {self.name}")
35
58
  return 0
36
59
  return sum(r.status == "PASS" for r in self.results) / total
37
60
 
@@ -76,20 +99,67 @@ def summary_str(summary: GoTestSummary, start_date: datetime, end_date: datetime
76
99
  )
77
100
 
78
101
 
102
def test_detail_md(summary: GoTestSummary, start_date: datetime, end_date: datetime) -> str:
    """Render the per-test markdown detail page: title, stats, error table, timeline."""
    sections: list[str] = [
        f"# {summary.name} Test Details",
        summary_line(summary.results),
        f"Success rate: {summary.success_rate_human}",
        "",
    ]
    sections.extend(error_table(summary))
    sections.append("## Timeline")
    sections.extend(timeline_lines(summary, start_date, end_date))
    return "\n".join(sections)
114
+
115
+
79
116
def timeline_lines(summary: GoTestSummary, start_date: datetime, end_date: datetime) -> list[str]:
    """One markdown bullet per day in [start_date, end_date]; failed runs get expanded error sections."""
    out: list[str] = []
    day_step = timedelta(days=1)
    for day in datetime_utils.day_range(start_date.date(), (end_date + day_step).date(), day_step):
        runs_today = summary.select_tests(day)
        if not runs_today:
            out.append(f"- {day:%Y-%m-%d}: MISSING")
            continue
        out.append(f"- {day:%Y-%m-%d}")
        if len(runs_today) == 1:
            only_run = runs_today[0]
            if only_run.is_failure:
                out.extend(_extract_error_lines(only_run, summary))
            else:
                # single passing run stays inline on the date bullet
                out[-1] += f" {format_test_oneline(only_run)}"
        if len(runs_today) > 1:
            for run in runs_today:
                out.append(f"  - {format_test_oneline(run)}")
                out.extend(_extract_error_lines(run, summary))
    return out
91
141
 
92
142
 
143
+ def _error_header(test: GoTestRun) -> str:
144
+ return f"Error {test.ts.isoformat('T', timespec='seconds')}"
145
+
146
+
147
def _extract_error_lines(test: GoTestRun, summary: GoTestSummary) -> list[str]:
    """Markdown section for one failed run: header, optional classification, raw test output."""
    if not test.is_failure:
        return []
    lines = ["", f"### {_error_header(test)}"]
    classification = summary.classifications.get(test.id)
    if classification:
        lines.append(str(classification))
        lines.append(details_short_description(classification.details))
    lines.append(f"```\n{test.output_lines_str}\n```")
    lines.append("")
    return lines
161
+
162
+
93
163
  def failure_details(summary: GoTestSummary) -> list[str]:
94
164
  lines = ["## Failures"]
95
165
  for test in summary.results:
@@ -97,15 +167,54 @@ def failure_details(summary: GoTestSummary) -> list[str]:
97
167
  lines.extend(
98
168
  (
99
169
  f"### {test.when} {format_test_oneline(test)}",
100
- test.finish_summary(),
170
+ test.finish_summary(), # type: ignore
101
171
  "",
102
172
  )
103
173
  )
104
174
  return lines
105
175
 
106
176
 
177
def error_table(summary: GoTestSummary) -> list[str]:
    """Markdown table of every failed run, linking each date to its detail section anchor."""
    rows: list[dict] = []
    for test in summary.results:
        if not test.is_failure:
            continue
        anchor = header_to_markdown_link(_error_header(test))
        row = {
            "Date": f"[{test.ts.strftime('%Y-%m-%d %H:%M')}]({anchor})",
            "Env": test.env,
            "Runtime": f"{test.run_seconds:.2f}s",
        }
        rows.append(row)
        # Prefer a stored (human/bot) classification; fall back to auto-classification.
        if error_cls := summary.classifications.get(test.id):
            row["Error Class"] = error_cls.error_class
            row["Details"] = details_short_description(error_cls.details)
        elif auto_class := GoTestErrorClass.auto_classification(test.output_lines_str):
            row["Error Class"] = auto_class
        if "Details" not in row:
            row["Details"] = details_short_description(parse_error_details(test))
    if not rows:
        return []
    # Union of keys across rows, since not every row has every column.
    headers = sorted(reduce(lambda acc, r: acc.union(r.keys()), rows, set()))
    return markdown_table_lines("Error Table", rows, headers, lambda r: [r.get(key, "") for key in headers])
199
+
200
+
107
201
  def format_test_oneline(test: GoTestRun) -> str:
108
- return f"[{test.status} {test.runtime_human}]({test.url})"
202
+ if job_url := test.job_url:
203
+ return f"[{test.status} {test.runtime_human}]({job_url})"
204
+ return f"{test.status} {test.runtime_human}" # type: ignore
205
+
206
+
207
def header_to_markdown_link(header: str) -> str:
    """
    Convert a markdown header into its GitHub-style anchor link.

    Example:
        'Error 2025-05-23T00:28:50+00:00' -> '#error-2025-05-23t0028500000'
    """
    # lowercase, keep only [a-z0-9 -], then spaces become hyphens
    cleaned = re.sub(r"[^a-z0-9 \-]", "", header.strip().lower())
    return "#" + cleaned.replace(" ", "-")
109
218
 
110
219
 
111
220
  def create_detailed_summary(
@@ -142,9 +251,479 @@ def create_short_summary(test_results: dict[str, list[GoTestRun]], failing_names
142
251
  fail_tests = test_results[fail_name]
143
252
  summary.append(f"- {fail_name} has {len(fail_tests)} failures:")
144
253
  summary.extend(
145
- f" - [{fail_run.when} failed in {fail_run.runtime_human}]({fail_run.url})" for fail_run in fail_tests
254
+ f" - [{fail_run.when} failed in {fail_run.runtime_human}]({fail_run.url})" # type: ignore
255
+ for fail_run in fail_tests
146
256
  )
147
257
  summary_fail_details.append(f"\n\n ## {fail_name} details:")
148
- summary_fail_details.extend(f"```\n{fail_run.finish_summary()}\n```" for fail_run in fail_tests)
258
+ summary_fail_details.extend(f"```\n{fail_run.finish_summary()}\n```" for fail_run in fail_tests) # type: ignore
149
259
  logger.info("\n".join(summary_fail_details))
150
260
  return summary
261
+
262
+
263
@dataclass
class GoRunTestReport:
    """Rendered markdown report, split so callers can write the two parts to separate files."""

    # Short overview: one-line header, errors overview, per-env summaries.
    summary: str
    # Full per-error sections ("# Errors Details"); empty string when there were no errors.
    error_details: str
267
+
268
+
269
def create_test_report(
    runs: list[GoTestRun],
    errors: list[GoTestError],
    *,
    indent_size=2,
    max_runs=20,
    env_name: str = "",
) -> GoRunTestReport:
    """Build a markdown summary + error-details report for a set of test runs.

    Args:
        runs: all test runs to report on.
        errors: errors extracted from failing runs.
        indent_size: width of one bullet-indentation level.
        max_runs: cap on tests listed in pass-rate / last-pass sections.
        env_name: when set, restrict both runs and errors to that environment.

    Fix: environments are iterated in sorted order so section order is
    deterministic (previously raw set iteration made the report order vary).
    """
    if env_name:
        runs = [run for run in runs if run.env == env_name]
        errors = [error for error in errors if error.run.env == env_name]
    single_indent = " " * indent_size
    if not runs:
        return GoRunTestReport(
            summary="No test runs found",
            error_details="",
        )
    envs = {run.env for run in runs if run.env}
    lines = [summary_line(runs)]
    if errors:
        env_name_str = f" in {env_name}" if env_name else ""
        lines.append(f"\n\n## Errors Overview{env_name_str}")
        lines.extend(error_overview_lines(errors, single_indent))
    for env in sorted(envs):  # sorted => stable section order
        env_runs = [run for run in runs if run.env == env]
        lines.append(f"\n\n## {env.upper()} Had {len(env_runs)} Runs")
        lines.extend(env_summary_lines(env_runs, max_runs, single_indent))
    if len(envs) > 1:
        # cross-environment aggregate only makes sense with more than one env
        lines.append(f"\n\n## All Environments Had {len(runs)} Runs")
        lines.extend(env_summary_lines(runs, max_runs, single_indent))
    error_detail_lines = []
    if errors:
        error_detail_lines.append("# Errors Details")
        error_detail_lines.extend(error_details(errors, include_env=len(envs) > 1))
    return GoRunTestReport(
        summary="\n".join(lines),
        error_details="\n".join(error_detail_lines),
    )
307
+
308
+
309
+ def run_statuses(runs: list[GoTestRun]) -> str:
310
+ if counter := Counter([run.status for run in runs]):
311
+ return " ".join(
312
+ f"{cls}(x {count})" if count > 1 else cls
313
+ for cls, count in sorted(counter.items(), key=lambda item: item[1], reverse=True)
314
+ )
315
+ return ""
316
+
317
+
318
def summary_line(runs: list[GoTestRun]) -> str:
    """One-line '# Found N TestRuns ...' header for a collection of runs.

    Fix: previously `branches.pop()` raised KeyError when no run carried
    branch info (an empty set still hit the single-branch arm); now an
    explicit placeholder is used instead of crashing.
    """
    run_delta = GoTestRun.run_delta(runs)
    envs = {run.env for run in runs if run.env}
    pkg_test_names = {run.name_with_package for run in runs}
    envs_str = ", ".join(sorted(envs))
    branches = sorted({run.branch for run in runs if run.branch})
    if not branches:
        branches_str = "from unknown branch"
    elif len(branches) > 1:
        branches_str = "from " + ", ".join(branches) + " branches"
    else:
        branches_str = f"from {branches[0]} branch"
    return f"# Found {len(runs)} TestRuns in {envs_str} {run_delta} {branches_str}: {len(pkg_test_names)} unique tests, {run_statuses(runs)}"
328
+
329
+
330
def error_overview_lines(errors: list[GoTestError], single_indent: str) -> list[str]:
    """Bullet list grouping errors by classification; unclassified errors come first."""
    lines: list[str] = []
    grouped = GoTestError.group_by_classification(errors)
    if unclassified := grouped.unclassified:
        lines.append(f"- Found {len(unclassified)} unclassified errors:")
        lines.extend(count_errors_by_test(single_indent, unclassified))
    # renamed loop variable so the `errors` parameter is no longer shadowed
    for classification, class_errors in (grouped.classified or {}).items():
        lines.append(f"- Error Type `{classification}`:")
        lines.extend(count_errors_by_test(single_indent, class_errors))
    return lines
341
+
342
+
343
+ def count_errors_by_test(indent: str, errors: list[GoTestError]) -> list[str]:
344
+ lines: list[str] = []
345
+ counter = Counter()
346
+ for error in errors:
347
+ counter[error.header(use_ticks=True)] += 1
348
+ for error_header, count in counter.most_common():
349
+ if count > 1:
350
+ lines.append(f"{indent}- {count} x {error_header}")
351
+ else:
352
+ lines.append(f"{indent}- {error_header}")
353
+ return sorted(lines)
354
+
355
+
356
def env_summary_lines(env_runs: list[GoTestRun], max_runs: int, single_indent: str) -> list[str]:
    """Markdown bullets for one environment: lowest pass rates, time since last PASS, slowest tests.

    `max_runs` caps how many tests appear in the pass-rate and last-pass sections;
    `single_indent` is the prefix used for nested bullets.
    """
    lines: list[str] = []
    # Section is omitted entirely when every test only ran once (include_single_run=False).
    if pass_rates := GoTestRun.lowest_pass_rate(env_runs, max_tests=max_runs, include_single_run=False):
        lines.append(f"- Lowest pass rate: {GoTestRun.run_delta(env_runs)}")
        for pass_rate, name, name_tests in pass_rates:
            ran_count_str = f"ran {len(name_tests)} times" if len(name_tests) > 1 else "ran 1 time"
            if last_pass := GoTestRun.last_pass(name_tests):
                lines.append(f"{single_indent}- {pass_rate:.2%} {name} ({ran_count_str}) last PASS {last_pass}")
            else:
                lines.append(f"{single_indent}- {pass_rate:.2%} {name} ({ran_count_str}) never passed")
    if pass_stats := GoTestRun.last_pass_stats(env_runs, max_tests=max_runs):
        lines.append(f"- Longest time since `{GoTestStatus.PASS}`: {GoTestRun.run_delta(env_runs)}")
        lines.extend(
            f"{single_indent}- {pass_stat.pass_when} {pass_stat.name_with_package}" for pass_stat in pass_stats
        )
    lines.append(f"- Slowest tests: {GoTestRun.run_delta(env_runs)}")
    for time_stat in GoTestRun.slowest_tests(env_runs):
        avg_time_str = (
            f"(avg = {time_stat.average_duration} across {len(time_stat.runs)} runs)"
            if time_stat.average_seconds
            else ""
        )
        lines.append(
            # rstrip drops the trailing space left when avg_time_str is empty
            f"{single_indent}- {time_stat.slowest_duration} {time_stat.name_with_package} {avg_time_str}".rstrip()
        )
    return lines
382
+
383
+
384
def error_details(errors: list[GoTestError], include_env: bool) -> list[str]:
    """Full markdown details per failing test; within a test, newest error first."""
    lines: list[str] = []
    for name, name_errors in GoTestError.group_by_name_with_package(errors).items():
        delta = GoTestRun.run_delta([error.run for error in name_errors])
        lines.append(f"## {name} had {len(name_errors)} errors {delta}")
        for error in sorted(name_errors, reverse=True):  # newest first
            run = error.run
            env_str = f" in {run.env} " if include_env and run.env else ""
            lines.append(f"### Started @ {run.ts} {env_str}ran for ({run.runtime_human})")
            lines.append(f"- error classes: bot={error.bot_error_class}, human={error.human_error_class}")
            lines.append(f"- details summary: {error.short_description}")
            lines.append(f"- test output:\n```log\n{run.output_lines_str}\n```\n")
    return lines
401
+
402
+
403
class TFCITestOutput(Entity):
    """Represent the CI Test Output for a day.

    Aggregates everything downstream report builders need: raw log paths,
    parsed runs, extracted errors, and their classifications.
    """

    log_paths: list[Path] = Field(
        default_factory=list, description="Paths to the log files of the test runs analyzed by the run history."
    )
    found_tests: list[GoTestRun] = Field(default_factory=list, description="All tests for report day.")
    found_errors: list[GoTestError] = Field(default_factory=list, description="All errors for the report day.")
    classified_errors: list[GoTestErrorClassification] = Field(
        default_factory=list, description="Classified errors for the report day."
    )
414
+
415
+
416
class RunHistoryFilter(Entity):
    """Time window plus optional env/branch filters used when querying run history."""

    run_history_start: datetime
    run_history_end: datetime
    # Environments to include; empty list means no env restriction.
    env_filter: list[str] = Field(default_factory=list)
    # When True, history is not restricted to the branch of the reported run.
    skip_branch_filter: bool = False
421
+
422
+
423
class MonthlyReportIn(Entity):
    """Input event for create_monthly_report.

    `skip_rows` holds predicates; a TestRow is dropped when any predicate
    returns True (see `skip_skipped` / `skip_if_no_failures` for ready-made ones).
    `existing_details_md` lets callers skip regenerating detail pages they already have.
    """

    name: str
    branch: str
    history_filter: RunHistoryFilter
    # ErrorRowColumns/TestRow are defined later in this module; annotations are
    # lazy via `from __future__ import annotations`.
    skip_columns: set[ErrorRowColumns] = Field(default_factory=set)
    skip_rows: list[Callable[[TestRow], bool]] = Field(default_factory=list)
    existing_details_md: dict[str, str] = Field(default_factory=dict)
    report_paths: MonthlyReportPaths

    @classmethod
    def skip_skipped(cls, test: TestRow) -> bool:
        # True when every run across all environments was skipped.
        return all(run.is_skipped for runs in test.last_env_runs.values() for run in runs)

    @classmethod
    def skip_if_no_failures(cls, test: TestRow) -> bool:
        # True when no run in any environment failed.
        return not any(run.is_failure for runs in test.last_env_runs.values() for run in runs)
439
+
440
+
441
class MonthlyReportOut(Entity):
    """Output of create_monthly_report: one summary page plus per-test detail pages."""

    summary_md: str
    # Keyed by the test's full name (name with group); values are rendered markdown pages.
    test_details_md: dict[str, str] = Field(default_factory=dict)
444
+
445
+
446
class ErrorRowColumns(StrEnum):
    """Column names for the daily/monthly report tables."""

    GROUP_NAME = "Group with Package"
    TEST = "Test"
    ERROR_CLASS = "Error Class"
    DETAILS_SUMMARY = "Details Summary"
    PASS_RATE = "Pass Rate"  # nosec B105 # This is not a security issue, just a column name
    TIME_SINCE_PASS = "Time Since PASS"  # nosec B105 # This is not a security issue, just a column name

    # Columns that are emitted once per environment, e.g. "Pass Rate (dev)".
    __ENV_BASED__: ClassVar[list[str]] = [PASS_RATE, TIME_SINCE_PASS]

    @classmethod
    def column_names(cls, rows: list[TestRow], skip_columns: set[ErrorRowColumns]) -> list[str]:
        """Resolve the ordered column list for `rows`, appending env-specific columns.

        Returns [] for empty input; `skip_columns` removes both base and env-based columns.
        """
        if not rows:
            return []
        # Collect every environment seen across the rows' run history.
        envs = set()
        for row in rows:
            envs.update(row.last_env_runs.keys())
        columns: list[str] = [cls.GROUP_NAME, cls.TEST, cls.ERROR_CLASS, cls.DETAILS_SUMMARY]
        for env in sorted(envs):
            columns.extend(f"{env_col} ({env})" for env_col in cls.__ENV_BASED__ if env_col not in skip_columns)
        return [col for col in columns if col not in skip_columns]
467
+
468
+
469
@total_ordering
class TestRow(Entity):
    """One row in the daily/monthly report tables for a single test.

    Ordering (via @total_ordering + __lt__) is by (group_name, test_name),
    so lists of rows sort naturally for table output.
    """

    group_name: str
    package_url: str
    full_name: str
    test_name: str
    error_classes: list[GoTestErrorClass]
    details_summary: str
    # NOTE(review): this uses dataclasses.field while sibling models use pydantic
    # Field(default_factory=...) — confirm the Entity base supports this default.
    last_env_runs: dict[str, list[GoTestRun]] = field(default_factory=dict)

    def __lt__(self, other) -> bool:
        # NOTE(review): returning NotImplemented would be the more idiomatic
        # choice for a foreign type — confirm raising TypeError is intended.
        if not isinstance(other, TestRow):
            raise TypeError
        return (self.group_name, self.test_name) < (other.group_name, other.test_name)

    @property
    def pass_rates(self) -> dict[str, float]:
        """Per-environment PASS ratio; environments with no runs are omitted."""
        rates = {}
        for env, runs in self.last_env_runs.items():
            if not runs:
                continue
            total = len(runs)
            passed = sum(run.status == GoTestStatus.PASS for run in runs)
            rates[env] = passed / total if total > 0 else 0.0
        return rates

    @property
    def time_since_pass(self) -> dict[str, str]:
        """Per-environment date of the most recent PASS, or 'never run'/'never pass'."""
        time_since = {}
        for env, runs in self.last_env_runs.items():
            if not runs:
                time_since[env] = "never run"
                continue
            # newest-first scan; first PASS wins
            time_since[env] = next(
                (run.ts.strftime("%Y-%m-%d") for run in sorted(runs, reverse=True) if run.status == GoTestStatus.PASS),
                "never pass",
            )
        return time_since

    @property
    def error_classes_str(self) -> str:
        """Frequency string like 'CLASS(x 2) OTHER', most frequent first."""
        if counter := Counter(self.error_classes):
            return " ".join(
                f"{cls}(x {count})" if count > 1 else cls
                for cls, count in sorted(counter.items(), key=lambda item: item[1], reverse=True)
            )
        return "No error classes"

    def as_row(self, columns: list[str]) -> list[str]:
        """Render this row's cell values in the order given by `columns`.

        Env-based columns look like 'Pass Rate (dev)'; the env name is parsed
        back out of the column label.
        """
        values = []
        pass_rates = self.pass_rates
        time_since_pass = self.time_since_pass
        for col in columns:
            match col:
                case ErrorRowColumns.GROUP_NAME:
                    # full_name ends with the test name; what precedes it is the group part
                    group_part = self.full_name.removesuffix(self.test_name).rstrip("/")
                    values.append(group_part or "Unknown Group")
                case ErrorRowColumns.TEST:
                    values.append(self.test_name)
                case ErrorRowColumns.ERROR_CLASS:
                    values.append(self.error_classes_str)
                case ErrorRowColumns.DETAILS_SUMMARY:
                    values.append(self.details_summary)
                case s if s.startswith(ErrorRowColumns.PASS_RATE):
                    env = s.split(" (")[-1].rstrip(")")
                    env_pass_rate = pass_rates.get(env, 0.0)
                    env_run_count = len(self.last_env_runs.get(env, []))
                    pass_rate_pct = f"{env_pass_rate:.2%} ({env_run_count} runs)" if env in pass_rates else "N/A"
                    if pass_rate_pct.startswith("100.00%"):
                        values.append("always")  # use always to avoid sorting errors, 100% showing before 2%
                    else:
                        values.append(pass_rate_pct)
                case s if s.startswith(ErrorRowColumns.TIME_SINCE_PASS):
                    env = s.split(" (")[-1].rstrip(")")
                    values.append(time_since_pass.get(env, "never passed"))
                case _:
                    logger.warning(f"Unknown column: {col}, skipping")
                    values.append("N/A")
        return values
548
+
549
+
550
def create_monthly_report(settings: AtlasInitSettings, event: MonthlyReportIn) -> MonthlyReportOut:
    """Build the monthly markdown report: a summary table plus per-test detail pages.

    Runs the async collection step synchronously via asyncio.run.
    """
    with new_task(f"Monthly Report for {event.name} on {event.branch}"):
        test_rows, detail_files_md = asyncio.run(_collect_monthly_test_rows_and_summaries(settings, event))
        # NOTE(review): assert is stripped under `python -O`; an explicit raise
        # would be safer for this validation — confirm intent.
        assert test_rows, "No error rows found for monthly report"
        columns = ErrorRowColumns.column_names(test_rows, event.skip_columns)
        # Despite the name, this holds the markdown lines documenting which
        # skip filters were applied (empty when no filters were configured).
        skip_rows = (
            []
            if event.skip_rows == []
            else [
                "",
                "## Skip Test Filters",
                *[f"- {method.__name__}" for method in event.skip_rows],
                "",
            ]
        )
        summary_md = [
            f"# Monthly Report for {event.name} on {event.branch} from {event.history_filter.run_history_start:%Y-%m-%d} to {event.history_filter.run_history_end:%Y-%m-%d} Found {len(test_rows)} unique Tests",
            *skip_rows,
            *markdown_table_lines("Test Run Table", test_rows, columns, lambda row: row.as_row(columns)),
        ]
        return MonthlyReportOut(
            summary_md="\n".join(summary_md),
            test_details_md=detail_files_md,
        )
574
+
575
+
576
class DailyReportIn(Entity):
    """Input event for create_daily_report."""

    report_date: datetime
    history_filter: RunHistoryFilter
    skip_columns: set[ErrorRowColumns] = Field(default_factory=set)
    # Optional hook to post-process each table row: receives the TestRow and the
    # column->value mapping, returns the (possibly modified) mapping.
    row_modifier: Callable[[TestRow, dict[str, str]], dict[str, str]] | None = Field(
        default=None, description=f"Use the {ErrorRowColumns} to access column-->value mapping"
    )
583
+
584
+
585
class DailyReportOut(Entity):
    """Output of create_daily_report; details_md is currently a placeholder."""

    summary_md: str
    details_md: str
588
+
589
+
590
T = TypeVar("T")


def markdown_table_lines(
    header: str, rows: list[T], columns: list[str], row_to_line: Callable[[T], list[str]], *, header_level: int = 2
) -> list[str]:
    """Render a markdown table preceded by a header line; returns [] when rows is empty."""
    if not rows:
        return []
    heading = f"{'#' * header_level} {header}"
    separator = " | ".join(["---"] * len(columns))
    body = [" | ".join(row_to_line(row)) for row in rows]
    return [heading, "", " | ".join(columns), separator, *body, ""]
606
+
607
+
608
def create_daily_report(output: TFCITestOutput, settings: AtlasInitSettings, event: DailyReportIn) -> DailyReportOut:
    """Render the daily CI report: a markdown table of failing tests with history context.

    Returns a celebratory summary when there are no failure rows; details_md
    is a placeholder ("TODO") for now.
    """
    errors = output.found_errors
    # run_id -> classified error class, used to enrich each failure row.
    error_classes = {cls.run_id: cls.error_class for cls in output.classified_errors}
    one_line_summary = summary_line(output.found_tests)

    with new_task("Daily Report"):
        with new_task("Collecting error rows") as task:
            # async DB lookups executed synchronously from this entry point
            failure_rows = asyncio.run(
                _collect_daily_error_rows(errors, error_classes, settings, event.history_filter, task)
            )
        if not failure_rows:
            return DailyReportOut(summary_md=f"🎉All tests passed\n{one_line_summary}", details_md="")
        columns = ErrorRowColumns.column_names(failure_rows, event.skip_columns)

        def as_md_row(row: TestRow) -> list[str]:
            # Apply the caller-supplied row_modifier (if any) to the column mapping.
            if row_modifier := event.row_modifier:
                row_dict = dict(zip(columns, row.as_row(columns)))
                row_dict = row_modifier(row, row_dict)
                return [row_dict[col] for col in columns]
            return row.as_row(columns)

        summary_md = [
            f"# Daily Report on {event.report_date:%Y-%m-%d}",
            one_line_summary,
            "",
            *markdown_table_lines("Errors Table", failure_rows, columns, as_md_row),
        ]
    return DailyReportOut(summary_md="\n".join(summary_md), details_md="TODO")
636
+
637
+
638
async def _collect_daily_error_rows(
    errors: list[GoTestError],
    error_classes: dict[str, GoTestErrorClass],
    settings: AtlasInitSettings,
    event: RunHistoryFilter,
    task: new_task,  # progress task; advanced once per processed error
) -> list[TestRow]:
    """Build one sorted TestRow per error, enriched with run history from Mongo."""
    error_rows: list[TestRow] = []
    dao = await init_mongo_dao(settings)
    for error in errors:
        test_run = error.run
        # KeyError here would mean a found error without a classification entry.
        error_class = error_classes[error.run_id]
        summary = error.short_description
        error_row, _ = await _create_test_row(event, dao, test_run, error_class, summary)
        error_rows.append(error_row)
        task.update(advance=1)
    return sorted(error_rows)
655
+
656
+
657
async def _collect_monthly_test_rows_and_summaries(
    settings: AtlasInitSettings,
    event: MonthlyReportIn,
) -> tuple[list[TestRow], dict[str, str]]:
    """Collect (sorted TestRows, per-test detail markdown) for the monthly report.

    Tests are seeded from the last report day, deduplicated by full name, then
    each gets its run history, error classifications, and (unless already
    provided in event.existing_details_md) a rendered detail page.
    """
    dao = await init_mongo_dao(settings)
    branch = event.branch
    history_filter = event.history_filter
    summary_name = event.name
    skip_rows = event.skip_rows
    last_day_test_names = await dao.read_tf_tests_for_day(branch, history_filter.run_history_end)
    # Deduplicate by full name; the last run seen for a name wins.
    test_runs_by_name: dict[str, GoTestRun] = {run.full_name: run for run in last_day_test_names}
    test_rows = []
    detail_files_md: dict[str, str] = {}
    with new_task("Collecting monthly error rows", total=len(last_day_test_names)) as task:
        for name_with_group, test_run in test_runs_by_name.items():
            test_row, runs = await _create_test_row(
                history_filter,
                dao,
                test_run,
            )
            # Drop the row when any configured skip predicate matches.
            if any(skip(test_row) for skip in skip_rows):
                continue
            test_rows.append(test_row)
            run_ids = [run.id for run in runs]
            classifications = await dao.read_error_classifications(run_ids)
            test_row.error_classes = [cls.error_class for cls in classifications.values()]
            # Status summary linking to the per-test detail page.
            test_row.details_summary = (
                f"[{run_statuses(runs)}]({settings.github_ci_summary_details_rel_path(summary_name, name_with_group)})"
            )
            if name_with_group not in event.existing_details_md:
                summary = GoTestSummary(name=name_with_group, results=runs, classifications=classifications)
                detail_files_md[name_with_group] = test_detail_md(
                    summary, history_filter.run_history_start, history_filter.run_history_end
                )
            task.update(advance=1)
    return sorted(test_rows), detail_files_md
693
+
694
+
695
async def _create_test_row(
    history_filter: RunHistoryFilter,
    dao: MongoDao,
    test_run: GoTestRun,
    error_class: GoTestErrorClass | None = None,
    summary: str = "",
) -> tuple[TestRow, list[GoTestRun]]:
    """Build a TestRow for `test_run` plus its matching run history.

    History is filtered by the RunHistoryFilter window/envs and, unless
    skip_branch_filter is set, restricted to the run's branch.

    Fix: removed a dead `package_url = test_run.package_url` assignment that
    was immediately overwritten by the `or ""` variant.
    """
    group_name = test_run.group_name
    package_url = test_run.package_url or ""
    branch = test_run.branch
    branch_filter = []
    test_name = test_run.name
    if branch and not history_filter.skip_branch_filter:
        branch_filter.append(branch)
    run_history = await dao.read_run_history(
        test_name=test_name,
        package_url=package_url,
        group_name=group_name,
        start_date=history_filter.run_history_start,
        end_date=history_filter.run_history_end,
        envs=history_filter.env_filter,
        branches=branch_filter,
    )
    # Bucket the history per environment; runs without env info share one bucket.
    last_env_runs = group_by_once(run_history, key=lambda run: run.env or "unknown-env")
    error_classes = [error_class] if error_class else []
    return TestRow(
        full_name=test_run.full_name,
        group_name=group_name,
        package_url=package_url,
        test_name=test_name,
        error_classes=error_classes,
        details_summary=summary,
        last_env_runs=last_env_runs,
    ), run_history