atlas-init 0.4.4-py3-none-any.whl → 0.6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atlas_init/__init__.py +1 -1
- atlas_init/cli.py +2 -0
- atlas_init/cli_cfn/app.py +3 -4
- atlas_init/cli_cfn/cfn_parameter_finder.py +61 -53
- atlas_init/cli_cfn/contract.py +4 -7
- atlas_init/cli_cfn/example.py +8 -18
- atlas_init/cli_helper/go.py +7 -11
- atlas_init/cli_root/mms_released.py +46 -0
- atlas_init/cli_root/trigger.py +6 -6
- atlas_init/cli_tf/app.py +3 -84
- atlas_init/cli_tf/ci_tests.py +493 -0
- atlas_init/cli_tf/codegen/__init__.py +0 -0
- atlas_init/cli_tf/codegen/models.py +97 -0
- atlas_init/cli_tf/codegen/openapi_minimal.py +74 -0
- atlas_init/cli_tf/github_logs.py +7 -94
- atlas_init/cli_tf/go_test_run.py +385 -132
- atlas_init/cli_tf/go_test_summary.py +331 -4
- atlas_init/cli_tf/go_test_tf_error.py +380 -0
- atlas_init/cli_tf/hcl/modifier.py +14 -12
- atlas_init/cli_tf/hcl/modifier2.py +87 -0
- atlas_init/cli_tf/mock_tf_log.py +1 -1
- atlas_init/cli_tf/{schema_v2_api_parsing.py → openapi.py} +95 -17
- atlas_init/cli_tf/schema_v2.py +43 -1
- atlas_init/crud/__init__.py +0 -0
- atlas_init/crud/mongo_client.py +115 -0
- atlas_init/crud/mongo_dao.py +296 -0
- atlas_init/crud/mongo_utils.py +239 -0
- atlas_init/repos/go_sdk.py +12 -3
- atlas_init/repos/path.py +110 -7
- atlas_init/settings/config.py +3 -6
- atlas_init/settings/env_vars.py +22 -31
- atlas_init/settings/interactive2.py +134 -0
- atlas_init/tf/.terraform.lock.hcl +59 -59
- atlas_init/tf/always.tf +5 -5
- atlas_init/tf/main.tf +3 -3
- atlas_init/tf/modules/aws_kms/aws_kms.tf +1 -1
- atlas_init/tf/modules/aws_s3/provider.tf +2 -1
- atlas_init/tf/modules/aws_vpc/provider.tf +2 -1
- atlas_init/tf/modules/cfn/cfn.tf +0 -8
- atlas_init/tf/modules/cfn/kms.tf +5 -5
- atlas_init/tf/modules/cfn/provider.tf +7 -0
- atlas_init/tf/modules/cfn/variables.tf +1 -1
- atlas_init/tf/modules/cloud_provider/cloud_provider.tf +1 -1
- atlas_init/tf/modules/cloud_provider/provider.tf +2 -1
- atlas_init/tf/modules/cluster/cluster.tf +31 -31
- atlas_init/tf/modules/cluster/provider.tf +2 -1
- atlas_init/tf/modules/encryption_at_rest/provider.tf +2 -1
- atlas_init/tf/modules/federated_vars/federated_vars.tf +1 -1
- atlas_init/tf/modules/federated_vars/provider.tf +2 -1
- atlas_init/tf/modules/project_extra/project_extra.tf +1 -10
- atlas_init/tf/modules/project_extra/provider.tf +8 -0
- atlas_init/tf/modules/stream_instance/provider.tf +8 -0
- atlas_init/tf/modules/stream_instance/stream_instance.tf +0 -9
- atlas_init/tf/modules/vpc_peering/provider.tf +10 -0
- atlas_init/tf/modules/vpc_peering/vpc_peering.tf +0 -10
- atlas_init/tf/modules/vpc_privatelink/versions.tf +2 -1
- atlas_init/tf/outputs.tf +1 -0
- atlas_init/tf/providers.tf +1 -1
- atlas_init/tf/variables.tf +7 -7
- atlas_init/typer_app.py +4 -8
- {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/METADATA +7 -4
- atlas_init-0.6.0.dist-info/RECORD +121 -0
- atlas_init-0.4.4.dist-info/RECORD +0 -105
- {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/WHEEL +0 -0
- {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/entry_points.txt +0 -0
- {atlas_init-0.4.4.dist-info → atlas_init-0.6.0.dist-info}/licenses/LICENSE +0 -0
atlas_init/cli_tf/go_test_summary.py (+331 -4):

```diff
@@ -1,13 +1,26 @@
+from __future__ import annotations
+
+import asyncio
 import logging
+from collections import Counter
+from dataclasses import dataclass, field
 from datetime import date, datetime, timedelta
+from enum import StrEnum
 from functools import total_ordering
+from pathlib import Path
+from typing import ClassVar
 
+from ask_shell.rich_progress import new_task
 from model_lib import Entity
 from pydantic import Field, model_validator
 from zero_3rdparty import datetime_utils, file_utils
+from zero_3rdparty.iter_utils import group_by_once
 
 from atlas_init.cli_tf.github_logs import summary_dir
 from atlas_init.cli_tf.go_test_run import GoTestRun, GoTestStatus
+from atlas_init.cli_tf.go_test_tf_error import GoTestError, GoTestErrorClass, GoTestErrorClassification
+from atlas_init.crud.mongo_dao import init_mongo_dao
+from atlas_init.settings.env_vars import AtlasInitSettings
 
 logger = logging.getLogger(__name__)
 _COMPLETE_STATUSES = {GoTestStatus.PASS, GoTestStatus.FAIL}
@@ -97,7 +110,7 @@ def failure_details(summary: GoTestSummary) -> list[str]:
             lines.extend(
                 (
                     f"### {test.when} {format_test_oneline(test)}",
-                    test.finish_summary(),
+                    test.finish_summary(),  # type: ignore
                     "",
                 )
             )
@@ -105,7 +118,7 @@ def failure_details(summary: GoTestSummary) -> list[str]:
 
 
 def format_test_oneline(test: GoTestRun) -> str:
-    return f"[{test.status} {test.runtime_human}]({test.url})"
+    return f"[{test.status} {test.runtime_human}]({test.url})"  # type: ignore
 
 
 def create_detailed_summary(
@@ -142,9 +155,323 @@ def create_short_summary(test_results: dict[str, list[GoTestRun]], failing_names
         fail_tests = test_results[fail_name]
         summary.append(f"- {fail_name} has {len(fail_tests)} failures:")
         summary.extend(
-            f" - [{fail_run.when} failed in {fail_run.runtime_human}]({fail_run.url})"
+            f" - [{fail_run.when} failed in {fail_run.runtime_human}]({fail_run.url})"  # type: ignore
+            for fail_run in fail_tests
         )
         summary_fail_details.append(f"\n\n ## {fail_name} details:")
-        summary_fail_details.extend(f"```\n{fail_run.finish_summary()}\n```" for fail_run in fail_tests)
+        summary_fail_details.extend(f"```\n{fail_run.finish_summary()}\n```" for fail_run in fail_tests)  # type: ignore
     logger.info("\n".join(summary_fail_details))
     return summary
+
+
+@dataclass
+class GoRunTestReport:
+    summary: str
+    error_details: str
+
+
+def create_test_report(
+    runs: list[GoTestRun],
+    errors: list[GoTestError],
+    *,
+    indent_size=2,
+    max_runs=20,
+    env_name: str = "",
+) -> GoRunTestReport:
+    if env_name:
+        runs = [run for run in runs if run.env == env_name]
+        errors = [error for error in errors if error.run.env == env_name]
+    single_indent = " " * indent_size
+    if not runs:
+        return GoRunTestReport(
+            summary="No test runs found",
+            error_details="",
+        )
+    envs = {run.env for run in runs if run.env}
+    lines = [summary_line(runs, errors)]
+    if errors:
+        env_name_str = f" in {env_name}" if env_name else ""
+        lines.append(f"\n\n## Errors Overview{env_name_str}")
+        lines.extend(error_overview_lines(errors, single_indent))
+    for env in envs:
+        env_runs = [run for run in runs if run.env == env]
+        lines.append(f"\n\n## {env.upper()} Had {len(env_runs)} Runs")
+        lines.extend(env_summary_lines(env_runs, max_runs, single_indent))
+    if len(envs) > 1:
+        lines.append(f"\n\n## All Environments Had {len(runs)} Runs")
+        lines.extend(env_summary_lines(runs, max_runs, single_indent))
+    error_detail_lines = []
+    if errors:
+        error_detail_lines.append("# Errors Details")
+        error_detail_lines.extend(error_details(errors, include_env=len(envs) > 1))
+    return GoRunTestReport(
+        summary="\n".join(lines),
+        error_details="\n".join(error_detail_lines),
+    )
+
+
+def summary_line(runs: list[GoTestRun], errors: list[GoTestError]):
+    run_delta = GoTestRun.run_delta(runs)
+    envs = {run.env for run in runs if run.env}
+    pkg_test_names = {run.name_with_package for run in runs}
+    skipped = sum(run.status == GoTestStatus.SKIP for run in runs)
+    passed = sum(run.status == GoTestStatus.PASS for run in runs)
+    envs_str = ", ".join(sorted(envs))
+    branches = {run.branch for run in runs if run.branch}
+    branches_str = (
+        "from " + ", ".join(sorted(branches)) + " branches" if len(branches) > 1 else f"from {branches.pop()} branch"
+    )
+    return f"# Found {len(runs)} TestRuns in {envs_str} {run_delta} {branches_str}: {len(pkg_test_names)} unique tests, {len(errors)} Errors, {skipped} Skipped, {passed} Passed"
+
+
+def error_overview_lines(errors: list[GoTestError], single_indent: str) -> list[str]:
+    lines = []
+    grouped_errors = GoTestError.group_by_classification(errors)
+    if errors_unclassified := grouped_errors.unclassified:
+        lines.append(f"- Found {len(grouped_errors.unclassified)} unclassified errors:")
+        lines.extend(count_errors_by_test(single_indent, errors_unclassified))
+    if errors_by_class := grouped_errors.classified:
+        for classification, errors in errors_by_class.items():
+            lines.append(f"- Error Type `{classification}`:")
+            lines.extend(count_errors_by_test(single_indent, errors))
+    return lines
+
+
+def count_errors_by_test(indent: str, errors: list[GoTestError]) -> list[str]:
+    lines: list[str] = []
+    counter = Counter()
+    for error in errors:
+        counter[error.header(use_ticks=True)] += 1
+    for error_header, count in counter.most_common():
+        if count > 1:
+            lines.append(f"{indent}- {count} x {error_header}")
+        else:
+            lines.append(f"{indent}- {error_header}")
+    return sorted(lines)
+
+
+def env_summary_lines(env_runs: list[GoTestRun], max_runs: int, single_indent: str) -> list[str]:
+    lines: list[str] = []
+    if pass_rates := GoTestRun.lowest_pass_rate(env_runs, max_tests=max_runs, include_single_run=False):
+        lines.append(f"- Lowest pass rate: {GoTestRun.run_delta(env_runs)}")
+        for pass_rate, name, name_tests in pass_rates:
+            ran_count_str = f"ran {len(name_tests)} times" if len(name_tests) > 1 else "ran 1 time"
+            if last_pass := GoTestRun.last_pass(name_tests):
+                lines.append(f"{single_indent}- {pass_rate:.2%} {name} ({ran_count_str}) last PASS {last_pass}")
+            else:
+                lines.append(f"{single_indent}- {pass_rate:.2%} {name} ({ran_count_str}) never passed")
+    if pass_stats := GoTestRun.last_pass_stats(env_runs, max_tests=max_runs):
+        lines.append(f"- Longest time since `{GoTestStatus.PASS}`: {GoTestRun.run_delta(env_runs)}")
+        lines.extend(
+            f"{single_indent}- {pass_stat.pass_when} {pass_stat.name_with_package}" for pass_stat in pass_stats
+        )
+    lines.append(f"- Slowest tests: {GoTestRun.run_delta(env_runs)}")
+    for time_stat in GoTestRun.slowest_tests(env_runs):
+        avg_time_str = (
+            f"(avg = {time_stat.average_duration} across {len(time_stat.runs)} runs)"
+            if time_stat.average_seconds
+            else ""
+        )
+        lines.append(
+            f"{single_indent}- {time_stat.slowest_duration} {time_stat.name_with_package} {avg_time_str}".rstrip()
+        )
+    return lines
+
+
+def error_details(errors: list[GoTestError], include_env: bool) -> list[str]:
+    lines: list[str] = []
+    for name, name_errors in GoTestError.group_by_name_with_package(errors).items():
+        lines.append(
+            f"## {name} had {len(name_errors)} errors {GoTestRun.run_delta([error.run for error in name_errors])}",
+        )
+        for error in sorted(name_errors, reverse=True):  # newest first
+            env_str = f" in {error.run.env} " if include_env and error.run.env else ""
+            lines.extend(
+                [
+                    f"### Started @ {error.run.ts} {env_str}ran for ({error.run.runtime_human})",
+                    f"- error classes: bot={error.bot_error_class}, human={error.human_error_class}",
+                    f"- details summary: {error.short_description}",
+                    f"- test output:\n```log\n{error.run.output_lines_str}\n```\n",
+                ]
+            )
+    return lines
+
+
+class TFCITestOutput(Entity):
+    """Represent the CI Test Output for a day"""
+
+    log_paths: list[Path] = Field(
+        default_factory=list, description="Paths to the log files of the test runs analyzed by the run history."
+    )
+    found_tests: list[GoTestRun] = Field(default_factory=list, description="All tests for report day.")
+    found_errors: list[GoTestError] = Field(default_factory=list, description="All errors for the report day.")
+    classified_errors: list[GoTestErrorClassification] = Field(
+        default_factory=list, description="Classified errors for the report day."
+    )
+
+
+class DailyReportIn(Entity):
+    report_date: datetime
+    run_history_start: datetime
+    run_history_end: datetime
+    env_filter: list[str] = field(default_factory=list)
+    skip_branch_filter: bool = False
+    skip_columns: set[ErrorRowColumns] = field(default_factory=set)
+
+
+class DailyReportOut(Entity):
+    summary_md: str
+    details_md: str
+
+
+def create_daily_report(output: TFCITestOutput, settings: AtlasInitSettings, event: DailyReportIn) -> DailyReportOut:
+    errors = output.found_errors
+    error_classes = {cls.run_id: cls.error_class for cls in output.classified_errors}
+    one_line_summary = summary_line(output.found_tests, errors)
+
+    with new_task("Daily Report"):
+        with new_task("Collecting error rows") as task:
+            failure_rows = asyncio.run(_collect_error_rows(errors, error_classes, settings, event, task))
+        if not failure_rows:
+            return DailyReportOut(summary_md=f"🎉All tests passed\n{one_line_summary}", details_md="")
+        columns = ErrorRowColumns.column_names(failure_rows, event.skip_columns)
+        summary_md = [
+            "# Daily Report",
+            one_line_summary,
+            "",
+            "## Errors Table",
+            " | ".join(columns),
+            " | ".join("---" for _ in columns),
+            *(" | ".join(row.as_row(columns)) for row in failure_rows),
+        ]
+    return DailyReportOut(summary_md="\n".join(summary_md), details_md="TODO")
+
+
+class ErrorRowColumns(StrEnum):
+    GROUP_NAME = "Group or Package"
+    TEST = "Test"
+    ERROR_CLASS = "Error Class"
+    DETAILS_SUMMARY = "Details Summary"
+    PASS_RATE = "Pass Rate"  # nosec B105 # This is not a security issue, just a column name
+    TIME_SINCE_PASS = "Time Since PASS"  # nosec B105 # This is not a security issue, just a column name
+
+    __ENV_BASED__: ClassVar[list[str]] = [PASS_RATE, TIME_SINCE_PASS]
+
+    @classmethod
+    def column_names(cls, rows: list[ErrorRow], skip_columns: set[ErrorRowColumns]) -> list[str]:
+        if not rows:
+            return []
+        envs = set()
+        for row in rows:
+            envs.update(row.last_env_runs.keys())
+        columns: list[str] = [cls.GROUP_NAME, cls.TEST, cls.ERROR_CLASS, cls.DETAILS_SUMMARY]
+        for env in sorted(envs):
+            columns.extend(f"{env_col} ({env})" for env_col in cls.__ENV_BASED__ if env_col not in skip_columns)
+        return [col for col in columns if col not in skip_columns]
+
+
+@total_ordering
+class ErrorRow(Entity):
+    group_name: str
+    package_url: str
+    test_name: str
+    error_class: GoTestErrorClass
+    details_summary: str
+    last_env_runs: dict[str, list[GoTestRun]] = field(default_factory=dict)
+
+    def __lt__(self, other) -> bool:
+        if not isinstance(other, ErrorRow):
+            raise TypeError
+        return (self.group_name, self.test_name) < (other.group_name, other.test_name)
+
+    @property
+    def pass_rates(self) -> dict[str, float]:
+        rates = {}
+        for env, runs in self.last_env_runs.items():
+            if not runs:
+                continue
+            total = len(runs)
+            passed = sum(run.status == GoTestStatus.PASS for run in runs)
+            rates[env] = passed / total if total > 0 else 0.0
+        return rates
+
+    @property
+    def time_since_pass(self) -> dict[str, str]:
+        time_since = {}
+        for env, runs in self.last_env_runs.items():
+            if not runs:
+                time_since[env] = "never run"
+                continue
+            time_since[env] = next(
+                (run.when for run in sorted(runs, reverse=True) if run.status == GoTestStatus.PASS), "never pass"
+            )
+        return time_since
+
+    def as_row(self, columns: list[str]) -> list[str]:
+        values = []
+        pass_rates = self.pass_rates
+        time_since_pass = self.time_since_pass
+        for col in columns:
+            match col:
+                case ErrorRowColumns.GROUP_NAME:
+                    values.append(self.group_name or self.package_url or "Unknown Group")
+                case ErrorRowColumns.TEST:
+                    values.append(self.test_name)
+                case ErrorRowColumns.ERROR_CLASS:
+                    values.append(self.error_class)
+                case ErrorRowColumns.DETAILS_SUMMARY:
+                    values.append(self.details_summary)
+                case s if s.startswith(ErrorRowColumns.PASS_RATE):
+                    env = s.split(" (")[-1].rstrip(")")
+                    env_pass_rate = pass_rates.get(env, 0.0)
+                    env_run_count = len(self.last_env_runs.get(env, []))
+                    values.append(f"{env_pass_rate:.2%} ({env_run_count} runs)" if env in pass_rates else "N/A")
+                case s if s.startswith(ErrorRowColumns.TIME_SINCE_PASS):
+                    env = s.split(" (")[-1].rstrip(")")
+                    values.append(time_since_pass.get(env, "never passed"))
+                case _:
+                    logger.warning(f"Unknown column: {col}, skipping")
+                    values.append("N/A")
+        return values
+
+
+async def _collect_error_rows(
+    errors: list[GoTestError],
+    error_classes: dict[str, GoTestErrorClass],
+    settings: AtlasInitSettings,
+    event: DailyReportIn,
+    task: new_task,
+) -> list[ErrorRow]:
+    error_rows: list[ErrorRow] = []
+    dao = await init_mongo_dao(settings)
+    for error in errors:
+        package_url = error.run.package_url
+        group_name = error.run.group_name
+        package_url = error.run.package_url or ""
+        error_class = error_classes[error.run_id]
+        branch = error.run.branch
+        branch_filter = []
+        if branch and not event.skip_branch_filter:
+            branch_filter.append(branch)
+        run_history = await dao.read_run_history(
+            test_name=error.run_name,
+            package_url=package_url,
+            group_name=group_name,
+            start_date=event.run_history_start,
+            end_date=event.run_history_end,
+            envs=event.env_filter,
+            branches=branch_filter,
+        )
+        last_env_runs = group_by_once(run_history, key=lambda run: run.env or "unknown-env")
+        error_rows.append(
+            ErrorRow(
+                group_name=group_name,
+                package_url=package_url,
+                test_name=error.run_name,
+                error_class=error_class,
+                details_summary=error.short_description,
+                last_env_runs=last_env_runs,
+            )
+        )
+        task.update(advance=1)
+    return sorted(error_rows)
```
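The hunk above adds a self-contained report layer: create_test_report renders per-environment pass-rate and slowest-test summaries, while create_daily_report turns classified errors into a markdown table backed by run history from MongoDB. A minimal sketch of driving the synchronous entry point, assuming only the signatures shown in the diff (render_report and the runs/errors inputs are hypothetical, not part of atlas-init):

```python
# Minimal sketch (not part of atlas-init): drives the new report API from the
# diff above. Only create_test_report, GoRunTestReport, GoTestRun, and
# GoTestError come from the package; render_report is a hypothetical helper.
from atlas_init.cli_tf.go_test_run import GoTestRun
from atlas_init.cli_tf.go_test_summary import GoRunTestReport, create_test_report
from atlas_init.cli_tf.go_test_tf_error import GoTestError


def render_report(runs: list[GoTestRun], errors: list[GoTestError]) -> str:
    report: GoRunTestReport = create_test_report(
        runs,
        errors,
        max_runs=20,     # cap for the pass-rate / slowest-test listings
        env_name="dev",  # optional: restrict the report to one environment
    )
    # report.summary is markdown: a "# Found N TestRuns ..." header plus
    # per-environment sections; error_details is empty when nothing failed.
    if report.error_details:
        return f"{report.summary}\n{report.error_details}"
    return report.summary
```

The daily-report path layers persistence on top: create_daily_report runs _collect_error_rows, which awaits init_mongo_dao(settings) and reads per-test history via dao.read_run_history, which is why the new atlas_init/crud modules appear in the file list above.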