argus-alm 0.12.3__py3-none-any.whl → 0.12.4b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. argus/backend/controller/admin_api.py +26 -0
  2. argus/backend/controller/api.py +26 -1
  3. argus/backend/controller/main.py +21 -0
  4. argus/backend/controller/testrun_api.py +16 -0
  5. argus/backend/controller/view_api.py +162 -0
  6. argus/backend/models/web.py +16 -0
  7. argus/backend/plugins/core.py +25 -10
  8. argus/backend/plugins/driver_matrix_tests/controller.py +39 -0
  9. argus/backend/plugins/driver_matrix_tests/model.py +251 -3
  10. argus/backend/plugins/driver_matrix_tests/raw_types.py +27 -0
  11. argus/backend/plugins/driver_matrix_tests/service.py +18 -0
  12. argus/backend/plugins/driver_matrix_tests/udt.py +14 -13
  13. argus/backend/plugins/generic/model.py +5 -2
  14. argus/backend/plugins/sct/service.py +13 -1
  15. argus/backend/service/argus_service.py +116 -20
  16. argus/backend/service/build_system_monitor.py +37 -7
  17. argus/backend/service/jenkins_service.py +2 -1
  18. argus/backend/service/release_manager.py +14 -0
  19. argus/backend/service/stats.py +147 -11
  20. argus/backend/service/testrun.py +44 -5
  21. argus/backend/service/views.py +258 -0
  22. argus/backend/template_filters.py +7 -0
  23. argus/backend/util/common.py +14 -2
  24. argus/client/driver_matrix_tests/cli.py +110 -0
  25. argus/client/driver_matrix_tests/client.py +56 -193
  26. argus_alm-0.12.4b1.dist-info/METADATA +129 -0
  27. {argus_alm-0.12.3.dist-info → argus_alm-0.12.4b1.dist-info}/RECORD +30 -27
  28. {argus_alm-0.12.3.dist-info → argus_alm-0.12.4b1.dist-info}/entry_points.txt +1 -0
  29. argus_alm-0.12.3.dist-info/METADATA +0 -207
  30. {argus_alm-0.12.3.dist-info → argus_alm-0.12.4b1.dist-info}/LICENSE +0 -0
  31. {argus_alm-0.12.3.dist-info → argus_alm-0.12.4b1.dist-info}/WHEEL +0 -0
argus/backend/service/argus_service.py
@@ -1,3 +1,5 @@
+ from math import ceil
+ from dataclasses import dataclass
  import subprocess
  import json
  import logging
@@ -25,9 +27,20 @@ from argus.backend.models.web import (
  )
  from argus.backend.events.event_processors import EVENT_PROCESSORS
  from argus.backend.service.testrun import TestRunService
+ from argus.backend.util.common import chunk

  LOGGER = logging.getLogger(__name__)

+ @dataclass(init=True, frozen=True)
+ class ScheduleUpdateRequest:
+     release_id: UUID
+     schedule_id: UUID
+     assignee: UUID
+     new_tests: list[UUID]
+     old_tests: list[UUID]
+     comments: dict[UUID, str]
+
+

  class ArgusService:
      # pylint: disable=no-self-use,too-many-arguments,too-many-instance-attributes,too-many-locals, too-many-public-methods
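The new `ScheduleUpdateRequest` above is a frozen dataclass, so instances are immutable once built. A minimal sketch of how such an object could be populated from a JSON request body; the payload key names here are hypothetical and only illustrate the UUID conversions the field types imply:

```python
from dataclasses import dataclass
from uuid import UUID, uuid4


@dataclass(init=True, frozen=True)
class ScheduleUpdateRequest:
    release_id: UUID
    schedule_id: UUID
    assignee: UUID
    new_tests: list[UUID]
    old_tests: list[UUID]
    comments: dict[UUID, str]


# Hypothetical request body; the key names are assumptions for illustration only.
payload = {
    "releaseId": str(uuid4()),
    "scheduleId": str(uuid4()),
    "assignee": str(uuid4()),
    "newTests": [str(uuid4())],
    "oldTests": [],
    "comments": {},
}

request = ScheduleUpdateRequest(
    release_id=UUID(payload["releaseId"]),
    schedule_id=UUID(payload["scheduleId"]),
    assignee=UUID(payload["assignee"]),
    new_tests=[UUID(t) for t in payload["newTests"]],
    old_tests=[UUID(t) for t in payload["oldTests"]],
    comments={UUID(k): v for k, v in payload["comments"].items()},
)
# frozen=True: assigning to request.assignee would raise dataclasses.FrozenInstanceError.
```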
@@ -233,7 +246,7 @@ class ArgusService:
          row.save()

      def submit_new_schedule(self, release: str | UUID, start_time: str, end_time: str, tests: list[str | UUID],
-                             groups: list[str | UUID], assignees: list[str | UUID], tag: str) -> dict:
+                             groups: list[str | UUID], assignees: list[str | UUID], tag: str, comments: dict[str, str] | None, group_ids: dict[str, str] | None) -> dict:
          release = UUID(release) if isinstance(release, str) else release
          if len(assignees) == 0:
              raise Exception("Assignees not specified in the new schedule")
@@ -283,6 +296,19 @@ class ArgusService:
              assignee_entity.save()
              response["assignees"].append(assignee_id)

+         if comments:
+             for test_id, new_comment in comments.items():
+                 try:
+                     comment = ReleasePlannerComment.get(release=release, group=group_ids[test_id], test=test_id)
+                 except ReleasePlannerComment.DoesNotExist:
+                     comment = ReleasePlannerComment()
+                     comment.release = release
+                     comment.group = group_ids[test_id]
+                     comment.test = test_id
+
+                 comment.comment = new_comment
+                 comment.save()
+
          return response

      def get_schedules_for_release(self, release_id: str | UUID) -> dict:
@@ -397,11 +423,52 @@ class ArgusService:
              "newComment": new_comment,
          }

+     def update_schedule(self, release_id: UUID | str, schedule_id: UUID | str, old_tests: list[UUID | str], new_tests: list[UUID | str], comments: dict[str, str], assignee: UUID | str):
+         schedule: ArgusSchedule = ArgusSchedule.get(release_id=release_id, id=schedule_id)
+         new_tests: set[UUID] = {UUID(id) for id in new_tests}
+         old_tests: set[UUID] = {UUID(id) for id in old_tests}
+
+         all_test_ids = old_tests.union(new_tests)
+         tests = []
+         for batch in chunk(all_test_ids):
+             tests.extend(ArgusTest.filter(id__in=batch).all())
+         tests_by_id: dict[UUID, ArgusTest] = { test.id: test for test in tests }
+
+         all_scheduled_tests: list[ArgusScheduleTest] = list(ArgusScheduleTest.filter(schedule_id=schedule_id).all())
+         tests_to_remove = all_test_ids.difference(new_tests)
+         for scheduled_test in all_scheduled_tests:
+             if scheduled_test.test_id in tests_to_remove:
+                 test = tests_by_id.get(scheduled_test.test_id)
+                 scheduled_test.delete()
+                 if test:
+                     self.update_schedule_comment({"newComment": "", "releaseId": test.release_id, "groupId": test.group_id, "testId": test.id})
+
+         tests_to_add = new_tests.difference(old_tests)
+         for test_id in tests_to_add:
+             entity = ArgusScheduleTest()
+             entity.id = uuid_from_time(schedule.period_start)
+             entity.schedule_id = schedule.id
+             entity.test_id = UUID(test_id) if isinstance(test_id, str) else test_id
+             entity.release_id = release_id
+             entity.save()
+             self.assign_runs_for_scheduled_test(schedule, entity.test_id, assignee)
+
+         for test_id, comment in comments.items():
+             test = tests_by_id.get(UUID(test_id))
+             if test:
+                 self.update_schedule_comment({"newComment": comment, "releaseId": test.release_id, "groupId": test.group_id, "testId": test.id})
+
+         schedule_assignee: ArgusScheduleAssignee = ArgusScheduleAssignee.get(schedule_id=schedule_id)
+         schedule_assignee.assignee = assignee
+         schedule_assignee.save()
+         return True
+
      def delete_schedule(self, payload: dict) -> dict:
          """
          {
              "release": hex-uuid,
-             "schedule_id": uuid1
+             "schedule_id": uuid1,
+             "deleteComments": bool
          }
          """
          release_id = payload.get("releaseId")
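`update_schedule` above (and `delete_schedule` further down) batch their `ArgusTest` lookups through the new `chunk` helper imported from `argus/backend/util/common.py`, one of the files touched in this release. The helper's implementation is not shown in this diff; a minimal sketch of what such a batching generator typically looks like, assuming it yields fixed-size slices so that `id__in=` queries stay small (the 90-item default mirrors the `step_size` used elsewhere in these hunks, but is an assumption):

```python
from typing import Iterable, Iterator, TypeVar

T = TypeVar("T")


def chunk(items: Iterable[T], slice_size: int = 90) -> Iterator[list[T]]:
    """Yield consecutive batches of at most `slice_size` items (illustrative sketch only)."""
    batch: list[T] = []
    for item in items:
        batch.append(item)
        if len(batch) >= slice_size:
            yield batch
            batch = []
    if batch:
        yield batch


# Usage mirroring the pattern in the diff: accumulate query results batch by batch.
all_ids = list(range(250))
collected = []
for batch in chunk(all_ids):
    collected.extend(batch)  # in the service code: Model.filter(id__in=batch).all()
assert collected == all_ids
```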
@@ -412,6 +479,8 @@ class ArgusService:
          if not schedule_id:
              raise Exception("Schedule id not specified in the request")

+         delete_comments = payload.get("deleteComments", False)
+
          release = ArgusRelease.get(id=release_id)
          schedule = ArgusSchedule.get(release_id=release.id, id=schedule_id)
          tests = ArgusScheduleTest.filter(schedule_id=schedule.id).all()
@@ -441,6 +510,15 @@ class ArgusService:
              entity.delete()

          schedule.delete()
+
+         if delete_comments:
+             tests = []
+             for batch in chunk(full_schedule["tests"]):
+                 tests.extend(ArgusTest.filter(id__in=batch).all())
+
+             for test in tests:
+                 self.update_schedule_comment({"newComment": "", "releaseId": test.release_id, "groupId": test.group_id, "testId": test.id})
+
          return {
              "releaseId": release.id,
              "scheduleId": schedule_id,
@@ -480,6 +558,15 @@ class ArgusService:

          return response

+     def _batch_get_schedules_from_ids(self, release_id: UUID, schedule_ids: list[UUID]) -> list[ArgusSchedule]:
+         schedules = []
+         step_size = 90
+         for step in range(0, ceil(len(schedule_ids) / step_size)):
+             start_pos = step*step_size
+             next_slice = schedule_ids[start_pos:start_pos+step_size]
+             schedules.extend(ArgusSchedule.filter(release_id=release_id, id__in=next_slice).all())
+         return schedules
+
      def get_groups_assignees(self, release_id: UUID | str):
          release_id = UUID(release_id) if isinstance(release_id, str) else release_id
          release = ArgusRelease.get(id=release_id)
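`_batch_get_schedules_from_ids` walks the id list in fixed-size windows computed with `ceil`, which is why `from math import ceil` was added at the top of the file. A standalone illustration of that slicing arithmetic, using plain integers instead of schedule UUIDs:

```python
from math import ceil

schedule_ids = list(range(200))  # stand-in for UUIDs
step_size = 90

batches = []
for step in range(0, ceil(len(schedule_ids) / step_size)):
    start_pos = step * step_size
    batches.append(schedule_ids[start_pos:start_pos + step_size])

# Three windows of 90 + 90 + 20 items, covering every id exactly once.
assert [len(b) for b in batches] == [90, 90, 20]
assert sum(batches, []) == schedule_ids
```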
@@ -487,19 +574,19 @@ class ArgusService:
          groups = ArgusGroup.filter(release_id=release_id).all()
          group_ids = [group.id for group in groups if group.enabled]

-         total_ids = len(group_ids)
          schedule_ids = set()
-         step = 0
-         step_size = 60
-         while total_ids > 0:
-             group_slice = group_ids[step:step+step_size]
-             scheduled_groups = ArgusScheduleGroup.filter(release_id=release.id, group_id__in=group_slice).all()
-             batch_ids = {schedule.schedule_id for schedule in scheduled_groups}
-             schedule_ids.union(batch_ids)
-             total_ids = max(0, total_ids - step_size)
-             step += step_size
-
-         schedules = ArgusSchedule.filter(release_id=release.id, id__in=tuple(schedule_ids)).all()
+         group_schedules =[]
+         step_size = 90
+
+         for step in range(0, ceil(len(group_ids) / step_size)):
+             start_pos = step*step_size
+             next_slice = group_ids[start_pos:start_pos+step_size]
+             group_batch = list(ArgusScheduleGroup.filter(release_id=release.id, group_id__in=next_slice).all())
+             group_schedules.extend(group_batch)
+             batch_ids = {schedule.schedule_id for schedule in group_batch}
+             schedule_ids = schedule_ids.union(batch_ids)
+
+         schedules = self._batch_get_schedules_from_ids(release.id, list(schedule_ids))

          valid_schedules = schedules
          if release.perpetual:
@@ -510,7 +597,7 @@ class ArgusService:
          for schedule in valid_schedules:
              assignees = ArgusScheduleAssignee.filter(schedule_id=schedule.id).all()
              assignees_uuids = [assignee.assignee for assignee in assignees]
-             schedule_groups = filter(lambda g: g.schedule_id == schedule.id, scheduled_groups)
+             schedule_groups = filter(lambda g: g.schedule_id == schedule.id, group_schedules)
              groups = {str(group.group_id): assignees_uuids for group in schedule_groups}
              response = {**groups, **response}

@@ -525,10 +612,19 @@ class ArgusService:

          test_ids = [test.id for test in tests if test.enabled]

-         scheduled_tests = ArgusScheduleTest.filter(release_id=release.id, test_id__in=tuple(test_ids)).all()
-         schedule_ids = {test.schedule_id for test in scheduled_tests}
-         schedules: list[ArgusSchedule] = list(ArgusSchedule.filter(
-             release_id=release.id, id__in=tuple(schedule_ids)).all())
+         schedule_ids = set()
+         test_schedules = []
+         step_size = 90
+
+         for step in range(0, ceil(len(test_ids) / step_size)):
+             start_pos = step*step_size
+             next_slice = test_ids[start_pos:start_pos+step_size]
+             test_batch = ArgusScheduleTest.filter(release_id=release.id, test_id__in=next_slice).all()
+             test_schedules.extend(test_batch)
+             batch_ids = {schedule.schedule_id for schedule in test_batch}
+             schedule_ids = schedule_ids.union(batch_ids)
+
+         schedules = self._batch_get_schedules_from_ids(release.id, list(schedule_ids))

          if release.perpetual:
              today = datetime.datetime.utcnow()
@@ -538,7 +634,7 @@ class ArgusService:
          for schedule in schedules:
              assignees = ArgusScheduleAssignee.filter(schedule_id=schedule.id).all()
              assignees_uuids = [assignee.assignee for assignee in assignees]
-             schedule_tests = filter(lambda t: t.schedule_id == schedule.id, scheduled_tests)
+             schedule_tests = filter(lambda t: t.schedule_id == schedule.id, test_schedules)
              tests = {str(test.test_id): assignees_uuids for test in schedule_tests}
              response = {**tests, **response}

argus/backend/service/build_system_monitor.py
@@ -2,23 +2,28 @@ import logging
  from abc import ABC, abstractmethod
  import jenkins
  import click
+ import re
  from flask import current_app
  from flask.cli import with_appcontext

  from argus.backend.db import ScyllaCluster
- from argus.backend.models.web import ArgusRelease, ArgusGroup, ArgusTest
+ from argus.backend.models.web import ArgusRelease, ArgusGroup, ArgusTest, ArgusTestException
  from argus.backend.service.release_manager import ReleaseManagerService

  LOGGER = logging.getLogger(__name__)


  class ArgusTestsMonitor(ABC):
+     BUILD_SYSTEM_FILTERED_PREFIXES = [
+
+     ]
+
      def __init__(self) -> None:
          self._cluster = ScyllaCluster.get()
          self._existing_releases = list(ArgusRelease.all())
          self._existing_groups = list(ArgusGroup.all())
          self._existing_tests = list(ArgusTest.all())
-         self._filtered_groups: list[str] = current_app.config["BUILD_SYSTEM_FILTERED_PREFIXES"]
+         self._filtered_groups: list[str] = self.BUILD_SYSTEM_FILTERED_PREFIXES

      def create_release(self, release_name):
          # pylint: disable=no-self-use
@@ -68,17 +73,39 @@ class ArgusTestsMonitor(ABC):


  class JenkinsMonitor(ArgusTestsMonitor):
+
+     BUILD_SYSTEM_FILTERED_PREFIXES = [
+         "releng",
+     ]
+
+     JENKINS_MONITORED_RELEASES = [
+         r"^scylla-master$",
+         r"^scylla-staging$",
+         r"^scylla-\d+\.\d+$",
+         r"^manager-3.\d+$",
+         r"^scylla-operator/operator-master$",
+         r"^scylla-operator/operator-\d+.\d+$",
+         r"^scylla-enterprise$",
+         r"^enterprise-20\d{2}\.\d+$",
+         r"^siren-tests$",
+     ]
+
      def __init__(self) -> None:
          super().__init__()
          self._jenkins = jenkins.Jenkins(url=current_app.config["JENKINS_URL"],
                                          username=current_app.config["JENKINS_USER"],
                                          password=current_app.config["JENKINS_API_TOKEN"])
-         self._monitored_releases = current_app.config["JENKINS_MONITORED_RELEASES"]
+         self._monitored_releases = self.JENKINS_MONITORED_RELEASES
+
+     def _check_release_name(self, release_name: str):
+         return any(re.match(pattern, release_name, re.IGNORECASE) for pattern in self._monitored_releases)

      def collect(self):
          click.echo("Collecting new tests from jenkins")
          all_jobs = self._jenkins.get_all_jobs()
-         all_monitored_folders = [job for job in all_jobs if job["fullname"] in self._monitored_releases]
+         all_monitored_folders = [job for job in all_jobs if self._check_release_name(job["fullname"])]
+         LOGGER.info("Will collect %s", [f["fullname"] for f in all_monitored_folders])
+
          for release in all_monitored_folders:
              LOGGER.info("Processing release %s", release["name"])
              try:
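With this change the folder whitelist moves from configuration into code: `collect` keeps a Jenkins folder only when `_check_release_name` matches its full name against one of the `JENKINS_MONITORED_RELEASES` regular expressions, case-insensitively. A standalone check of that matching logic (the folder names below are made up for illustration):

```python
import re

JENKINS_MONITORED_RELEASES = [
    r"^scylla-master$",
    r"^scylla-staging$",
    r"^scylla-\d+\.\d+$",
    r"^manager-3.\d+$",
    r"^scylla-operator/operator-master$",
    r"^scylla-operator/operator-\d+.\d+$",
    r"^scylla-enterprise$",
    r"^enterprise-20\d{2}\.\d+$",
    r"^siren-tests$",
]


def check_release_name(release_name: str) -> bool:
    # Same expression as _check_release_name, minus the class context.
    return any(re.match(pattern, release_name, re.IGNORECASE) for pattern in JENKINS_MONITORED_RELEASES)


assert check_release_name("scylla-5.2")
assert check_release_name("Scylla-Master")       # matching is case-insensitive
assert check_release_name("enterprise-2023.1")
assert not check_release_name("releng-testing")  # not on the whitelist
```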
@@ -143,9 +170,12 @@ class JenkinsMonitor(ArgusTestsMonitor):
              except StopIteration:
                  LOGGER.warning("Test %s for release %s (group %s) doesn't exist, creating...",
                                 job["name"], saved_release.name, saved_group.name)
-                 saved_test = self.create_test(
-                     saved_release, saved_group, job["name"], job["fullname"], job["url"])
-                 self._existing_tests.append(saved_test)
+                 try:
+                     saved_test = self.create_test(
+                         saved_release, saved_group, job["name"], job["fullname"], job["url"])
+                     self._existing_tests.append(saved_test)
+                 except ArgusTestException:
+                     LOGGER.error("Unable to create test for build_id %s", job["fullname"], exc_info=True)

      def collect_groups_for_release(self, jobs):
          # pylint: disable=no-self-use
argus/backend/service/jenkins_service.py
@@ -195,7 +195,8 @@ class JenkinsService:
          LOGGER.info(old_config)
          xml = ET.fromstring(old_config)
          display_name = xml.find("displayName")
-         display_name.text = new_name
+         if display_name:
+             display_name.text = new_name
          new_config = ET.tostring(xml, encoding="unicode")
          self._jenkins.create_job(name=jenkins_new_build_id, config_xml=new_config)
          new_job_info = self._jenkins.get_job_info(name=jenkins_new_build_id)
argus/backend/service/release_manager.py
@@ -44,6 +44,20 @@ class ReleaseManagerService:
      def get_tests(self, group_id: UUID) -> list[ArgusTest]:
          return list(ArgusTest.filter(group_id=group_id).all())

+     def toggle_test_enabled(self, test_id: UUID, new_state: bool) -> bool:
+         test: ArgusTest = ArgusTest.get(id=test_id)
+         test.enabled = new_state
+         test.save()
+
+         return test
+
+     def toggle_group_enabled(self, group_id: UUID, new_state: bool) -> bool:
+         test: ArgusGroup = ArgusGroup.get(id=group_id)
+         test.enabled = new_state
+         test.save()
+
+         return test
+
      def create_release(self, release_name: str, pretty_name: str, perpetual: bool) -> ArgusRelease:
          try:
              release = ArgusRelease.get(name=release_name)
argus/backend/service/stats.py
@@ -6,11 +6,12 @@ from datetime import datetime
  from typing import TypedDict
  from uuid import UUID

+ from cassandra.cqlengine.models import Model
  from argus.backend.plugins.loader import all_plugin_models
- from argus.backend.util.common import get_build_number
+ from argus.backend.util.common import chunk, get_build_number
  from argus.backend.util.enums import TestStatus, TestInvestigationStatus
  from argus.backend.models.web import ArgusGithubIssue, ArgusRelease, ArgusGroup, ArgusTest,\
-     ArgusScheduleTest, ArgusTestRunComment
+     ArgusScheduleTest, ArgusTestRunComment, ArgusUserView
  from argus.backend.db import ScyllaCluster

  LOGGER = logging.getLogger(__name__)
@@ -142,6 +143,96 @@ def generate_field_status_map(
          status_map[run_number] = (run[field_name], run)
      return status_map

+ class ViewStats:
+     def __init__(self, release: ArgusUserView) -> None:
+         self.release = release
+         self.groups: list[GroupStats] = []
+         self.status_map = {status: 0 for status in TestStatus}
+         self.total_tests = 0
+         self.last_status = TestStatus.NOT_PLANNED
+         self.last_investigation_status = TestInvestigationStatus.NOT_INVESTIGATED
+         self.has_bug_report = False
+         self.issues: list[ArgusGithubIssue] = []
+         self.comments: list[ArgusTestRunComment] = []
+         self.test_schedules: dict[UUID, ArgusScheduleTest] = {}
+         self.forced_collection = False
+         self.rows = []
+         self.releases = {}
+         self.all_tests = []
+
+     def to_dict(self) -> dict:
+         converted_groups = {str(group.group.id): group.to_dict() for group in self.groups}
+         aggregated_investigation_status = {}
+         for group in converted_groups.values():
+             for investigation_status in TestInvestigationStatus:
+                 current_status = aggregated_investigation_status.get(investigation_status.value, {})
+                 result = {
+                     status.value: current_status.get(status.value, 0) + group.get(investigation_status.value, {}).get(status, 0)
+                     for status in TestStatus
+                 }
+                 aggregated_investigation_status[investigation_status.value] = result
+
+         return {
+             "release": dict(self.release.items()),
+             "releases": self.releases,
+             "groups": converted_groups,
+             "total": self.total_tests,
+             **self.status_map,
+             "disabled": False,
+             "perpetual": False,
+             "lastStatus": self.last_investigation_status,
+             "lastInvestigationStatus": self.last_investigation_status,
+             "hasBugReport": self.has_bug_report,
+             **aggregated_investigation_status
+         }
+
+     def _fetch_multiple_release_queries(self, entity: Model, releases: list[str]):
+         result_set = []
+         for release_id in releases:
+             result_set.extend(entity.filter(release_id=release_id).all())
+         return result_set
+
+     def collect(self, rows: list[TestRunStatRow], limited=False, force=False, dict: dict[str, TestRunStatRow] | None = None, tests: list[ArgusTest] = None) -> None:
+         self.forced_collection = force
+         all_release_ids = list({t.release_id for t in tests})
+         if not limited:
+             self.test_schedules = reduce(
+                 lambda acc, row: acc[row["test_id"]].append(row) or acc,
+                 self._fetch_multiple_release_queries(ArgusScheduleTest, all_release_ids),
+                 defaultdict(list)
+             )
+
+         self.rows = rows
+         self.dict = dict
+         if not limited or force:
+             self.issues = reduce(
+                 lambda acc, row: acc[row["run_id"]].append(row) or acc,
+                 self._fetch_multiple_release_queries(ArgusGithubIssue, all_release_ids),
+                 defaultdict(list)
+             )
+             self.comments = reduce(
+                 lambda acc, row: acc[row["test_run_id"]].append(row) or acc,
+                 self._fetch_multiple_release_queries(ArgusTestRunComment, all_release_ids),
+                 defaultdict(list)
+             )
+         self.all_tests = tests
+         groups = []
+         for slice in chunk(list({t.release_id for t in tests})):
+             self.releases.update({str(release.id): release for release in ArgusRelease.filter(id__in=slice).all()})
+
+         for slice in chunk(list({t.group_id for t in tests})):
+             groups.extend(ArgusGroup.filter(id__in=slice).all())
+         for group in groups:
+             if group.enabled:
+                 stats = GroupStats(group=group, parent_release=self)
+                 stats.collect(limited=limited)
+                 self.groups.append(stats)
+
+     def increment_status(self, status=TestStatus.NOT_PLANNED):
+         self.total_tests += 1
+         self.status_map[TestStatus(status)] += 1
+         self.last_status = TestStatus(status)
+

  class ReleaseStats:
      def __init__(self, release: ArgusRelease) -> None:
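`ViewStats.collect` (and the reworked `ReleaseStats.collect` below) replaces repeated per-row filtering with a single pass that groups query rows by key, using `reduce` over a `defaultdict(list)`. The idiom relies on `list.append` returning `None`, so `append(...) or acc` always evaluates to the accumulator. A standalone illustration with plain dictionaries standing in for model rows:

```python
from collections import defaultdict
from functools import reduce

rows = [
    {"test_id": "a", "status": "passed"},
    {"test_id": "b", "status": "failed"},
    {"test_id": "a", "status": "failed"},
]

# append() returns None, so `... or acc` yields the accumulator for the next step.
by_test = reduce(
    lambda acc, row: acc[row["test_id"]].append(row) or acc,
    rows,
    defaultdict(list),
)

assert len(by_test["a"]) == 2
assert len(by_test["b"]) == 1
assert by_test["missing"] == []  # defaultdict: unknown keys yield an empty list
```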
@@ -154,7 +245,7 @@ class ReleaseStats:
          self.has_bug_report = False
          self.issues: list[ArgusGithubIssue] = []
          self.comments: list[ArgusTestRunComment] = []
-         self.test_schedules: list[ArgusScheduleTest] = []
+         self.test_schedules: dict[UUID, ArgusScheduleTest] = {}
          self.forced_collection = False
          self.rows = []
          self.all_tests = []
@@ -190,9 +281,11 @@ class ReleaseStats:
              return

          if not self.release.perpetual and not limited:
-             self.test_schedules = list(ArgusScheduleTest.filter(
-                 release_id=self.release.id
-             ).all())
+             self.test_schedules = reduce(
+                 lambda acc, row: acc[row["test_id"]].append(row) or acc,
+                 ArgusScheduleTest.filter(release_id=self.release.id).all(),
+                 defaultdict(list)
+             )

          self.rows = rows
          self.dict = dict
@@ -261,8 +354,7 @@ class GroupStats:
              stats = TestStats(
                  test=test,
                  parent_group=self,
-                 schedules=tuple(
-                     schedule for schedule in self.parent_release.test_schedules if schedule.test_id == test.id)
+                 schedules=self.parent_release.test_schedules.get(test.id, [])
              )
              stats.collect(limited=limited)
              self.tests.append(stats)
@@ -371,10 +463,10 @@ class ReleaseStatsCollector:

      def collect(self, limited=False, force=False, include_no_version=False) -> dict:
          self.release: ArgusRelease = ArgusRelease.get(name=self.release_name)
-         all_tests = ArgusTest.filter(release_id=self.release.id).all()
-         build_ids = [t.build_system_id for t in all_tests]
+         all_tests: list[ArgusTest] = list(ArgusTest.filter(release_id=self.release.id).all())
+         build_ids = reduce(lambda acc, test: acc[test.plugin_name or "unknown"].append(test.build_system_id) or acc, all_tests, defaultdict(list))
          self.release_rows = [futures for plugin in all_plugin_models()
-                              for futures in plugin.get_stats_for_release(release=self.release, build_ids=build_ids)]
+                              for futures in plugin.get_stats_for_release(release=self.release, build_ids=build_ids.get(plugin._plugin_name, []))]
          self.release_rows = [row for future in self.release_rows for row in future.result()]
          if self.release.dormant and not force:
              return {
@@ -402,3 +494,47 @@ class ReleaseStatsCollector:
          self.release_stats = ReleaseStats(release=self.release)
          self.release_stats.collect(rows=self.release_rows, limited=limited, force=force, dict=self.release_dict, tests=all_tests)
          return self.release_stats.to_dict()
+
+
+ class ViewStatsCollector:
+     def __init__(self, view_id: UUID, filter: str | None = None) -> None:
+         self.database = ScyllaCluster.get()
+         self.session = self.database.get_session()
+         self.view = None
+         self.view_stats = None
+         self.view_rows = []
+         self.runs_by_build_id = {}
+         self.view_id = view_id
+         self.filter = filter
+
+     def collect(self, limited=False, force=False, include_no_version=False) -> dict:
+         self.view: ArgusUserView = ArgusUserView.get(id=self.view_id)
+         all_tests: list[ArgusTest] = []
+         for slice in chunk(self.view.tests):
+             all_tests.extend(ArgusTest.filter(id__in=slice).all())
+         build_ids = reduce(lambda acc, test: acc[test.plugin_name or "unknown"].append(test.build_system_id) or acc, all_tests, defaultdict(list))
+         self.view_rows = [futures for plugin in all_plugin_models()
+                           for futures in plugin.get_stats_for_release(release=self.view, build_ids=build_ids.get(plugin._plugin_name, []))]
+         self.view_rows = [row for future in self.view_rows for row in future.result()]
+
+         if self.filter:
+             if include_no_version:
+                 expr = lambda row: row["scylla_version"] == self.filter or not row["scylla_version"]
+             elif self.filter == "!noVersion":
+                 expr = lambda row: not row["scylla_version"]
+             else:
+                 expr = lambda row: row["scylla_version"] == self.filter
+         else:
+             if include_no_version:
+                 expr = lambda row: row
+             else:
+                 expr = lambda row: row["scylla_version"]
+         self.view_rows = list(filter(expr, self.view_rows))
+         for row in self.view_rows:
+             runs = self.runs_by_build_id.get(row["build_id"], [])
+             runs.append(row)
+             self.runs_by_build_id[row["build_id"]] = runs
+
+         self.view_stats = ViewStats(release=self.view)
+         self.view_stats.collect(rows=self.view_rows, limited=limited, force=force, dict=self.runs_by_build_id, tests=all_tests)
+         return self.view_stats.to_dict()
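`ViewStatsCollector.collect` picks one of four filter lambdas depending on the requested Scylla version filter and the `include_no_version` flag, then applies it with `filter()`. The selection logic in plain form (the build ids and versions below are made up for illustration):

```python
rows = [
    {"build_id": "folder/job-a", "scylla_version": "5.2.0"},
    {"build_id": "folder/job-b", "scylla_version": "5.4.0"},
    {"build_id": "folder/job-c", "scylla_version": None},
]

version_filter = "5.2.0"

# filter set and include_no_version=True: matching rows plus rows without a version
kept = [r for r in rows if r["scylla_version"] == version_filter or not r["scylla_version"]]
assert [r["build_id"] for r in kept] == ["folder/job-a", "folder/job-c"]

# the special "!noVersion" filter keeps only rows lacking a version
kept = [r for r in rows if not r["scylla_version"]]
assert [r["build_id"] for r in kept] == ["folder/job-c"]

# no filter and include_no_version=False: any row that reports a version
kept = [r for r in rows if r["scylla_version"]]
assert [r["build_id"] for r in kept] == ["folder/job-a", "folder/job-b"]
```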
argus/backend/service/testrun.py
@@ -1,4 +1,6 @@
+ from collections import defaultdict
  from datetime import datetime, timedelta
+ from functools import reduce
  import json
  import logging
  import re
@@ -22,6 +24,7 @@ from argus.backend.models.web import (
      ArgusRelease,
      ArgusTest,
      ArgusTestRunComment,
+     ArgusUserView,
      User,
      UserOauthToken,
  )
@@ -33,7 +36,7 @@ from argus.backend.events.event_processors import EVENT_PROCESSORS
  from argus.backend.service.event_service import EventService
  from argus.backend.service.notification_manager import NotificationManagerService
  from argus.backend.service.stats import ComparableTestStatus
- from argus.backend.util.common import get_build_number, strip_html_tags
+ from argus.backend.util.common import chunk, get_build_number, strip_html_tags
  from argus.backend.util.enums import TestInvestigationStatus, TestStatus

  LOGGER = logging.getLogger(__name__)
@@ -163,6 +166,12 @@ class TestRunService:
      def change_run_assignee(self, test_id: UUID, run_id: UUID, new_assignee: UUID | None):
          test = ArgusTest.get(id=test_id)
          plugin = self.get_plugin(plugin_name=test.plugin_name)
+         if not plugin:
+             return {
+                 "test_run_id": run.id,
+                 "assignee": None
+             }
+
          run: PluginModelBase = plugin.model.get(id=run_id)
          old_assignee = run.assignee
          run.assignee = new_assignee
@@ -219,6 +228,7 @@ class TestRunService:
          plugin = self.get_plugin(test.plugin_name)
          release: ArgusRelease = ArgusRelease.get(id=test.release_id)
          comment = ArgusTestRunComment()
+         comment.test_id = test.id
          comment.message = message_stripped
          comment.reactions = reactions
          comment.mentions = [m.id for m in mentions]
@@ -374,13 +384,23 @@ class TestRunService:

          return response

+     def _get_github_issues_for_view(self, view_id: UUID | str) -> list[ArgusGithubIssue]:
+         view: ArgusUserView = ArgusUserView.get(id=view_id)
+         issues = []
+         for batch in chunk(view.tests):
+             issues.extend(ArgusGithubIssue.filter(test_id__in=batch).allow_filtering().all())
+
+         return issues
+
      def get_github_issues(self, filter_key: str, filter_id: UUID, aggregate_by_issue: bool = False) -> dict:
-         if filter_key not in ["release_id", "group_id", "test_id", "run_id", "user_id"]:
+         if filter_key not in ["release_id", "group_id", "test_id", "run_id", "user_id", "view_id"]:
              raise Exception(
-                 "filter_key can only be one of: \"release_id\", \"group_id\", \"test_id\", \"run_id\", \"user_id\""
+                 "filter_key can only be one of: \"release_id\", \"group_id\", \"test_id\", \"run_id\", \"user_id\", \"view_id\""
              )
-
-         all_issues = ArgusGithubIssue.filter(**{filter_key: filter_id}).all()
+         if filter_key == "view_id":
+             all_issues = self._get_github_issues_for_view(filter_id)
+         else:
+             all_issues = ArgusGithubIssue.filter(**{filter_key: filter_id}).all()
          if aggregate_by_issue:
              runs_by_issue = {}
              response = []
@@ -398,6 +418,25 @@ class TestRunService:
              response = [dict(issue.items()) for issue in all_issues]
          return response

+     def resolve_run_build_id_and_number_multiple(self, runs: list[tuple[UUID, UUID]]) -> dict[UUID, dict[str, Any]]:
+         test_ids = [r[0] for r in runs]
+         all_tests: list = []
+         for id_slice in chunk(test_ids):
+             all_tests.extend(ArgusTest.filter(id__in=id_slice).all())
+
+         tests: dict[str, ArgusTest] = {str(t.id): t for t in all_tests}
+         runs_by_plugin = reduce(lambda acc, val: acc[tests[val[0]].plugin_name].append(val[1]) or acc, runs, defaultdict(list))
+         all_runs = {}
+         for plugin, run_ids in runs_by_plugin.items():
+             model = AVAILABLE_PLUGINS.get(plugin).model
+             model_runs = []
+             for run_id in run_ids:
+                 model_runs.append(model.filter(id=run_id).only(["build_id", "start_time", "build_job_url", "id", "test_id"]).get())
+             all_runs.update({ str(run["id"]): {**run, "build_number": get_build_number(run["build_job_url"])} for run in model_runs })
+
+         return all_runs
+
+
      def delete_github_issue(self, issue_id: UUID) -> dict:
          issue: ArgusGithubIssue = ArgusGithubIssue.get(id=issue_id)
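`resolve_run_build_id_and_number_multiple` buckets the requested `(test_id, run_id)` pairs by plugin, queries each plugin's model with a narrow projection, and decorates every run with a build number derived from its job URL via `get_build_number`. That helper's implementation is not part of this diff; a sketch under the assumption that the build number is simply the trailing numeric path segment of a Jenkins-style job URL (the real helper may differ):

```python
def get_build_number(build_job_url: str) -> int | None:
    """Assumed behaviour: treat the last non-empty URL path segment as the build number."""
    segments = [part for part in build_job_url.rstrip("/").split("/") if part]
    try:
        return int(segments[-1])
    except (IndexError, ValueError):
        return None


# Illustrative URLs only, not taken from the package.
assert get_build_number("https://jenkins.example.org/job/scylla-master/job/longevity/42/") == 42
assert get_build_number("https://jenkins.example.org/job/scylla-master/") is None
```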