argus-alm 0.11.3__py3-none-any.whl → 0.11.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,7 +7,7 @@ from uuid import UUID
  from cassandra.util import uuid_from_time # pylint: disable=no-name-in-module
  from flask import current_app
  from argus.backend.db import ScyllaCluster
- from argus.backend.plugins.loader import all_plugin_models
+ from argus.backend.plugins.loader import AVAILABLE_PLUGINS, all_plugin_models
  from argus.backend.plugins.sct.testrun import SCTTestRun
  from argus.backend.service.notification_manager import NotificationManagerService
  from argus.backend.models.web import (
@@ -425,12 +425,16 @@ class ArgusService:
          full_schedule["assignees"] = [assignee.assignee for assignee in assignees]

          schedule_user = User.get(id=assignees[0].assignee)
-
-         jobs_for_schedule = self.get_jobs_for_user(user=schedule_user, ignore_time=True, schedules=[full_schedule])
-
          service = TestRunService()
-         for job in jobs_for_schedule:
-             service.change_run_assignee(test_id=job["test_id"], run_id=job["id"], new_assignee=None)
+
+         for model in all_plugin_models():
+             for run in model.get_jobs_assigned_to_user(schedule_user):
+                 if run["release_id"] != release.id:
+                     continue
+                 if run["test_id"] not in full_schedule["tests"]:
+                     continue
+                 if schedule.period_start < run["start_time"] < schedule.period_end:
+                     service.change_run_assignee(test_id=run["test_id"], run_id=run["id"], new_assignee=None)

          for entities in [tests, groups, assignees]:
              for entity in entities:
@@ -531,33 +535,13 @@ class ArgusService:

          return response

-     def get_jobs_for_user(self, user: User, ignore_time: bool = False, schedules: list[dict] = None):
-         runs = [run for plugin in all_plugin_models() for run in plugin.get_jobs_assigned_to_user(user=user)]
-         schedules = self.get_schedules_for_user(user) if not schedules else schedules
-         valid_runs = []
+     def get_jobs_for_user(self, user: User):
          today = datetime.datetime.now()
-         month_ago = today - datetime.timedelta(days=30)
-         for run in runs:
-             run_date = run["start_time"]
-             if user.id == run["assignee"] and run_date >= month_ago and not ignore_time:
-                 valid_runs.append(run)
-                 continue
-             for schedule in schedules:
-                 if not run["release_id"] == schedule["release_id"]:
-                     continue
-                 if not schedule["period_start"] < run_date < schedule["period_end"]:
-                     continue
-                 if run["assignee"] in schedule["assignees"]:
-                     valid_runs.append(run)
-                     break
-                 if run["group_id"] in schedule["groups"]:
-                     valid_runs.append(run)
-                     break
-                 filtered_tests = [test for test in schedule["tests"] if test == run["test_id"]]
-                 if len(filtered_tests) > 0:
-                     valid_runs.append(run)
-                     break
-         return valid_runs
+         validity_period = today - datetime.timedelta(days=current_app.config.get("JOB_VALIDITY_PERIOD_DAYS", 30))
+         for plugin in all_plugin_models():
+             for run in plugin.get_jobs_assigned_to_user(user=user):
+                 if run["start_time"] >= validity_period:
+                     yield run

      def get_schedules_for_user(self, user: User) -> list[dict]:
          all_assigned_schedules = ArgusScheduleAssignee.filter(assignee=user.id).all()
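
Note on the hunk above: get_jobs_for_user() is now a generator and drops the schedule-matching logic; the only remaining filter is a validity window read from Flask configuration. A minimal sketch of the cutoff it applies, assuming an active Flask app context (nothing here beyond what the diff itself shows):

    import datetime
    from flask import current_app

    # JOB_VALIDITY_PERIOD_DAYS is the new config knob; it defaults to 30 days
    cutoff = datetime.datetime.now() - datetime.timedelta(
        days=current_app.config.get("JOB_VALIDITY_PERIOD_DAYS", 30))
    # get_jobs_for_user() yields only runs whose start_time is >= cutoff

Because the method now yields results instead of returning a list, callers that need the full result should wrap it in list(...).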
@@ -78,7 +78,7 @@ class JenkinsMonitor(ArgusTestsMonitor):
      def collect(self):
          click.echo("Collecting new tests from jenkins")
          all_jobs = self._jenkins.get_all_jobs()
-         all_monitored_folders = [job for job in all_jobs if job["name"] in self._monitored_releases]
+         all_monitored_folders = [job for job in all_jobs if job["fullname"] in self._monitored_releases]
          for release in all_monitored_folders:
              LOGGER.info("Processing release %s", release["name"])
              try:
@@ -89,8 +89,23 @@ class JenkinsMonitor(ArgusTestsMonitor):
                  saved_release = self.create_release(release["name"])
                  self._existing_releases.append(saved_release)

-             groups = self.collect_groups_for_release(release["jobs"])
+             try:
+                 groups = self.collect_groups_for_release(release["jobs"])
+             except KeyError:
+                 LOGGER.error("Empty release!\n %s", release)
+                 continue
              folder_stack = [dict(parent_name="", parent_display_name="", group=g) for g in reversed(groups)]
+             root_folder = {
+                 "parent_name": "",
+                 "parent_display_name": "",
+                 "group": {
+                     "name": f"{release['fullname']}-root",
+                     "displayName": "-- root directory --",
+                     "fullname": release["fullname"],
+                     "jobs": self.collect_root_folder_jobs(release["jobs"]),
+                 }
+             }
+             folder_stack.append(root_folder)
              while len(folder_stack) != 0:
                  group_dict = folder_stack.pop()
                  group = group_dict["group"]
@@ -104,7 +119,7 @@ class JenkinsMonitor(ArgusTestsMonitor):
                  LOGGER.warning(
                      "Group %s for release %s doesn't exist, creating...", group_name, saved_release.name)
                  try:
-                     display_name = self._jenkins.get_job_info(name=group["fullname"])["displayName"]
+                     display_name = group.get("displayName", self._jenkins.get_job_info(name=group["fullname"])["displayName"])
                      display_name = display_name if not group_dict[
                          "parent_display_name"] else f"{group_dict['parent_display_name']} - {display_name}"
                  except Exception:
@@ -138,3 +153,6 @@ class JenkinsMonitor(ArgusTestsMonitor):
          groups = [group for group in groups if self.check_filter(group["name"])]

          return groups
+
+     def collect_root_folder_jobs(self, jobs):
+         return [job for job in jobs if "WorkflowJob" in job["_class"]]
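
For reference, collect_root_folder_jobs() keeps only Jenkins pipeline jobs that sit directly in the release folder. An illustrative filter over a hypothetical Jenkins API payload (the _class strings are typical Jenkins class identifiers, not taken from this diff):

    jobs = [
        {"_class": "com.cloudbees.hudson.plugins.folder.Folder", "name": "rolling-upgrade"},
        {"_class": "org.jenkinsci.plugins.workflow.job.WorkflowJob", "name": "longevity-100gb-4h"},
    ]
    root_jobs = [job for job in jobs if "WorkflowJob" in job["_class"]]
    # root_jobs keeps only the pipeline job; folders are left for the folder_stack walk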
@@ -0,0 +1,18 @@
+ from datetime import datetime
+ import json
+ from argus.backend.models.web import ArgusEvent, ArgusEventTypes
+
+
+ class EventService:
+     @staticmethod
+     def create_run_event(kind: ArgusEventTypes, body: dict, user_id=None, run_id=None, release_id=None, group_id=None, test_id=None):
+         event = ArgusEvent()
+         event.release_id = release_id
+         event.group_id = group_id
+         event.test_id = test_id
+         event.user_id = user_id
+         event.run_id = run_id
+         event.body = json.dumps(body, ensure_ascii=True, separators=(',', ':'))
+         event.kind = kind.value
+         event.created_at = datetime.utcnow()
+         event.save()
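
The new module above (imported later in this diff as argus.backend.service.event_service) hosts the run-event helper that used to live on TestRunService; it is a static method, so call sites invoke it directly on the class. A hedged usage sketch mirroring the comment-posted call site shown further down (the username value is a placeholder):

    from argus.backend.models.web import ArgusEventTypes
    from argus.backend.service.event_service import EventService

    EventService.create_run_event(
        kind=ArgusEventTypes.TestRunCommentPosted,
        body={"message": "A comment was posted by {username}", "username": "jsmith"},
        user_id=None, run_id=None, release_id=None, test_id=None,  # identifiers are all optional
    )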
@@ -76,6 +76,70 @@ class ComparableTestStatus:
          return self._get_prio() <= __o._get_prio()


+ class ComparableTestInvestigationStatus:
+     PRIORITY_MAP = {
+         TestInvestigationStatus.NOT_INVESTIGATED: 10,
+         TestInvestigationStatus.IN_PROGRESS: 9,
+         TestInvestigationStatus.INVESTIGATED: 8,
+         TestInvestigationStatus.IGNORED: 7,
+     }
+
+     def __init__(self, status: TestInvestigationStatus):
+         self._status = status
+
+     def _get_prio(self):
+         return self.PRIORITY_MAP.get(self._status, 0)
+
+     def __eq__(self, __o: object) -> bool:
+         if not isinstance(__o, ComparableTestInvestigationStatus):
+             return False
+         return self._get_prio() == __o._get_prio()
+
+     def __ne__(self, __o: object) -> bool:
+         if not isinstance(__o, ComparableTestInvestigationStatus):
+             return False
+         return not self.__eq__(__o)
+
+     def __lt__(self, __o: object) -> bool:
+         if not isinstance(__o, ComparableTestInvestigationStatus):
+             return False
+         return self._get_prio() < __o._get_prio()
+
+     def __gt__(self, __o: object) -> bool:
+         if not isinstance(__o, ComparableTestInvestigationStatus):
+             return False
+         return self._get_prio() > __o._get_prio()
+
+     def __ge__(self, __o: object) -> bool:
+         if not isinstance(__o, ComparableTestInvestigationStatus):
+             return False
+         return self._get_prio() >= __o._get_prio()
+
+     def __le__(self, __o: object) -> bool:
+         if not isinstance(__o, ComparableTestInvestigationStatus):
+             return False
+         return self._get_prio() <= __o._get_prio()
+
+
+ def generate_field_status_map(
+         last_runs: list[TestRunStatRow],
+         field_name = "status",
+         container_class = TestStatus,
+         cmp_class = ComparableTestStatus
+ ) -> dict[int, str]:
+
+     status_map = {}
+     for run in last_runs:
+         run_number = get_build_number(run["build_job_url"])
+         match status := status_map.get(run_number):
+             case str():
+                 if cmp_class(container_class(status)) < cmp_class(container_class(run[field_name])):
+                     status_map[run_number] = run[field_name]
+             case _:
+                 status_map[run_number] = run[field_name]
+     return status_map
+
+
  class ReleaseStats:
      def __init__(self, release: ArgusRelease) -> None:
          self.release = release
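
The new generate_field_status_map() generalizes the removed TestStats._generate_status_map (next hunk): it collapses multiple runs of the same build number into the single highest-priority value of an arbitrary field, via a comparable wrapper class. A worked example of the priority rule, using the PRIORITY_MAP defined above:

    from argus.backend.util.enums import TestInvestigationStatus

    investigated = ComparableTestInvestigationStatus(TestInvestigationStatus.INVESTIGATED)          # priority 8
    not_investigated = ComparableTestInvestigationStatus(TestInvestigationStatus.NOT_INVESTIGATED)  # priority 10
    assert investigated < not_investigated
    # so when two runs share a build number, generate_field_status_map() keeps "not_investigated"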
@@ -227,18 +291,6 @@ class TestStats:
              "hasComments": self.has_comments
          }

-     def _generate_status_map(self, last_runs: list[TestRunStatRow]) -> dict[int, str]:
-         status_map = {}
-         for run in last_runs:
-             run_number = get_build_number(run["build_job_url"])
-             match status := status_map.get(run_number):
-                 case str():
-                     if ComparableTestStatus(TestStatus(status)) < ComparableTestStatus(TestStatus(run["status"])):
-                         status_map[run_number] = run["status"]
-                 case _:
-                     status_map[run_number] = run["status"]
-         return status_map
-
      def collect(self, limited=False):

          # TODO: Parametrize run limit
@@ -252,10 +304,12 @@ class TestStats:
              self.status = TestStatus.NOT_RUN if self.is_scheduled else TestStatus.NOT_PLANNED
              self.parent_group.increment_status(status=self.status)
              return
-         status_map = self._generate_status_map(last_runs)
+         status_map = generate_field_status_map(last_runs)
+         investigation_status_map = generate_field_status_map(
+             last_runs, "investigation_status", TestInvestigationStatus, ComparableTestInvestigationStatus)

          self.status = status_map.get(get_build_number(last_run["build_job_url"]))
-         self.investigation_status = TestInvestigationStatus(last_run["investigation_status"])
+         self.investigation_status = investigation_status_map.get(get_build_number(last_run["build_job_url"]))
          self.start_time = last_run["start_time"]

          self.parent_group.increment_status(status=self.status)
@@ -300,7 +354,7 @@ class ReleaseStatsCollector:

          if self.release_version:
              self.release_rows = list(
-                 filter(lambda row: row["scylla_version"] == self.release_version, self.release_rows))
+                 filter(lambda row: row["scylla_version"] == self.release_version or not row["scylla_version"], self.release_rows))

          self.release_stats = ReleaseStats(release=self.release)
          self.release_stats.collect(rows=self.release_rows, limited=limited, force=force)
@@ -8,6 +8,9 @@ from uuid import UUID

  import requests
  from flask import g
+ from cassandra.query import BatchStatement, ConsistencyLevel
+ from cassandra.cqlengine.query import BatchQuery
+ from argus.backend.db import ScyllaCluster

  from argus.backend.models.web import (
      ArgusEvent,
@@ -27,7 +30,9 @@ from argus.backend.plugins.core import PluginInfoBase, PluginModelBase

  from argus.backend.plugins.loader import AVAILABLE_PLUGINS
  from argus.backend.events.event_processors import EVENT_PROCESSORS
+ from argus.backend.service.event_service import EventService
  from argus.backend.service.notification_manager import NotificationManagerService
+ from argus.backend.service.stats import ComparableTestStatus
  from argus.backend.util.common import get_build_number, strip_html_tags
  from argus.backend.util.enums import TestInvestigationStatus, TestStatus

@@ -77,6 +82,8 @@ class TestRunService:
          for row in last_runs:
              row["build_number"] = get_build_number(build_job_url=row["build_job_url"])

+         last_runs = sorted(last_runs, reverse=True, key=lambda run: (run["build_number"], ComparableTestStatus(TestStatus(run["status"]))))
+
          return last_runs

      def get_runs_by_id(self, test_id: UUID, runs: list[UUID]): # FIXME: Not needed, use get_run and individual polling
@@ -105,7 +112,7 @@ class TestRunService:
          run.status = new_status.value
          run.save()

-         self.create_run_event(
+         EventService.create_run_event(
              kind=ArgusEventTypes.TestRunStatusChanged,
              body={
                  "message": "Status was changed from {old_status} to {new_status} by {username}",
@@ -133,7 +140,7 @@ class TestRunService:
          run.investigation_status = new_status.value
          run.save()

-         self.create_run_event(
+         EventService.create_run_event(
              kind=ArgusEventTypes.TestRunStatusChanged,
              body={
                  "message": "Investigation status was changed from {old_status} to {new_status} by {username}",
@@ -172,7 +179,7 @@ class TestRunService:
              LOGGER.warning("Non existent assignee was present on the run %s for test %s: %s",
                             run_id, test_id, old_assignee)
              old_assignee = None
-         self.create_run_event(
+         EventService.create_run_event(
              kind=ArgusEventTypes.AssigneeChanged,
              body={
                  "message": "Assignee was changed from \"{old_user}\" to \"{new_user}\" by {username}",
@@ -241,7 +248,7 @@ class TestRunService:
              content_params=params
          )

-         self.create_run_event(kind=ArgusEventTypes.TestRunCommentPosted, body={
+         EventService.create_run_event(kind=ArgusEventTypes.TestRunCommentPosted, body={
              "message": "A comment was posted by {username}",
              "username": g.user.username
          }, user_id=g.user.id, run_id=run_id, release_id=release.id, test_id=test.id)
@@ -254,7 +261,7 @@ class TestRunService:
              raise Exception("Unable to delete other user comments")
          comment.delete()

-         self.create_run_event(kind=ArgusEventTypes.TestRunCommentDeleted, body={
+         EventService.create_run_event(kind=ArgusEventTypes.TestRunCommentDeleted, body={
              "message": "A comment was deleted by {username}",
              "username": g.user.username
          }, user_id=g.user.id, run_id=run_id, release_id=comment.release_id, test_id=test_id)
@@ -270,25 +277,13 @@ class TestRunService:
          comment.mentions = mentions
          comment.save()

-         self.create_run_event(kind=ArgusEventTypes.TestRunCommentUpdated, body={
+         EventService.create_run_event(kind=ArgusEventTypes.TestRunCommentUpdated, body={
              "message": "A comment was edited by {username}",
              "username": g.user.username
          }, user_id=g.user.id, run_id=run_id, release_id=comment.release_id, test_id=test_id)

          return self.get_run_comments(run_id=run_id)

-     def create_run_event(self, kind: ArgusEventTypes, body: dict, user_id=None, run_id=None, release_id=None, group_id=None, test_id=None):
-         event = ArgusEvent()
-         event.release_id = release_id
-         event.group_id = group_id
-         event.test_id = test_id
-         event.user_id = user_id
-         event.run_id = run_id
-         event.body = json.dumps(body, ensure_ascii=True, separators=(',', ':'))
-         event.kind = kind.value
-         event.created_at = datetime.utcnow()
-         event.save()
-
      def get_run_events(self, run_id: UUID):
          response = {}
          all_events = ArgusEvent.filter(run_id=run_id).all()
@@ -355,7 +350,7 @@ class TestRunService:
          new_issue.last_status = issue_state.get("state")
          new_issue.save()

-         self.create_run_event(
+         EventService.create_run_event(
              kind=ArgusEventTypes.TestRunIssueAdded,
              body={
                  "message": "An issue titled \"{title}\" was added by {username}",
@@ -406,7 +401,7 @@ class TestRunService:
      def delete_github_issue(self, issue_id: UUID) -> dict:
          issue: ArgusGithubIssue = ArgusGithubIssue.get(id=issue_id)

-         self.create_run_event(
+         EventService.create_run_event(
              kind=ArgusEventTypes.TestRunIssueRemoved,
              body={
                  "message": "An issue titled \"{title}\" was removed by {username}",
@@ -445,7 +440,7 @@ class TestRunService:
              run.status = TestStatus.ABORTED.value
              run.save()

-             self.create_run_event(
+             EventService.create_run_event(
                  kind=ArgusEventTypes.TestRunStatusChanged,
                  body={
                      "message": "Run was automatically terminated due to not responding for more than 45 minutes "
@@ -462,3 +457,46 @@ class TestRunService:
          )

          return len(all_stuck_runs)
+
+     def ignore_jobs(self, test_id: UUID, reason: str):
+         test: ArgusTest = ArgusTest.get(id=test_id)
+         plugin = self.get_plugin(plugin_name=test.plugin_name)
+
+         if not reason:
+             raise TestRunServiceException("Reason for ignore cannot be empty")
+
+         cluster = ScyllaCluster.get()
+         batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
+         event_batch = BatchQuery()
+         jobs_affected = 0
+         for job in plugin.model.get_jobs_meta_by_test_id(test.id):
+             if job["status"] != TestStatus.PASSED and job["investigation_status"] == TestInvestigationStatus.NOT_INVESTIGATED:
+                 batch.add(
+                     plugin.model.prepare_investigation_status_update_query(
+                         build_id=job["build_id"],
+                         start_time=job["start_time"],
+                         new_status=TestInvestigationStatus.IGNORED
+                     )
+                 )
+
+                 ArgusEvent.batch(event_batch).create(
+                     release_id = job["release_id"],
+                     group_id = job["group_id"],
+                     test_id = test_id,
+                     user_id = g.user.id,
+                     run_id = job["id"],
+                     body = json.dumps({
+                         "message": "Run was marked as ignored by {username} due to the following reason: {reason}",
+                         "username": g.user.username,
+                         "reason": reason,
+                     }, ensure_ascii=True, separators=(',', ':')),
+                     kind = ArgusEventTypes.TestRunBatchInvestigationStatusChange.value,
+                     created_at = datetime.utcnow(),
+                 )
+
+                 jobs_affected += 1
+
+         cluster.session.execute(batch)
+         event_batch.execute()
+
+         return jobs_affected
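
A hedged sketch of calling the new bulk-ignore helper; it assumes a Flask request context (g.user is read inside) and a plugin model that provides get_jobs_meta_by_test_id / prepare_investigation_status_update_query, as the hunk above relies on:

    from uuid import UUID

    service = TestRunService()  # no-arg construction, as used elsewhere in this diff
    affected = service.ignore_jobs(
        test_id=UUID("00000000-0000-0000-0000-000000000000"),  # placeholder test id
        reason="Known infrastructure flake, tracked separately",
    )
    # returns how many non-passed, not-investigated runs were flipped to IGNORED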
@@ -1,4 +1,5 @@
  from datetime import datetime
+ import logging
  from json.encoder import JSONEncoder
  from uuid import UUID

@@ -6,6 +7,8 @@ import cassandra.cqlengine.usertype as ut
  import cassandra.cqlengine.models as m


+ LOGGER = logging.getLogger(__name__)
+
  class ArgusJSONEncoder(JSONEncoder):
      def default(self, o):
          match o:
@@ -25,6 +25,7 @@ class TestInvestigationStatus(str, Enum):
      NOT_INVESTIGATED = "not_investigated"
      IN_PROGRESS = "in_progress"
      INVESTIGATED = "investigated"
+     IGNORED = "ignored"


  class ResourceState(str, Enum):
@@ -16,6 +16,7 @@ from argus.backend.plugins.driver_matrix_tests.raw_types import RawMatrixTestRes

  LOGGER = logging.getLogger(__name__)

+ TestTypeType = Literal['java', 'cpp', 'python', 'gocql']

  class AdaptedXUnitData(TypedDict):
      timestamp: str
@@ -52,6 +53,14 @@ def cpp_driver_matrix_adapter(xml: ElementTree.ElementTree) -> AdaptedXUnitData:
      }


+ def gocql_driver_matrix_adapter(xml: ElementTree.ElementTree) -> AdaptedXUnitData:
+     testsuites = list(xml.getroot().iter("testsuite"))
+
+     return {
+         "timestamp": testsuites[0].attrib.get("timestamp"),
+     }
+
+
  def generic_adapter(xml: ElementTree.ElementTree) -> AdaptedXUnitData:
      return {
          "timestamp": datetime.utcnow().isoformat()
@@ -66,6 +75,7 @@ class ArgusDriverMatrixClient(ArgusClient):
          "java": java_driver_matrix_adapter,
          "cpp": cpp_driver_matrix_adapter,
          "python": python_driver_matrix_adapter,
+         "gocql": gocql_driver_matrix_adapter,
      }

      def __init__(self, run_id: UUID, auth_token: str, base_url: str, api_version="v1") -> None:
@@ -107,8 +117,11 @@ class ArgusDriverMatrixClient(ArgusClient):

          return raw_cases

-     def get_driver_info(self, xml_name: str) -> dict[str, str]:
-         filename_re = r"(?P<name>[\w]*)\.(?P<driver_name>[\w]*)\.(?P<proto>v\d)\.(?P<version>[\d\.]*)\.xml"
+     def get_driver_info(self, xml_name: str, test_type: TestTypeType) -> dict[str, str]:
+         if test_type == "cpp":
+             filename_re = r"TEST-(?P<driver_name>[\w]*)-(?P<version>[\d\.]*)\.xml"
+         else:
+             filename_re = r"(?P<name>[\w]*)\.(?P<driver_name>[\w]*)\.(?P<proto>v\d)\.(?P<version>[\d\.]*)\.xml"

          match = re.match(filename_re, xml_name)

@@ -124,14 +137,14 @@ class ArgusDriverMatrixClient(ArgusClient):

          return total - errors - skipped - failures

-     def parse_result_xml(self, xml_path: Path, test_type: Literal['java', 'cpp', 'python']) -> RawMatrixTestResult:
+     def parse_result_xml(self, xml_path: Path, test_type: TestTypeType) -> RawMatrixTestResult:
          with xml_path.open(mode="rt", encoding="utf-8") as xml_file:
              xml = ElementTree.parse(source=xml_file)
              LOGGER.info("%s", pformat(xml))
              testsuites = xml.getroot()
              adapted_data = self.TEST_ADAPTERS.get(test_type, generic_adapter)(xml)

              driver_info = self.get_driver_info(xml_path.name, test_type)
              test_collection = {
                  "timestamp": adapted_data["timestamp"],
                  "name": xml_path.stem,
@@ -171,7 +184,7 @@ class ArgusDriverMatrixClient(ArgusClient):
              "suites": all_suites
          }

-     def get_results(self, result_path: str, test_type: Literal['java', 'cpp', 'python']) -> list[RawMatrixTestResult]:
+     def get_results(self, result_path: str, test_type: TestTypeType) -> list[RawMatrixTestResult]:
          xmls = glob(f"{result_path}/**/*.xml", recursive=True)
          LOGGER.info("Will use following XMLs: %s", pformat(xmls))
          results = []
@@ -180,7 +193,7 @@ class ArgusDriverMatrixClient(ArgusClient):
              results.append(result)
          return results

-     def submit(self, build_id: str, build_url: str, env_path: str, result_path: str, test_type: Literal['java', 'cpp', 'python']):
+     def submit(self, build_id: str, build_url: str, env_path: str, result_path: str, test_type: TestTypeType):
          env = self.parse_build_environment(env_path)
          results = self.get_results(result_path, test_type)

@@ -1,5 +1,6 @@
  from uuid import UUID
  from dataclasses import asdict
+ from argus.backend.plugins.sct.types import GeminiResultsRequest, PerformanceResultsRequest
  from argus.backend.util.enums import ResourceState, TestStatus
  from argus.client.base import ArgusClient
  from argus.client.sct.types import EventsInfo, LogLink, Package
@@ -14,8 +15,11 @@ class ArgusSCTClient(ArgusClient):
          SUBMIT_SCREENSHOTS = "/sct/$id/screenshots/submit"
          CREATE_RESOURCE = "/sct/$id/resource/create"
          TERMINATE_RESOURCE = "/sct/$id/resource/$name/terminate"
+         SET_SCT_RUNNER = "/sct/$id/sct_runner/set"
          UPDATE_SHARDS_FOR_RESOURCE = "/sct/$id/resource/$name/shards"
          SUBMIT_NEMESIS = "/sct/$id/nemesis/submit"
+         SUBMIT_GEMINI_RESULTS = "/sct/$id/gemini/submit"
+         SUBMIT_PERFORMANCE_RESULTS = "/sct/$id/performance/submit"
          FINALIZE_NEMESIS = "/sct/$id/nemesis/finalize"
          SUBMIT_EVENTS = "/sct/$id/events/submit"

@@ -23,8 +27,7 @@ class ArgusSCTClient(ArgusClient):
          super().__init__(auth_token, base_url, api_version)
          self.run_id = run_id

-     def submit_sct_run(self, job_name: str, job_url: str, started_by: str, commit_id: str,
-                        runner_public_ip: str, runner_private_ip: str, sct_config: dict) -> None:
+     def submit_sct_run(self, job_name: str, job_url: str, started_by: str, commit_id: str, sct_config: dict) -> None:
          """
          Submits an SCT run to argus.
          """
@@ -34,8 +37,6 @@ class ArgusSCTClient(ArgusClient):
              "job_url": job_url,
              "started_by": started_by,
              "commit_id": commit_id,
-             "runner_public_ip": runner_public_ip,
-             "runner_private_ip": runner_private_ip,
              "sct_config": sct_config,
          })

@@ -48,6 +49,23 @@ class ArgusSCTClient(ArgusClient):
          response = super().set_status(run_type=self.test_type, run_id=self.run_id, new_status=new_status)
          self.check_response(response)

+     def set_sct_runner(self, public_ip: str, private_ip: str, region: str, backend: str) -> None:
+         """
+         Sets runner information for an SCT run.
+         """
+         response = self.post(
+             endpoint=self.Routes.SET_SCT_RUNNER,
+             location_params={"id": str(self.run_id)},
+             body={
+                 **self.generic_body,
+                 "public_ip": public_ip,
+                 "private_ip": private_ip,
+                 "region": region,
+                 "backend": backend,
+             }
+         )
+         self.check_response(response)
+
      def update_scylla_version(self, version: str) -> None:
          """
          Updates scylla server version used for filtering test results by version.
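
Runner details are no longer parameters of submit_sct_run(); they are reported through the new set_sct_runner() call. A hedged usage sketch, assuming an already-constructed ArgusSCTClient bound to a run id (all values are placeholders):

    client.submit_sct_run(job_name="longevity-100gb-4h", job_url="https://jenkins.example/job/1/",
                          started_by="jsmith", commit_id="abc123", sct_config={})
    client.set_sct_runner(public_ip="203.0.113.10", private_ip="10.0.0.5",
                          region="us-east-1", backend="aws")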
@@ -98,6 +116,34 @@ class ArgusSCTClient(ArgusClient):
          )
          self.check_response(response)

+     def submit_gemini_results(self, gemini_data: GeminiResultsRequest) -> None:
+         """
+         Submits gemini results such as oracle node information and gemini command & its results
+         """
+         response = self.post(
+             endpoint=self.Routes.SUBMIT_GEMINI_RESULTS,
+             location_params={"id": str(self.run_id)},
+             body={
+                 **self.generic_body,
+                 "gemini_data": gemini_data,
+             }
+         )
+         self.check_response(response)
+
+     def submit_performance_results(self, performance_results: PerformanceResultsRequest) -> None:
+         """
+         Submits results of a performance run. Things such as throughput stats, overall and per op
+         """
+         response = self.post(
+             endpoint=self.Routes.SUBMIT_PERFORMANCE_RESULTS,
+             location_params={"id": str(self.run_id)},
+             body={
+                 **self.generic_body,
+                 "performance_results": performance_results,
+             }
+         )
+         self.check_response(response)
+
      def create_resource(self, name: str, resource_type: str, public_ip: str, private_ip: str,
                          region: str, provider: str, shards_amount: int, state=ResourceState.RUNNING) -> None:
          """
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: argus-alm
- Version: 0.11.3
+ Version: 0.11.6
  Summary: Argus
  Home-page: https://github.com/scylladb/argus
  License: Apache-2.0