contentctl 4.4.7__py3-none-any.whl → 5.0.0__py3-none-any.whl

This diff compares the contents of publicly released package versions as published to their respective public registries. It is provided for informational purposes only.
Files changed (123)
  1. contentctl/__init__.py +1 -1
  2. contentctl/actions/build.py +102 -57
  3. contentctl/actions/deploy_acs.py +29 -24
  4. contentctl/actions/detection_testing/DetectionTestingManager.py +66 -42
  5. contentctl/actions/detection_testing/GitService.py +134 -76
  6. contentctl/actions/detection_testing/generate_detection_coverage_badge.py +48 -30
  7. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +192 -147
  8. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +45 -32
  9. contentctl/actions/detection_testing/progress_bar.py +9 -6
  10. contentctl/actions/detection_testing/views/DetectionTestingView.py +16 -19
  11. contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +1 -5
  12. contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +2 -2
  13. contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +1 -4
  14. contentctl/actions/doc_gen.py +9 -5
  15. contentctl/actions/initialize.py +45 -33
  16. contentctl/actions/inspect.py +118 -61
  17. contentctl/actions/new_content.py +155 -108
  18. contentctl/actions/release_notes.py +276 -146
  19. contentctl/actions/reporting.py +23 -19
  20. contentctl/actions/test.py +33 -28
  21. contentctl/actions/validate.py +55 -34
  22. contentctl/api.py +54 -45
  23. contentctl/contentctl.py +124 -90
  24. contentctl/enrichments/attack_enrichment.py +112 -72
  25. contentctl/enrichments/cve_enrichment.py +34 -28
  26. contentctl/enrichments/splunk_app_enrichment.py +38 -36
  27. contentctl/helper/link_validator.py +101 -78
  28. contentctl/helper/splunk_app.py +69 -41
  29. contentctl/helper/utils.py +58 -53
  30. contentctl/input/director.py +68 -36
  31. contentctl/input/new_content_questions.py +27 -35
  32. contentctl/input/yml_reader.py +28 -18
  33. contentctl/objects/abstract_security_content_objects/detection_abstract.py +303 -259
  34. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +115 -52
  35. contentctl/objects/alert_action.py +10 -9
  36. contentctl/objects/annotated_types.py +1 -1
  37. contentctl/objects/atomic.py +65 -54
  38. contentctl/objects/base_test.py +5 -3
  39. contentctl/objects/base_test_result.py +19 -11
  40. contentctl/objects/baseline.py +62 -30
  41. contentctl/objects/baseline_tags.py +30 -24
  42. contentctl/objects/config.py +790 -597
  43. contentctl/objects/constants.py +33 -56
  44. contentctl/objects/correlation_search.py +150 -136
  45. contentctl/objects/dashboard.py +55 -41
  46. contentctl/objects/data_source.py +16 -17
  47. contentctl/objects/deployment.py +43 -44
  48. contentctl/objects/deployment_email.py +3 -2
  49. contentctl/objects/deployment_notable.py +4 -2
  50. contentctl/objects/deployment_phantom.py +7 -6
  51. contentctl/objects/deployment_rba.py +3 -2
  52. contentctl/objects/deployment_scheduling.py +3 -2
  53. contentctl/objects/deployment_slack.py +3 -2
  54. contentctl/objects/detection.py +5 -2
  55. contentctl/objects/detection_metadata.py +1 -0
  56. contentctl/objects/detection_stanza.py +7 -2
  57. contentctl/objects/detection_tags.py +58 -103
  58. contentctl/objects/drilldown.py +66 -34
  59. contentctl/objects/enums.py +81 -100
  60. contentctl/objects/errors.py +16 -24
  61. contentctl/objects/integration_test.py +3 -3
  62. contentctl/objects/integration_test_result.py +1 -0
  63. contentctl/objects/investigation.py +59 -36
  64. contentctl/objects/investigation_tags.py +30 -19
  65. contentctl/objects/lookup.py +304 -101
  66. contentctl/objects/macro.py +55 -39
  67. contentctl/objects/manual_test.py +3 -3
  68. contentctl/objects/manual_test_result.py +1 -0
  69. contentctl/objects/mitre_attack_enrichment.py +17 -16
  70. contentctl/objects/notable_action.py +2 -1
  71. contentctl/objects/notable_event.py +1 -3
  72. contentctl/objects/playbook.py +37 -35
  73. contentctl/objects/playbook_tags.py +23 -13
  74. contentctl/objects/rba.py +96 -0
  75. contentctl/objects/risk_analysis_action.py +15 -11
  76. contentctl/objects/risk_event.py +110 -160
  77. contentctl/objects/risk_object.py +1 -0
  78. contentctl/objects/savedsearches_conf.py +9 -7
  79. contentctl/objects/security_content_object.py +5 -2
  80. contentctl/objects/story.py +54 -49
  81. contentctl/objects/story_tags.py +56 -45
  82. contentctl/objects/test_attack_data.py +2 -1
  83. contentctl/objects/test_group.py +5 -2
  84. contentctl/objects/threat_object.py +1 -0
  85. contentctl/objects/throttling.py +27 -18
  86. contentctl/objects/unit_test.py +3 -4
  87. contentctl/objects/unit_test_baseline.py +5 -5
  88. contentctl/objects/unit_test_result.py +6 -6
  89. contentctl/output/api_json_output.py +233 -220
  90. contentctl/output/attack_nav_output.py +21 -21
  91. contentctl/output/attack_nav_writer.py +29 -37
  92. contentctl/output/conf_output.py +235 -172
  93. contentctl/output/conf_writer.py +201 -125
  94. contentctl/output/data_source_writer.py +38 -26
  95. contentctl/output/doc_md_output.py +53 -27
  96. contentctl/output/jinja_writer.py +19 -15
  97. contentctl/output/json_writer.py +21 -11
  98. contentctl/output/svg_output.py +56 -38
  99. contentctl/output/templates/analyticstories_detections.j2 +2 -2
  100. contentctl/output/templates/analyticstories_stories.j2 +1 -1
  101. contentctl/output/templates/collections.j2 +1 -1
  102. contentctl/output/templates/doc_detections.j2 +0 -5
  103. contentctl/output/templates/es_investigations_investigations.j2 +1 -1
  104. contentctl/output/templates/es_investigations_stories.j2 +1 -1
  105. contentctl/output/templates/savedsearches_baselines.j2 +2 -2
  106. contentctl/output/templates/savedsearches_detections.j2 +10 -11
  107. contentctl/output/templates/savedsearches_investigations.j2 +2 -2
  108. contentctl/output/templates/transforms.j2 +6 -8
  109. contentctl/output/yml_writer.py +29 -20
  110. contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +16 -34
  111. contentctl/templates/stories/cobalt_strike.yml +1 -0
  112. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/METADATA +5 -4
  113. contentctl-5.0.0.dist-info/RECORD +168 -0
  114. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/WHEEL +1 -1
  115. contentctl/actions/initialize_old.py +0 -245
  116. contentctl/objects/event_source.py +0 -11
  117. contentctl/objects/observable.py +0 -37
  118. contentctl/output/detection_writer.py +0 -28
  119. contentctl/output/new_content_yml_output.py +0 -56
  120. contentctl/output/yml_output.py +0 -66
  121. contentctl-4.4.7.dist-info/RECORD +0 -173
  122. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/LICENSE.md +0 -0
  123. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/entry_points.txt +0 -0
contentctl/objects/correlation_search.py

@@ -2,34 +2,33 @@ import logging
 import time
 import json
 from typing import Any
-from enum import Enum
+from enum import StrEnum, IntEnum
 from functools import cached_property

 from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
-from splunklib.results import JSONResultsReader, Message # type: ignore
-from splunklib.binding import HTTPError, ResponseReader # type: ignore
-import splunklib.client as splunklib # type: ignore
-from tqdm import tqdm # type: ignore
+from splunklib.results import JSONResultsReader, Message  # type: ignore
+from splunklib.binding import HTTPError, ResponseReader  # type: ignore
+import splunklib.client as splunklib  # type: ignore
+from tqdm import tqdm  # type: ignore

 from contentctl.objects.risk_analysis_action import RiskAnalysisAction
 from contentctl.objects.notable_action import NotableAction
 from contentctl.objects.base_test_result import TestResultStatus
 from contentctl.objects.integration_test_result import IntegrationTestResult
 from contentctl.actions.detection_testing.progress_bar import (
-    format_pbar_string, # type: ignore
+    format_pbar_string,  # type: ignore
     TestReportingType,
-    TestingStates
+    TestingStates,
 )
 from contentctl.objects.errors import (
     IntegrationTestingError,
     ServerError,
     ClientError,
-    ValidationFailed
+    ValidationFailed,
 )
 from contentctl.objects.detection import Detection
 from contentctl.objects.risk_event import RiskEvent
 from contentctl.objects.notable_event import NotableEvent
-from contentctl.objects.observable import Observable


 # Suppress logging by default; enable for local testing
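
Note: the Enum to StrEnum/IntEnum switch above is what enables the removal of the many .value accesses later in this diff. As a minimal sketch (assuming Python 3.11+, where enum.StrEnum was introduced; the class names below are illustrative, not from contentctl), StrEnum members are plain strings, so they interpolate, compare, and key dicts directly:

    from enum import Enum, StrEnum

    class OldIndexes(str, Enum):      # pre-5.0.0 pattern
        RISK = "risk"

    class NewIndexes(StrEnum):        # 5.0.0 pattern
        RISK = "risk"

    print(f"{OldIndexes.RISK}")       # "OldIndexes.RISK" on Python 3.11+
    print(f"{NewIndexes.RISK}")       # "risk"; the member *is* the string
    print(NewIndexes.RISK == "risk")  # True, with no .value needed
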
@@ -66,7 +65,9 @@ def get_logger() -> logging.Logger:
     handler = logging.NullHandler()

     # Format our output
-    formatter = logging.Formatter('%(asctime)s - %(levelname)s:%(name)s - %(message)s')
+    formatter = logging.Formatter(
+        "%(asctime)s - %(levelname)s:%(name)s - %(message)s"
+    )
     handler.setFormatter(formatter)

     # Set handler level and add to logger
@@ -76,10 +77,11 @@ def get_logger() -> logging.Logger:
     return logger


-class SavedSearchKeys(str, Enum):
+class SavedSearchKeys(StrEnum):
     """
     Various keys into the SavedSearch content
     """
+
     # setup the names of the keys we expect to access in content
     EARLIEST_TIME_KEY = "dispatch.earliest_time"
     LATEST_TIME_KEY = "dispatch.latest_time"
@@ -89,19 +91,21 @@ class SavedSearchKeys(str, Enum):
     DISBALED_KEY = "disabled"


-class Indexes(str, Enum):
+class Indexes(StrEnum):
     """
     Indexes we search against
     """
+
     # setup the names of the risk and notable indexes
     RISK_INDEX = "risk"
     NOTABLE_INDEX = "notable"


-class TimeoutConfig(int, Enum):
+class TimeoutConfig(IntEnum):
     """
     Configuration values for the exponential backoff timer
     """
+
     # base amount to sleep for before beginning exponential backoff during testing
     BASE_SLEEP = 60

@@ -115,10 +119,11 @@ class TimeoutConfig(int, Enum):

 # TODO (#226): evaluate sane defaults for timeframe for integration testing (e.g. 5y is good
 #   now, but maybe not always...); maybe set latest/earliest to None?
-class ScheduleConfig(str, Enum):
+class ScheduleConfig(StrEnum):
     """
     Configuraton values for the saved search schedule
     """
+
     EARLIEST_TIME = "-5y@y"
     LATEST_TIME = "-1m@m"
     CRON_SCHEDULE = "*/1 * * * *"
@@ -133,11 +138,10 @@ class ResultIterator:
     :param response_reader: a ResponseReader object
     :param logger: a Logger object
    """
+
    def __init__(self, response_reader: ResponseReader) -> None:
        # init the results reader
-        self.results_reader: JSONResultsReader = JSONResultsReader(
-            response_reader
-        )
+        self.results_reader: JSONResultsReader = JSONResultsReader(response_reader)

        # get logger
        self.logger: logging.Logger = get_logger()
@@ -145,24 +149,24 @@ class ResultIterator:
    def __iter__(self) -> "ResultIterator":
        return self

-    def __next__(self) -> dict:
+    def __next__(self) -> dict[Any, Any]:
        # Use a reader for JSON format so we can iterate over our results
        for result in self.results_reader:
            # log messages, or raise if error
            if isinstance(result, Message):
                # convert level string to level int
-                level_name = result.type.strip().upper()
+                level_name = result.type.strip().upper()  # type: ignore
                level: int = logging.getLevelName(level_name)

                # log message at appropriate level and raise if needed
-                message = f"SPLUNK: {result.message}"
+                message = f"SPLUNK: {result.message}"  # type: ignore
                self.logger.log(level, message)
                if level == logging.ERROR:
                    raise ServerError(message)

            # if dict, just return
            elif isinstance(result, dict):
-                return result
+                return result  # type: ignore

            # raise for any unexpected types
            else:
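
Note: the __next__ changes tighten typing on an otherwise ordinary iterator. ResultIterator wraps splunklib's JSONResultsReader, logging any Message items (and raising ServerError on ERROR-level ones) while yielding only the dict results. A toy stand-in for the same protocol, runnable on its own (ToyResults is hypothetical, not part of contentctl):

    # __iter__ returns self; __next__ returns a dict or raises StopIteration.
    class ToyResults:
        def __init__(self, rows: list[dict[str, str]]) -> None:
            self._rows = iter(rows)

        def __iter__(self) -> "ToyResults":
            return self

        def __next__(self) -> dict[str, str]:
            return next(self._rows)  # StopIteration ends the for-loop below

    for row in ToyResults([{"index": "risk"}, {"index": "notable"}]):
        print(row["index"])
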
@@ -179,14 +183,13 @@ class PbarData(BaseModel):
    :param fq_test_name: the fully qualifed (fq) test name ("<detection_name>:<test_name>") used for logging
    :param start_time: the start time used for logging
    """
-    pbar: tqdm # type: ignore
+
+    pbar: tqdm  # type: ignore
    fq_test_name: str
    start_time: float

    # needed to support the tqdm type
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True)


 class CorrelationSearch(BaseModel):
@@ -199,6 +202,7 @@ class CorrelationSearch(BaseModel):
    :param pbar_data: the encapsulated info needed for logging w/ pbar
    :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
    """
+
    # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
    detection: Detection = Field(...)

@@ -233,10 +237,7 @@ class CorrelationSearch(BaseModel):

    # Need arbitrary types to allow fields w/ types like SavedSearch; we also want to forbid
    # unexpected fields
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-        extra='forbid'
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")

    def model_post_init(self, __context: Any) -> None:
        super().model_post_init(__context)
@@ -310,9 +311,11 @@ class CorrelationSearch(BaseModel):
        The earliest time configured for the saved search
        """
        if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY.value]
+            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY]  # type: ignore
        else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

    @property
    def latest_time(self) -> str:
@@ -320,9 +323,11 @@ class CorrelationSearch(BaseModel):
        The latest time configured for the saved search
        """
        if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY.value]
+            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY]  # type: ignore
        else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

    @property
    def cron_schedule(self) -> str:
@@ -330,9 +335,11 @@ class CorrelationSearch(BaseModel):
        The cron schedule configured for the saved search
        """
        if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY.value]
+            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY]  # type: ignore
        else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

    @property
    def enabled(self) -> bool:
@@ -340,14 +347,16 @@ class CorrelationSearch(BaseModel):
        Whether the saved search is enabled
        """
        if self.saved_search is not None:
-            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY.value]):
+            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY]):  # type: ignore
                return False
            else:
                return True
        else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

-    @ property
+    @property
    def has_risk_analysis_action(self) -> bool:
        """Whether the correlation search has an associated risk analysis Adaptive Response Action
        :return: a boolean indicating whether it has a risk analysis Adaptive Response Action
@@ -368,7 +377,7 @@ class CorrelationSearch(BaseModel):
        :param content: a dict of strings to values
        :returns: a RiskAnalysisAction, or None if none exists
        """
-        if int(content[SavedSearchKeys.RISK_ACTION_KEY.value]):
+        if int(content[SavedSearchKeys.RISK_ACTION_KEY]):
            try:
                return RiskAnalysisAction.parse_from_dict(content)
            except ValueError as e:
@@ -383,23 +392,10 @@ class CorrelationSearch(BaseModel):
        :returns: a NotableAction, or None if none exists
        """
        # grab notable details if present
-        if int(content[SavedSearchKeys.NOTABLE_ACTION_KEY.value]):
+        if int(content[SavedSearchKeys.NOTABLE_ACTION_KEY]):
            return NotableAction.parse_from_dict(content)
        return None

-    @staticmethod
-    def _get_relevant_observables(observables: list[Observable]) -> list[Observable]:
-        """
-        Given a list of observables, identify the subset of those relevant for risk matching
-        :param observables: the Observable objects to filter
-        :returns: the filtered list of relevant observables
-        """
-        relevant = []
-        for observable in observables:
-            if not RiskEvent.ignore_observable(observable):
-                relevant.append(observable)
-        return relevant
-
    def _parse_risk_and_notable_actions(self) -> None:
        """Parses the risk/notable metadata we care about from self.saved_search.content
@@ -411,11 +407,13 @@ class CorrelationSearch(BaseModel):
        """
        # grab risk details if present
        self._risk_analysis_action = CorrelationSearch._get_risk_analysis_action(
-            self.saved_search.content # type: ignore
+            self.saved_search.content  # type: ignore
        )

        # grab notable details if present
-        self._notable_action = CorrelationSearch._get_notable_action(self.saved_search.content) # type: ignore
+        self._notable_action = CorrelationSearch._get_notable_action(
+            self.saved_search.content
+        )  # type: ignore

    def refresh(self) -> None:
        """Refreshes the metadata in the SavedSearch entity, and re-parses the fields we care about
@@ -423,10 +421,9 @@ class CorrelationSearch(BaseModel):
        After operations we expect to alter the state of the SavedSearch, we call refresh so that we have a local
        representation of the new state; then we extrat what we care about into this instance
        """
-        self.logger.debug(
-            f"Refreshing SavedSearch metadata for {self.name}...")
+        self.logger.debug(f"Refreshing SavedSearch metadata for {self.name}...")
        try:
-            self.saved_search.refresh() # type: ignore
+            self.saved_search.refresh()  # type: ignore
        except HTTPError as e:
            raise ServerError(f"HTTP error encountered during refresh: {e}")
        self._parse_risk_and_notable_actions()
@@ -440,7 +437,7 @@ class CorrelationSearch(BaseModel):
        """
        self.logger.debug(f"Enabling {self.name}...")
        try:
-            self.saved_search.enable() # type: ignore
+            self.saved_search.enable()  # type: ignore
        except HTTPError as e:
            raise ServerError(f"HTTP error encountered while enabling detection: {e}")
        if refresh:
@@ -455,7 +452,7 @@ class CorrelationSearch(BaseModel):
        """
        self.logger.debug(f"Disabling {self.name}...")
        try:
-            self.saved_search.disable() # type: ignore
+            self.saved_search.disable()  # type: ignore
        except HTTPError as e:
            raise ServerError(f"HTTP error encountered while disabling detection: {e}")
        if refresh:
@@ -463,10 +460,10 @@ class CorrelationSearch(BaseModel):

    def update_timeframe(
        self,
-        earliest_time: str = ScheduleConfig.EARLIEST_TIME.value,
-        latest_time: str = ScheduleConfig.LATEST_TIME.value,
-        cron_schedule: str = ScheduleConfig.CRON_SCHEDULE.value,
-        refresh: bool = True
+        earliest_time: str = ScheduleConfig.EARLIEST_TIME,
+        latest_time: str = ScheduleConfig.LATEST_TIME,
+        cron_schedule: str = ScheduleConfig.CRON_SCHEDULE,
+        refresh: bool = True,
    ) -> None:
        """Updates the correlation search timeframe to work with test data

@@ -481,21 +478,21 @@ class CorrelationSearch(BaseModel):
        """
        # update the SavedSearch accordingly
        data = {
-            SavedSearchKeys.EARLIEST_TIME_KEY.value: earliest_time,
-            SavedSearchKeys.LATEST_TIME_KEY.value: latest_time,
-            SavedSearchKeys.CRON_SCHEDULE_KEY.value: cron_schedule
+            SavedSearchKeys.EARLIEST_TIME_KEY: earliest_time,
+            SavedSearchKeys.LATEST_TIME_KEY: latest_time,
+            SavedSearchKeys.CRON_SCHEDULE_KEY: cron_schedule,
        }
        self.logger.info(data)
        self.logger.info(f"Updating timeframe for '{self.name}': {data}")
        try:
-            self.saved_search.update(**data) # type: ignore
+            self.saved_search.update(**data)  # type: ignore
        except HTTPError as e:
            raise ServerError(f"HTTP error encountered while updating timeframe: {e}")

        if refresh:
            self.refresh()

-    def force_run(self, refresh=True) -> None:
+    def force_run(self, refresh: bool = True) -> None:
        """Forces a detection run

        Enables the detection, adjusts the cron schedule to run every 1 minute, and widens the earliest/latest window
@@ -506,7 +503,7 @@ class CorrelationSearch(BaseModel):
        if not self.enabled:
            self.enable(refresh=False)
        else:
-            self.logger.warn(f"Detection '{self.name}' was already enabled")
+            self.logger.warning(f"Detection '{self.name}' was already enabled")

        if refresh:
            self.refresh()
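
Note: the warn to warning renames in this file are not cosmetic. logging.Logger.warn is a deprecated alias for Logger.warning and emits a DeprecationWarning on current Python versions. The preferred call, as a two-line sketch:

    import logging

    logger = logging.getLogger(__name__)
    logger.warning("something noteworthy")  # warn() is a deprecated alias
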
@@ -537,7 +534,9 @@ class CorrelationSearch(BaseModel):

        # Use the cached risk_events unless we're forcing an update
        if self._risk_events is not None:
-            self.logger.debug(f"Using cached risk events ({len(self._risk_events)} total).")
+            self.logger.debug(
+                f"Using cached risk events ({len(self._risk_events)} total)."
+            )
            return self._risk_events

        # TODO (#248): Refactor risk/notable querying to pin to a single savedsearch ID
@@ -554,12 +553,14 @@ class CorrelationSearch(BaseModel):
        for result in result_iterator:
            # sanity check that this result from the iterator is a risk event and not some
            # other metadata
-            if result["index"] == Indexes.RISK_INDEX.value:
+            if result["index"] == Indexes.RISK_INDEX:
                try:
                    parsed_raw = json.loads(result["_raw"])
-                    event = RiskEvent.parse_obj(parsed_raw)
+                    event = RiskEvent.model_validate(parsed_raw)
                except Exception:
-                    self.logger.error(f"Failed to parse RiskEvent from search result: {result}")
+                    self.logger.error(
+                        f"Failed to parse RiskEvent from search result: {result}"
+                    )
                    raise
                events.append(event)
                self.logger.debug(f"Found risk event for '{self.name}': {event}")
603
604
 
604
605
  # Use the cached notable_events unless we're forcing an update
605
606
  if self._notable_events is not None:
606
- self.logger.debug(f"Using cached notable events ({len(self._notable_events)} total).")
607
+ self.logger.debug(
608
+ f"Using cached notable events ({len(self._notable_events)} total)."
609
+ )
607
610
  return self._notable_events
608
611
 
609
612
  # Search for all notable events from a single scheduled search (indicated by orig_sid)
@@ -619,12 +622,14 @@ class CorrelationSearch(BaseModel):
619
622
  for result in result_iterator:
620
623
  # sanity check that this result from the iterator is a notable event and not some
621
624
  # other metadata
622
- if result["index"] == Indexes.NOTABLE_INDEX.value:
625
+ if result["index"] == Indexes.NOTABLE_INDEX:
623
626
  try:
624
627
  parsed_raw = json.loads(result["_raw"])
625
- event = NotableEvent.parse_obj(parsed_raw)
628
+ event = NotableEvent.model_validate(parsed_raw)
626
629
  except Exception:
627
- self.logger.error(f"Failed to parse NotableEvent from search result: {result}")
630
+ self.logger.error(
631
+ f"Failed to parse NotableEvent from search result: {result}"
632
+ )
628
633
  raise
629
634
  events.append(event)
630
635
  self.logger.debug(f"Found notable event for '{self.name}': {event}")
@@ -646,24 +651,23 @@ class CorrelationSearch(BaseModel):
        """Validates the existence of any expected risk events

        First ensure the risk event exists, and if it does validate its risk message and make sure
-        any events align with the specified observables. Also adds the risk index to the purge list
+        any events align with the specified risk object. Also adds the risk index to the purge list
        if risk events existed
        :param elapsed_sleep_time: an int representing the amount of time slept thus far waiting to
            check the risks/notables
        :returns: an IntegrationTestResult on failure; None on success
        """
-        # Create a mapping of the relevant observables to counters
-        observables = CorrelationSearch._get_relevant_observables(self.detection.tags.observable)
-        observable_counts: dict[str, int] = {str(x): 0 for x in observables}
-
-        # NOTE: we intentionally want this to be an error state and not a failure state, as
-        # ultimately this validation should be handled during the build process
-        if len(observables) != len(observable_counts):
-            raise ClientError(
-                f"At least two observables in '{self.detection.name}' have the same name; "
-                "each observable for a detection should be unique."
+        # Ensure the rba object is defined
+        if self.detection.rba is None:
+            raise ValidationFailed(
+                f"Unexpected error: Detection '{self.detection.name}' has no RBA objects associated"
+                " with it; cannot validate."
            )

+        risk_object_counts: dict[int, int] = {
+            id(x): 0 for x in self.detection.rba.risk_objects
+        }
+
        # Get the risk events; note that we use the cached risk events, expecting they were
        # saved by a prior call to risk_event_exists
        events = self.get_risk_events()
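
Note: the counters switch from str(x) keys to id(x) keys. Keying on object identity avoids requiring the risk objects to be hashable or to have unique string forms; the keys are only meaningful while those exact instances are alive, which holds for the duration of this method. A small sketch of the pattern (RiskObject below is a hypothetical stand-in, unhashable by default as a plain dataclass):

    from dataclasses import dataclass

    @dataclass
    class RiskObject:
        field: str
        type: str

    objects = [RiskObject("user", "user"), RiskObject("dest", "system")]
    counts: dict[int, int] = {id(x): 0 for x in objects}

    for matched in (objects[0], objects[0], objects[1]):
        counts[id(matched)] += 1  # identity key: no __hash__ required

    print([counts[id(x)] for x in objects])  # [2, 1]
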
@@ -673,63 +677,68 @@ class CorrelationSearch(BaseModel):
        for event in events:
            c += 1
            self.logger.debug(
-                f"Validating risk event ({event.risk_object}, {event.risk_object_type}): "
+                f"Validating risk event ({event.es_risk_object}, {event.es_risk_object_type}): "
                f"{c}/{len(events)}"
            )
            event.validate_against_detection(self.detection)

-            # Update observable count based on match
-            matched_observable = event.get_matched_observable(self.detection.tags.observable)
+            # Update risk object count based on match
+            matched_risk_object = event.get_matched_risk_object(
+                self.detection.rba.risk_objects
+            )
            self.logger.debug(
-                f"Matched risk event (object={event.risk_object}, type={event.risk_object_type}) "
-                f"to observable (name={matched_observable.name}, type={matched_observable.type}, "
-                f"role={matched_observable.role}) using the source field "
+                f"Matched risk event (object={event.es_risk_object}, type={event.es_risk_object_type}) "
+                f"to detection's risk object (name={matched_risk_object.field}, "
+                f"type={matched_risk_object.type.value}) using the source field "
                f"'{event.source_field_name}'"
            )
-            observable_counts[str(matched_observable)] += 1
+            risk_object_counts[id(matched_risk_object)] += 1

-        # Report any observables which did not have at least one match to a risk event
-        for observable in observables:
+        # Report any risk objects which did not have at least one match to a risk event
+        for risk_object in self.detection.rba.risk_objects:
            self.logger.debug(
-                f"Matched observable (name={observable.name}, type={observable.type}, "
-                f"role={observable.role}) to {observable_counts[str(observable)]} risk events."
+                f"Matched risk object (name={risk_object.field}, type={risk_object.type.value} "
+                f"to {risk_object_counts[id(risk_object)]} risk events."
            )
-            if observable_counts[str(observable)] == 0:
+            if risk_object_counts[id(risk_object)] == 0:
                raise ValidationFailed(
-                    f"Observable (name={observable.name}, type={observable.type}, "
-                    f"role={observable.role}) was not matched to any risk events."
+                    f"Risk object (name={risk_object.field}, type={risk_object.type.value}) "
+                    "was not matched to any risk events."
                )

        # TODO (#250): Re-enable and refactor code that validates the specific risk counts
        # Validate risk events in aggregate; we should have an equal amount of risk events for each
-        # relevant observable, and the total count should match the total number of events
+        # relevant risk object, and the total count should match the total number of events
        # individual_count: int | None = None
        # total_count = 0
-        # for observable_str in observable_counts:
+        # for risk_object_id in risk_object_counts:
        #     self.logger.debug(
-        #         f"Observable <{observable_str}> match count: {observable_counts[observable_str]}"
+        #         f"Risk object <{risk_object_id}> match count: {risk_object_counts[risk_object_id]}"
        #     )

        #     # Grab the first value encountered if not set yet
        #     if individual_count is None:
-        #         individual_count = observable_counts[observable_str]
+        #         individual_count = risk_object_counts[risk_object_id]
        #     else:
-        #         # Confirm that the count for the current observable matches the count of the others
-        #         if observable_counts[observable_str] != individual_count:
+        #         # Confirm that the count for the current risk object matches the count of the
+        #         # others
+        #         if risk_object_counts[risk_object_id] != individual_count:
        #             raise ValidationFailed(
-        #                 f"Count of risk events matching observable <\"{observable_str}\"> "
-        #                 f"({observable_counts[observable_str]}) does not match the count of those "
-        #                 f"matching other observables ({individual_count})."
+        #                 f"Count of risk events matching detection's risk object <\"{risk_object_id}\"> "
+        #                 f"({risk_object_counts[risk_object_id]}) does not match the count of those "
+        #                 f"matching other risk objects ({individual_count})."
        #             )

-        #     # Aggregate total count of events matched to observables
-        #     total_count += observable_counts[observable_str]
+        #     # Aggregate total count of events matched to risk objects
+        #     total_count += risk_object_counts[risk_object_id]

-        # # Raise if the the number of events doesn't match the number of those matched to observables
+        # # Raise if the the number of events doesn't match the number of those matched to risk
+        # # objects
        # if len(events) != total_count:
        #     raise ValidationFailed(
        #         f"The total number of risk events {len(events)} does not match the number of "
-        #         f"risk events we were able to match against observables ({total_count})."
+        #         "risk events we were able to match against risk objects from the detection "
+        #         f"({total_count})."
        #     )

        # TODO (PEX-434): implement deeper notable validation
@@ -746,7 +755,9 @@ class CorrelationSearch(BaseModel):

    # NOTE: it would be more ideal to switch this to a system which gets the handle of the saved search job and polls
    # it for completion, but that seems more tricky
-    def test(self, max_sleep: int = TimeoutConfig.MAX_SLEEP.value, raise_on_exc: bool = False) -> IntegrationTestResult:
+    def test(
+        self, max_sleep: int = TimeoutConfig.MAX_SLEEP, raise_on_exc: bool = False
+    ) -> IntegrationTestResult:
        """Execute the integration test

        Executes an integration test for this CorrelationSearch. First, ensures no matching risk/notables already exist
@@ -760,10 +771,10 @@ class CorrelationSearch(BaseModel):
        """
        # max_sleep must be greater than the base value we must wait for the scheduled searchjob to run (jobs run every
        # 60s)
-        if max_sleep < TimeoutConfig.BASE_SLEEP.value:
+        if max_sleep < TimeoutConfig.BASE_SLEEP:
            raise ClientError(
                f"max_sleep value of {max_sleep} is less than the base sleep required "
-                f"({TimeoutConfig.BASE_SLEEP.value})"
+                f"({TimeoutConfig.BASE_SLEEP})"
            )

        # initialize result as None
@@ -774,20 +785,18 @@ class CorrelationSearch(BaseModel):
        num_tries = 0

        # set the initial base sleep time
-        time_to_sleep = TimeoutConfig.BASE_SLEEP.value
+        time_to_sleep = TimeoutConfig.BASE_SLEEP

        try:
            # first make sure the indexes are currently empty and the detection is starting from a disabled state
-            self.logger.debug(
-                "Cleaning up any pre-existing risk/notable events..."
-            )
+            self.logger.debug("Cleaning up any pre-existing risk/notable events...")
            self.update_pbar(TestingStates.PRE_CLEANUP)
            if self.risk_event_exists():
-                self.logger.warn(
+                self.logger.warning(
                    f"Risk events matching '{self.name}' already exist; marking for deletion"
                )
            if self.notable_event_exists():
-                self.logger.warn(
+                self.logger.warning(
                    f"Notable events matching '{self.name}' already exist; marking for deletion"
                )
            self.cleanup()
@@ -812,7 +821,9 @@ class CorrelationSearch(BaseModel):
            # loop so long as the elapsed time is less than max_sleep
            while elapsed_sleep_time < max_sleep:
                # sleep so the detection job can finish
-                self.logger.info(f"Waiting {time_to_sleep} for {self.name} so it can finish")
+                self.logger.info(
+                    f"Waiting {time_to_sleep} for {self.name} so it can finish"
+                )
                self.update_pbar(TestingStates.VALIDATING)
                time.sleep(time_to_sleep)
                elapsed_sleep_time += time_to_sleep
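
Note: the loop above is the exponential backoff that TimeoutConfig parameterizes: wait BASE_SLEEP first (a scheduled search fires within roughly 60s), then keep waiting until max_sleep is exhausted. A self-contained sketch of that shape (the doubling factor and the MAX_SLEEP value are assumptions for illustration, not taken from the diff):

    import time
    from typing import Callable

    BASE_SLEEP = 60   # first wait, mirroring TimeoutConfig.BASE_SLEEP
    MAX_SLEEP = 240   # illustrative cap only

    def wait_with_backoff(check: Callable[[], bool]) -> bool:
        """Poll check() with growing sleeps until MAX_SLEEP is spent."""
        elapsed, to_sleep = 0, BASE_SLEEP
        while elapsed < MAX_SLEEP:
            time.sleep(to_sleep)
            elapsed += to_sleep
            if check():
                return True
            to_sleep = min(to_sleep * 2, MAX_SLEEP - elapsed)  # assumed growth
        return False
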
@@ -901,7 +912,7 @@ class CorrelationSearch(BaseModel):
                    wait_duration=elapsed_sleep_time,
                    exception=e,
                )
-                self.logger.exception(result.message) # type: ignore
+                self.logger.exception(result.message)  # type: ignore
            else:
                raise e
        except Exception as e:
@@ -911,7 +922,10 @@ class CorrelationSearch(BaseModel):

        # log based on result status
        if result is not None:
-            if result.status == TestResultStatus.PASS or result.status == TestResultStatus.SKIP:
+            if (
+                result.status == TestResultStatus.PASS
+                or result.status == TestResultStatus.SKIP
+            ):
                self.logger.info(f"{result.status.name}: {result.message}")
            elif result.status == TestResultStatus.FAIL:
                self.logger.error(f"{result.status.name}: {result.message}")
@@ -934,11 +948,11 @@ class CorrelationSearch(BaseModel):
        :param query: the SPL string to run
        """
        self.logger.debug(f"Executing query: `{query}`")
-        job = self.service.search(query, exec_mode="blocking")
+        job = self.service.search(query, exec_mode="blocking")  # type: ignore

        # query the results, catching any HTTP status code errors
        try:
-            response_reader: ResponseReader = job.results(output_mode="json")
+            response_reader: ResponseReader = job.results(output_mode="json")  # type: ignore
        except HTTPError as e:
            # e.g. -> HTTP 400 Bad Request -- b'{"messages":[{"type":"FATAL","text":"Error in \'delete\' command: You
            # have insufficient privileges to delete events."}]}'
@@ -946,7 +960,7 @@ class CorrelationSearch(BaseModel):
            self.logger.error(message)
            raise ServerError(message)

-        return ResultIterator(response_reader)
+        return ResultIterator(response_reader)  # type: ignore

    def _delete_index(self, index: str) -> None:
        """Deletes events in a given index
@@ -979,7 +993,7 @@ class CorrelationSearch(BaseModel):
            message = f"No result returned showing deletion in index {index}"
            raise ServerError(message)

-    def cleanup(self, delete_test_index=False) -> None:
+    def cleanup(self, delete_test_index: bool = False) -> None:
        """Cleans up after an integration test

        First, disable the detection; then dump the risk, notable, and (optionally) test indexes. The test index is
@@ -997,11 +1011,11 @@ class CorrelationSearch(BaseModel):

        # Add indexes to purge
        if delete_test_index:
-            self.indexes_to_purge.add(self.test_index) # type: ignore
+            self.indexes_to_purge.add(self.test_index)  # type: ignore
        if self._risk_events is not None:
-            self.indexes_to_purge.add(Indexes.RISK_INDEX.value)
+            self.indexes_to_purge.add(Indexes.RISK_INDEX)
        if self._notable_events is not None:
-            self.indexes_to_purge.add(Indexes.NOTABLE_INDEX.value)
+            self.indexes_to_purge.add(Indexes.NOTABLE_INDEX)

        # delete the indexes
        for index in self.indexes_to_purge:
@@ -1025,5 +1039,5 @@ class CorrelationSearch(BaseModel):
            self.pbar_data.fq_test_name,
            state,
            self.pbar_data.start_time,
-            True
+            True,
        )