contentctl 5.0.0a0__py3-none-any.whl → 5.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. contentctl/__init__.py +1 -1
  2. contentctl/actions/build.py +88 -55
  3. contentctl/actions/deploy_acs.py +29 -24
  4. contentctl/actions/detection_testing/DetectionTestingManager.py +66 -41
  5. contentctl/actions/detection_testing/GitService.py +134 -76
  6. contentctl/actions/detection_testing/generate_detection_coverage_badge.py +48 -30
  7. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +163 -124
  8. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +45 -32
  9. contentctl/actions/detection_testing/progress_bar.py +3 -0
  10. contentctl/actions/detection_testing/views/DetectionTestingView.py +15 -18
  11. contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +1 -5
  12. contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +2 -2
  13. contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +1 -4
  14. contentctl/actions/doc_gen.py +9 -5
  15. contentctl/actions/initialize.py +45 -33
  16. contentctl/actions/inspect.py +118 -61
  17. contentctl/actions/new_content.py +78 -50
  18. contentctl/actions/release_notes.py +276 -146
  19. contentctl/actions/reporting.py +23 -19
  20. contentctl/actions/test.py +31 -25
  21. contentctl/actions/validate.py +54 -34
  22. contentctl/api.py +54 -45
  23. contentctl/contentctl.py +12 -13
  24. contentctl/enrichments/attack_enrichment.py +112 -72
  25. contentctl/enrichments/cve_enrichment.py +34 -28
  26. contentctl/enrichments/splunk_app_enrichment.py +38 -36
  27. contentctl/helper/link_validator.py +101 -78
  28. contentctl/helper/splunk_app.py +69 -41
  29. contentctl/helper/utils.py +58 -39
  30. contentctl/input/director.py +69 -37
  31. contentctl/input/new_content_questions.py +26 -34
  32. contentctl/input/yml_reader.py +22 -17
  33. contentctl/objects/abstract_security_content_objects/detection_abstract.py +250 -314
  34. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +58 -36
  35. contentctl/objects/alert_action.py +8 -8
  36. contentctl/objects/annotated_types.py +1 -1
  37. contentctl/objects/atomic.py +64 -54
  38. contentctl/objects/base_test.py +2 -1
  39. contentctl/objects/base_test_result.py +16 -8
  40. contentctl/objects/baseline.py +41 -30
  41. contentctl/objects/baseline_tags.py +29 -22
  42. contentctl/objects/config.py +772 -560
  43. contentctl/objects/constants.py +29 -58
  44. contentctl/objects/correlation_search.py +75 -55
  45. contentctl/objects/dashboard.py +55 -41
  46. contentctl/objects/data_source.py +13 -13
  47. contentctl/objects/deployment.py +44 -37
  48. contentctl/objects/deployment_email.py +1 -1
  49. contentctl/objects/deployment_notable.py +2 -1
  50. contentctl/objects/deployment_phantom.py +5 -5
  51. contentctl/objects/deployment_rba.py +1 -1
  52. contentctl/objects/deployment_scheduling.py +1 -1
  53. contentctl/objects/deployment_slack.py +1 -1
  54. contentctl/objects/detection.py +5 -2
  55. contentctl/objects/detection_metadata.py +1 -0
  56. contentctl/objects/detection_stanza.py +7 -2
  57. contentctl/objects/detection_tags.py +54 -64
  58. contentctl/objects/drilldown.py +66 -35
  59. contentctl/objects/enums.py +61 -43
  60. contentctl/objects/errors.py +16 -24
  61. contentctl/objects/integration_test.py +3 -3
  62. contentctl/objects/integration_test_result.py +1 -0
  63. contentctl/objects/investigation.py +41 -26
  64. contentctl/objects/investigation_tags.py +29 -17
  65. contentctl/objects/lookup.py +234 -113
  66. contentctl/objects/macro.py +55 -38
  67. contentctl/objects/manual_test.py +3 -3
  68. contentctl/objects/manual_test_result.py +1 -0
  69. contentctl/objects/mitre_attack_enrichment.py +17 -16
  70. contentctl/objects/notable_action.py +2 -1
  71. contentctl/objects/notable_event.py +1 -3
  72. contentctl/objects/playbook.py +37 -35
  73. contentctl/objects/playbook_tags.py +22 -16
  74. contentctl/objects/rba.py +14 -8
  75. contentctl/objects/risk_analysis_action.py +15 -11
  76. contentctl/objects/risk_event.py +27 -20
  77. contentctl/objects/risk_object.py +1 -0
  78. contentctl/objects/savedsearches_conf.py +9 -7
  79. contentctl/objects/security_content_object.py +5 -2
  80. contentctl/objects/story.py +45 -44
  81. contentctl/objects/story_tags.py +56 -44
  82. contentctl/objects/test_group.py +5 -2
  83. contentctl/objects/threat_object.py +1 -0
  84. contentctl/objects/throttling.py +27 -18
  85. contentctl/objects/unit_test.py +3 -4
  86. contentctl/objects/unit_test_baseline.py +4 -5
  87. contentctl/objects/unit_test_result.py +6 -6
  88. contentctl/output/api_json_output.py +22 -22
  89. contentctl/output/attack_nav_output.py +21 -21
  90. contentctl/output/attack_nav_writer.py +29 -37
  91. contentctl/output/conf_output.py +230 -174
  92. contentctl/output/data_source_writer.py +38 -25
  93. contentctl/output/doc_md_output.py +53 -27
  94. contentctl/output/jinja_writer.py +19 -15
  95. contentctl/output/json_writer.py +20 -8
  96. contentctl/output/svg_output.py +56 -38
  97. contentctl/output/templates/savedsearches_detections.j2 +1 -1
  98. contentctl/output/templates/transforms.j2 +2 -2
  99. contentctl/output/yml_writer.py +18 -24
  100. {contentctl-5.0.0a0.dist-info → contentctl-5.0.0a3.dist-info}/METADATA +1 -1
  101. contentctl-5.0.0a3.dist-info/RECORD +168 -0
  102. contentctl/actions/initialize_old.py +0 -245
  103. contentctl/objects/observable.py +0 -39
  104. contentctl-5.0.0a0.dist-info/RECORD +0 -170
  105. {contentctl-5.0.0a0.dist-info → contentctl-5.0.0a3.dist-info}/LICENSE.md +0 -0
  106. {contentctl-5.0.0a0.dist-info → contentctl-5.0.0a3.dist-info}/WHEEL +0 -0
  107. {contentctl-5.0.0a0.dist-info → contentctl-5.0.0a3.dist-info}/entry_points.txt +0 -0
contentctl/objects/constants.py
@@ -15,7 +15,7 @@ ATTACK_TACTICS_KILLCHAIN_MAPPING = {
     "Collection": "Exploitation",
     "Command And Control": "Command and Control",
     "Exfiltration": "Actions on Objectives",
-    "Impact": "Actions on Objectives"
+    "Impact": "Actions on Objectives",
 }
 
 SES_CONTEXT_MAPPING = {
@@ -65,7 +65,7 @@ SES_CONTEXT_MAPPING = {
     "Other:Policy Violation": 82,
     "Other:Threat Intelligence": 83,
     "Other:Flight Risk": 84,
-    "Other:Removable Storage": 85
+    "Other:Removable Storage": 85,
 }
 
 SES_KILL_CHAIN_MAPPINGS = {
@@ -76,49 +76,9 @@ SES_KILL_CHAIN_MAPPINGS = {
     "Exploitation": 4,
     "Installation": 5,
     "Command and Control": 6,
-    "Actions on Objectives": 7
+    "Actions on Objectives": 7,
 }
 
-# TODO (cmcginley): @ljstella should this be removed? also referenced in new_content.py
-SES_OBSERVABLE_ROLE_MAPPING = {
-    "Other": -1,
-    "Unknown": 0,
-    "Actor": 1,
-    "Target": 2,
-    "Attacker": 3,
-    "Victim": 4,
-    "Parent Process": 5,
-    "Child Process": 6,
-    "Known Bad": 7,
-    "Data Loss": 8,
-    "Observer": 9
-}
-
-# TODO (cmcginley): @ljstella should this be removed? also referenced in new_content.py
-SES_OBSERVABLE_TYPE_MAPPING = {
-    "Unknown": 0,
-    "Hostname": 1,
-    "IP Address": 2,
-    "MAC Address": 3,
-    "User Name": 4,
-    "Email Address": 5,
-    "URL String": 6,
-    "File Name": 7,
-    "File Hash": 8,
-    "Process Name": 9,
-    "Resource UID": 10,
-    "Endpoint": 20,
-    "User": 21,
-    "Email": 22,
-    "Uniform Resource Locator": 23,
-    "File": 24,
-    "Process": 25,
-    "Geo Location": 26,
-    "Container": 27,
-    "Registry Key": 28,
-    "Registry Value": 29,
-    "Other": 99
-}
 
 SES_ATTACK_TACTICS_ID_MAPPING = {
     "Reconnaissance": "TA0043",
@@ -134,24 +94,19 @@ SES_ATTACK_TACTICS_ID_MAPPING = {
     "Collection": "TA0009",
     "Command_and_Control": "TA0011",
     "Exfiltration": "TA0010",
-    "Impact": "TA0040"
+    "Impact": "TA0040",
 }
 
-# TODO (cmcginley): is this just for the transition testing?
-RBA_OBSERVABLE_ROLE_MAPPING = {
-    "Attacker": 0,
-    "Victim": 1
-}
 
 # The relative path to the directory where any apps/packages will be downloaded
 DOWNLOADS_DIRECTORY = "downloads"
 
 # Maximum length of the name field for a search.
-# This number is derived from a limitation that exists in 
+# This number is derived from a limitation that exists in
 # ESCU where a search cannot be edited, due to validation
 # errors, if its name is longer than 99 characters.
 # When an saved search is cloned in Enterprise Security User Interface,
-# it is wrapped in the following: 
+# it is wrapped in the following:
 # {Detection.tags.security_domain} - {SEARCH_STANZA_NAME} - Rule
 # Similarly, when we generate the search stanza name in contentctl, it
 # is app.label - detection.name - Rule
@@ -160,16 +115,32 @@ DOWNLOADS_DIRECTORY = "downloads"
 # or in ESCU:
 # ESCU - {detection.name} - Rule,
 # this gives us a maximum length below.
-# When an ESCU search is cloned, it will 
+# When an ESCU search is cloned, it will
 # have a full name like (the following is NOT a typo):
 # Endpoint - ESCU - Name of Search From YML File - Rule - Rule
 # The math below accounts for all these caveats
 ES_MAX_STANZA_LENGTH = 99
-CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE = "{app_label} - {detection_name} - Rule"
+CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE = (
+    "{app_label} - {detection_name} - Rule"
+)
 CONTENTCTL_BASELINE_STANZA_NAME_FORMAT_TEMPLATE = "{app_label} - {detection_name}"
-CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE = "{app_label} - {detection_name} - Response Task"
+CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE = (
+    "{app_label} - {detection_name} - Response Task"
+)
 
-ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE = "{security_domain_value} - {search_name} - Rule"
-SECURITY_DOMAIN_MAX_LENGTH = max([len(SecurityDomain[value]) for value in SecurityDomain._member_map_])
-CONTENTCTL_MAX_STANZA_LENGTH = ES_MAX_STANZA_LENGTH - len(ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE.format(security_domain_value="X"*SECURITY_DOMAIN_MAX_LENGTH,search_name=""))
-CONTENTCTL_MAX_SEARCH_NAME_LENGTH = CONTENTCTL_MAX_STANZA_LENGTH - len(CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE.format(app_label="ESCU", detection_name=""))
+ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE = (
+    "{security_domain_value} - {search_name} - Rule"
+)
+SECURITY_DOMAIN_MAX_LENGTH = max(
+    [len(SecurityDomain[value]) for value in SecurityDomain._member_map_]
+)
+CONTENTCTL_MAX_STANZA_LENGTH = ES_MAX_STANZA_LENGTH - len(
+    ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE.format(
+        security_domain_value="X" * SECURITY_DOMAIN_MAX_LENGTH, search_name=""
+    )
+)
+CONTENTCTL_MAX_SEARCH_NAME_LENGTH = CONTENTCTL_MAX_STANZA_LENGTH - len(
+    CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE.format(
+        app_label="ESCU", detection_name=""
+    )
+)
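
A short worked example (not part of the package) makes the length arithmetic above concrete. SECURITY_DOMAIN_MAX_LENGTH = 8 is an assumption for illustration, since the SecurityDomain enum is not shown in this diff; the two template strings are taken verbatim from the hunk.

# Worked example of the stanza-length math in constants.py.
ES_MAX_STANZA_LENGTH = 99
SECURITY_DOMAIN_MAX_LENGTH = 8  # assumed (e.g. "endpoint"); the real value comes from SecurityDomain

# Overhead ES adds when cloning: "XXXXXXXX -  - Rule" is 18 characters
cloned = "{security_domain_value} - {search_name} - Rule".format(
    security_domain_value="X" * SECURITY_DOMAIN_MAX_LENGTH, search_name=""
)
CONTENTCTL_MAX_STANZA_LENGTH = ES_MAX_STANZA_LENGTH - len(cloned)  # 99 - 18 = 81

# Overhead of contentctl's own stanza wrapper: "ESCU -  - Rule" is 14 characters
wrapped = "{app_label} - {detection_name} - Rule".format(app_label="ESCU", detection_name="")
CONTENTCTL_MAX_SEARCH_NAME_LENGTH = CONTENTCTL_MAX_STANZA_LENGTH - len(wrapped)  # 81 - 14 = 67

Under these assumptions a detection name may be at most 67 characters before the doubly wrapped clone would exceed the 99-character limit.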
contentctl/objects/correlation_search.py
@@ -6,25 +6,25 @@ from enum import StrEnum, IntEnum
 from functools import cached_property
 
 from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
-from splunklib.results import JSONResultsReader, Message # type: ignore
-from splunklib.binding import HTTPError, ResponseReader # type: ignore
-import splunklib.client as splunklib # type: ignore
-from tqdm import tqdm # type: ignore
+from splunklib.results import JSONResultsReader, Message  # type: ignore
+from splunklib.binding import HTTPError, ResponseReader  # type: ignore
+import splunklib.client as splunklib  # type: ignore
+from tqdm import tqdm  # type: ignore
 
 from contentctl.objects.risk_analysis_action import RiskAnalysisAction
 from contentctl.objects.notable_action import NotableAction
 from contentctl.objects.base_test_result import TestResultStatus
 from contentctl.objects.integration_test_result import IntegrationTestResult
 from contentctl.actions.detection_testing.progress_bar import (
-    format_pbar_string, # type: ignore
+    format_pbar_string,  # type: ignore
     TestReportingType,
-    TestingStates
+    TestingStates,
 )
 from contentctl.objects.errors import (
     IntegrationTestingError,
     ServerError,
     ClientError,
-    ValidationFailed
+    ValidationFailed,
 )
 from contentctl.objects.detection import Detection
 from contentctl.objects.risk_event import RiskEvent
@@ -65,7 +65,9 @@ def get_logger() -> logging.Logger:
     handler = logging.NullHandler()
 
     # Format our output
-    formatter = logging.Formatter('%(asctime)s - %(levelname)s:%(name)s - %(message)s')
+    formatter = logging.Formatter(
+        "%(asctime)s - %(levelname)s:%(name)s - %(message)s"
+    )
     handler.setFormatter(formatter)
 
     # Set handler level and add to logger
@@ -79,6 +81,7 @@ class SavedSearchKeys(StrEnum):
     """
     Various keys into the SavedSearch content
     """
+
     # setup the names of the keys we expect to access in content
     EARLIEST_TIME_KEY = "dispatch.earliest_time"
     LATEST_TIME_KEY = "dispatch.latest_time"
@@ -92,6 +95,7 @@ class Indexes(StrEnum):
     """
     Indexes we search against
    """
+
     # setup the names of the risk and notable indexes
     RISK_INDEX = "risk"
     NOTABLE_INDEX = "notable"
@@ -101,6 +105,7 @@ class TimeoutConfig(IntEnum):
     """
     Configuration values for the exponential backoff timer
     """
+
     # base amount to sleep for before beginning exponential backoff during testing
     BASE_SLEEP = 60
 
@@ -118,6 +123,7 @@ class ScheduleConfig(StrEnum):
     """
     Configuraton values for the saved search schedule
     """
+
     EARLIEST_TIME = "-5y@y"
     LATEST_TIME = "-1m@m"
     CRON_SCHEDULE = "*/1 * * * *"
@@ -132,11 +138,10 @@ class ResultIterator:
     :param response_reader: a ResponseReader object
     :param logger: a Logger object
     """
+
     def __init__(self, response_reader: ResponseReader) -> None:
         # init the results reader
-        self.results_reader: JSONResultsReader = JSONResultsReader(
-            response_reader
-        )
+        self.results_reader: JSONResultsReader = JSONResultsReader(response_reader)
 
         # get logger
         self.logger: logging.Logger = get_logger()
@@ -150,18 +155,18 @@
             # log messages, or raise if error
             if isinstance(result, Message):
                 # convert level string to level int
-                level_name = result.type.strip().upper() # type: ignore
+                level_name = result.type.strip().upper()  # type: ignore
                 level: int = logging.getLevelName(level_name)
 
                 # log message at appropriate level and raise if needed
-                message = f"SPLUNK: {result.message}" # type: ignore
+                message = f"SPLUNK: {result.message}"  # type: ignore
                 self.logger.log(level, message)
                 if level == logging.ERROR:
                     raise ServerError(message)
 
             # if dict, just return
             elif isinstance(result, dict):
-                return result # type: ignore
+                return result  # type: ignore
 
             # raise for any unexpected types
             else:
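
For readers unfamiliar with the splunklib plumbing: _search wraps a blocking job's JSON results in a ResultIterator (see the _search hunk later in this file), and iterating it yields plain dicts while Splunk's own diagnostic messages are logged or, at ERROR level, raised as ServerError. The helper below is a hypothetical sketch of that consumption pattern; it assumes ResultIterator also implements __iter__, as iterator classes conventionally do.

# Hypothetical consumer of ResultIterator (not part of this diff).
def count_results(search: CorrelationSearch, spl: str) -> int:
    count = 0
    for result in search._search(spl):  # _search returns a ResultIterator
        _ = result["_raw"]              # the same "_raw" field the risk/notable getters parse
        count += 1
    return count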
@@ -178,14 +183,13 @@ class PbarData(BaseModel):
     :param fq_test_name: the fully qualifed (fq) test name ("<detection_name>:<test_name>") used for logging
     :param start_time: the start time used for logging
     """
-    pbar: tqdm # type: ignore
+
+    pbar: tqdm  # type: ignore
     fq_test_name: str
     start_time: float
 
     # needed to support the tqdm type
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
 
 class CorrelationSearch(BaseModel):
@@ -198,6 +202,7 @@ class CorrelationSearch(BaseModel):
     :param pbar_data: the encapsulated info needed for logging w/ pbar
     :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
     """
+
     # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
     detection: Detection = Field(...)
 
@@ -232,10 +237,7 @@
 
     # Need arbitrary types to allow fields w/ types like SavedSearch; we also want to forbid
     # unexpected fields
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-        extra='forbid'
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")
 
     def model_post_init(self, __context: Any) -> None:
         super().model_post_init(__context)
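
A gloss on the model_config lines above: pydantic v2 cannot generate a validation schema for arbitrary classes such as splunklib's SavedSearch or tqdm, so models holding them must opt in with arbitrary_types_allowed=True, while extra="forbid" rejects unexpected fields. A minimal sketch, independent of contentctl:

from pydantic import BaseModel, ConfigDict

class Widget:
    """A plain class pydantic cannot build a schema for."""

class Holder(BaseModel):
    # Without arbitrary_types_allowed=True, defining this model raises
    # PydanticSchemaGenerationError; extra="forbid" rejects unknown fields.
    model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")
    widget: Widget  # validated only by an isinstance() check

holder = Holder(widget=Widget())                    # ok
# Holder(widget=Widget(), other=1) -> ValidationError, because extra="forbid"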
@@ -309,7 +311,7 @@
         The earliest time configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY] # type: ignore
+            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY]  # type: ignore
         else:
             raise ClientError(
                 "Something unexpected went wrong in initialization; saved_search was not populated"
@@ -321,7 +323,7 @@
         The latest time configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY] # type: ignore
+            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY]  # type: ignore
         else:
             raise ClientError(
                 "Something unexpected went wrong in initialization; saved_search was not populated"
@@ -333,7 +335,7 @@
         The cron schedule configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY] # type: ignore
+            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY]  # type: ignore
         else:
             raise ClientError(
                 "Something unexpected went wrong in initialization; saved_search was not populated"
@@ -345,7 +347,7 @@
         Whether the saved search is enabled
         """
         if self.saved_search is not None:
-            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY]): # type: ignore
+            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY]):  # type: ignore
                 return False
             else:
                 return True
@@ -354,7 +356,7 @@
                 "Something unexpected went wrong in initialization; saved_search was not populated"
             )
 
-    @ property
+    @property
     def has_risk_analysis_action(self) -> bool:
         """Whether the correlation search has an associated risk analysis Adaptive Response Action
         :return: a boolean indicating whether it has a risk analysis Adaptive Response Action
@@ -405,11 +407,13 @@
         """
         # grab risk details if present
         self._risk_analysis_action = CorrelationSearch._get_risk_analysis_action(
-            self.saved_search.content # type: ignore
+            self.saved_search.content  # type: ignore
         )
 
         # grab notable details if present
-        self._notable_action = CorrelationSearch._get_notable_action(self.saved_search.content) # type: ignore
+        self._notable_action = CorrelationSearch._get_notable_action(
+            self.saved_search.content
+        )  # type: ignore
 
     def refresh(self) -> None:
         """Refreshes the metadata in the SavedSearch entity, and re-parses the fields we care about
@@ -417,10 +421,9 @@
         After operations we expect to alter the state of the SavedSearch, we call refresh so that we have a local
         representation of the new state; then we extrat what we care about into this instance
         """
-        self.logger.debug(
-            f"Refreshing SavedSearch metadata for {self.name}...")
+        self.logger.debug(f"Refreshing SavedSearch metadata for {self.name}...")
         try:
-            self.saved_search.refresh() # type: ignore
+            self.saved_search.refresh()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered during refresh: {e}")
         self._parse_risk_and_notable_actions()
@@ -434,7 +437,7 @@
         """
         self.logger.debug(f"Enabling {self.name}...")
         try:
-            self.saved_search.enable() # type: ignore
+            self.saved_search.enable()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while enabling detection: {e}")
         if refresh:
@@ -449,7 +452,7 @@
         """
         self.logger.debug(f"Disabling {self.name}...")
         try:
-            self.saved_search.disable() # type: ignore
+            self.saved_search.disable()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while disabling detection: {e}")
         if refresh:
@@ -460,7 +463,7 @@
         earliest_time: str = ScheduleConfig.EARLIEST_TIME,
         latest_time: str = ScheduleConfig.LATEST_TIME,
         cron_schedule: str = ScheduleConfig.CRON_SCHEDULE,
-        refresh: bool = True
+        refresh: bool = True,
     ) -> None:
         """Updates the correlation search timeframe to work with test data
 
@@ -477,12 +480,12 @@
         data = {
             SavedSearchKeys.EARLIEST_TIME_KEY: earliest_time,
             SavedSearchKeys.LATEST_TIME_KEY: latest_time,
-            SavedSearchKeys.CRON_SCHEDULE_KEY: cron_schedule
+            SavedSearchKeys.CRON_SCHEDULE_KEY: cron_schedule,
         }
         self.logger.info(data)
         self.logger.info(f"Updating timeframe for '{self.name}': {data}")
         try:
-            self.saved_search.update(**data) # type: ignore
+            self.saved_search.update(**data)  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while updating timeframe: {e}")
 
@@ -531,7 +534,9 @@
 
         # Use the cached risk_events unless we're forcing an update
         if self._risk_events is not None:
-            self.logger.debug(f"Using cached risk events ({len(self._risk_events)} total).")
+            self.logger.debug(
+                f"Using cached risk events ({len(self._risk_events)} total)."
+            )
             return self._risk_events
 
         # TODO (#248): Refactor risk/notable querying to pin to a single savedsearch ID
@@ -553,7 +558,9 @@
                 parsed_raw = json.loads(result["_raw"])
                 event = RiskEvent.model_validate(parsed_raw)
             except Exception:
-                self.logger.error(f"Failed to parse RiskEvent from search result: {result}")
+                self.logger.error(
+                    f"Failed to parse RiskEvent from search result: {result}"
+                )
                 raise
             events.append(event)
             self.logger.debug(f"Found risk event for '{self.name}': {event}")
@@ -597,7 +604,9 @@
 
         # Use the cached notable_events unless we're forcing an update
         if self._notable_events is not None:
-            self.logger.debug(f"Using cached notable events ({len(self._notable_events)} total).")
+            self.logger.debug(
+                f"Using cached notable events ({len(self._notable_events)} total)."
+            )
             return self._notable_events
 
         # Search for all notable events from a single scheduled search (indicated by orig_sid)
@@ -618,7 +627,9 @@
                 parsed_raw = json.loads(result["_raw"])
                 event = NotableEvent.model_validate(parsed_raw)
             except Exception:
-                self.logger.error(f"Failed to parse NotableEvent from search result: {result}")
+                self.logger.error(
+                    f"Failed to parse NotableEvent from search result: {result}"
+                )
                 raise
             events.append(event)
             self.logger.debug(f"Found notable event for '{self.name}': {event}")
@@ -653,7 +664,9 @@
                 " with it; cannot validate."
             )
 
-        risk_object_counts: dict[int, int] = {id(x): 0 for x in self.detection.rba.risk_objects}
+        risk_object_counts: dict[int, int] = {
+            id(x): 0 for x in self.detection.rba.risk_objects
+        }
 
         # Get the risk events; note that we use the cached risk events, expecting they were
         # saved by a prior call to risk_event_exists
@@ -670,7 +683,9 @@
             event.validate_against_detection(self.detection)
 
             # Update risk object count based on match
-            matched_risk_object = event.get_matched_risk_object(self.detection.rba.risk_objects)
+            matched_risk_object = event.get_matched_risk_object(
+                self.detection.rba.risk_objects
+            )
             self.logger.debug(
                 f"Matched risk event (object={event.es_risk_object}, type={event.es_risk_object_type}) "
                 f"to detection's risk object (name={matched_risk_object.field}, "
@@ -740,7 +755,9 @@
 
     # NOTE: it would be more ideal to switch this to a system which gets the handle of the saved search job and polls
     # it for completion, but that seems more tricky
-    def test(self, max_sleep: int = TimeoutConfig.MAX_SLEEP, raise_on_exc: bool = False) -> IntegrationTestResult:
+    def test(
+        self, max_sleep: int = TimeoutConfig.MAX_SLEEP, raise_on_exc: bool = False
+    ) -> IntegrationTestResult:
         """Execute the integration test
 
         Executes an integration test for this CorrelationSearch. First, ensures no matching risk/notables already exist
@@ -772,9 +789,7 @@
 
         try:
             # first make sure the indexes are currently empty and the detection is starting from a disabled state
-            self.logger.debug(
-                "Cleaning up any pre-existing risk/notable events..."
-            )
+            self.logger.debug("Cleaning up any pre-existing risk/notable events...")
             self.update_pbar(TestingStates.PRE_CLEANUP)
             if self.risk_event_exists():
                 self.logger.warning(
@@ -806,7 +821,9 @@
             # loop so long as the elapsed time is less than max_sleep
             while elapsed_sleep_time < max_sleep:
                 # sleep so the detection job can finish
-                self.logger.info(f"Waiting {time_to_sleep} for {self.name} so it can finish")
+                self.logger.info(
+                    f"Waiting {time_to_sleep} for {self.name} so it can finish"
+                )
                 self.update_pbar(TestingStates.VALIDATING)
                 time.sleep(time_to_sleep)
                 elapsed_sleep_time += time_to_sleep
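
The while loop above is the waiting half of the exponential backoff that TimeoutConfig configures. The growth step falls outside the hunks shown, so the sketch below reconstructs the presumed shape; the MAX_SLEEP value and the doubling are assumptions.

import time
from enum import IntEnum

class TimeoutConfig(IntEnum):
    BASE_SLEEP = 60   # from the enum earlier in this diff
    MAX_SLEEP = 210   # assumed; the real value is not shown in these hunks

def wait_with_backoff(max_sleep: int = TimeoutConfig.MAX_SLEEP) -> None:
    """Presumed shape of the sleep loop in CorrelationSearch.test()."""
    elapsed_sleep_time = 0
    time_to_sleep = int(TimeoutConfig.BASE_SLEEP)
    while elapsed_sleep_time < max_sleep:
        time.sleep(time_to_sleep)             # let the scheduled detection job finish
        elapsed_sleep_time += time_to_sleep
        # ... poll for risk/notable events here; return early once they appear ...
        time_to_sleep *= 2                    # assumed exponential growth between polls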
@@ -895,7 +912,7 @@
                     wait_duration=elapsed_sleep_time,
                     exception=e,
                 )
-                self.logger.exception(result.message) # type: ignore
+                self.logger.exception(result.message)  # type: ignore
             else:
                 raise e
         except Exception as e:
@@ -905,7 +922,10 @@
 
         # log based on result status
         if result is not None:
-            if result.status == TestResultStatus.PASS or result.status == TestResultStatus.SKIP:
+            if (
+                result.status == TestResultStatus.PASS
+                or result.status == TestResultStatus.SKIP
+            ):
                 self.logger.info(f"{result.status.name}: {result.message}")
             elif result.status == TestResultStatus.FAIL:
                 self.logger.error(f"{result.status.name}: {result.message}")
@@ -928,11 +948,11 @@
         :param query: the SPL string to run
         """
         self.logger.debug(f"Executing query: `{query}`")
-        job = self.service.search(query, exec_mode="blocking") # type: ignore
+        job = self.service.search(query, exec_mode="blocking")  # type: ignore
 
         # query the results, catching any HTTP status code errors
         try:
-            response_reader: ResponseReader = job.results(output_mode="json") # type: ignore
+            response_reader: ResponseReader = job.results(output_mode="json")  # type: ignore
         except HTTPError as e:
             # e.g. -> HTTP 400 Bad Request -- b'{"messages":[{"type":"FATAL","text":"Error in \'delete\' command: You
             # have insufficient privileges to delete events."}]}'
@@ -940,7 +960,7 @@
             self.logger.error(message)
             raise ServerError(message)
 
-        return ResultIterator(response_reader) # type: ignore
+        return ResultIterator(response_reader)  # type: ignore
 
     def _delete_index(self, index: str) -> None:
         """Deletes events in a given index
@@ -991,7 +1011,7 @@
 
         # Add indexes to purge
         if delete_test_index:
-            self.indexes_to_purge.add(self.test_index) # type: ignore
+            self.indexes_to_purge.add(self.test_index)  # type: ignore
         if self._risk_events is not None:
             self.indexes_to_purge.add(Indexes.RISK_INDEX)
         if self._notable_events is not None:
@@ -1019,5 +1039,5 @@
             self.pbar_data.fq_test_name,
             state,
             self.pbar_data.start_time,
-            True
+            True,
         )