contentctl 4.4.7__py3-none-any.whl → 5.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- contentctl/__init__.py +1 -1
- contentctl/actions/build.py +102 -57
- contentctl/actions/deploy_acs.py +29 -24
- contentctl/actions/detection_testing/DetectionTestingManager.py +66 -42
- contentctl/actions/detection_testing/GitService.py +134 -76
- contentctl/actions/detection_testing/generate_detection_coverage_badge.py +48 -30
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +192 -147
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +45 -32
- contentctl/actions/detection_testing/progress_bar.py +9 -6
- contentctl/actions/detection_testing/views/DetectionTestingView.py +16 -19
- contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +1 -5
- contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +2 -2
- contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +1 -4
- contentctl/actions/doc_gen.py +9 -5
- contentctl/actions/initialize.py +45 -33
- contentctl/actions/inspect.py +118 -61
- contentctl/actions/new_content.py +155 -108
- contentctl/actions/release_notes.py +276 -146
- contentctl/actions/reporting.py +23 -19
- contentctl/actions/test.py +33 -28
- contentctl/actions/validate.py +55 -34
- contentctl/api.py +54 -45
- contentctl/contentctl.py +124 -90
- contentctl/enrichments/attack_enrichment.py +112 -72
- contentctl/enrichments/cve_enrichment.py +34 -28
- contentctl/enrichments/splunk_app_enrichment.py +38 -36
- contentctl/helper/link_validator.py +101 -78
- contentctl/helper/splunk_app.py +69 -41
- contentctl/helper/utils.py +58 -53
- contentctl/input/director.py +68 -36
- contentctl/input/new_content_questions.py +27 -35
- contentctl/input/yml_reader.py +28 -18
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +303 -259
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +115 -52
- contentctl/objects/alert_action.py +10 -9
- contentctl/objects/annotated_types.py +1 -1
- contentctl/objects/atomic.py +65 -54
- contentctl/objects/base_test.py +5 -3
- contentctl/objects/base_test_result.py +19 -11
- contentctl/objects/baseline.py +62 -30
- contentctl/objects/baseline_tags.py +30 -24
- contentctl/objects/config.py +790 -597
- contentctl/objects/constants.py +33 -56
- contentctl/objects/correlation_search.py +150 -136
- contentctl/objects/dashboard.py +55 -41
- contentctl/objects/data_source.py +16 -17
- contentctl/objects/deployment.py +43 -44
- contentctl/objects/deployment_email.py +3 -2
- contentctl/objects/deployment_notable.py +4 -2
- contentctl/objects/deployment_phantom.py +7 -6
- contentctl/objects/deployment_rba.py +3 -2
- contentctl/objects/deployment_scheduling.py +3 -2
- contentctl/objects/deployment_slack.py +3 -2
- contentctl/objects/detection.py +5 -2
- contentctl/objects/detection_metadata.py +1 -0
- contentctl/objects/detection_stanza.py +7 -2
- contentctl/objects/detection_tags.py +58 -103
- contentctl/objects/drilldown.py +66 -34
- contentctl/objects/enums.py +81 -100
- contentctl/objects/errors.py +16 -24
- contentctl/objects/integration_test.py +3 -3
- contentctl/objects/integration_test_result.py +1 -0
- contentctl/objects/investigation.py +59 -36
- contentctl/objects/investigation_tags.py +30 -19
- contentctl/objects/lookup.py +304 -101
- contentctl/objects/macro.py +55 -39
- contentctl/objects/manual_test.py +3 -3
- contentctl/objects/manual_test_result.py +1 -0
- contentctl/objects/mitre_attack_enrichment.py +17 -16
- contentctl/objects/notable_action.py +2 -1
- contentctl/objects/notable_event.py +1 -3
- contentctl/objects/playbook.py +37 -35
- contentctl/objects/playbook_tags.py +23 -13
- contentctl/objects/rba.py +96 -0
- contentctl/objects/risk_analysis_action.py +15 -11
- contentctl/objects/risk_event.py +110 -160
- contentctl/objects/risk_object.py +1 -0
- contentctl/objects/savedsearches_conf.py +9 -7
- contentctl/objects/security_content_object.py +5 -2
- contentctl/objects/story.py +54 -49
- contentctl/objects/story_tags.py +56 -45
- contentctl/objects/test_attack_data.py +2 -1
- contentctl/objects/test_group.py +5 -2
- contentctl/objects/threat_object.py +1 -0
- contentctl/objects/throttling.py +27 -18
- contentctl/objects/unit_test.py +3 -4
- contentctl/objects/unit_test_baseline.py +5 -5
- contentctl/objects/unit_test_result.py +6 -6
- contentctl/output/api_json_output.py +233 -220
- contentctl/output/attack_nav_output.py +21 -21
- contentctl/output/attack_nav_writer.py +29 -37
- contentctl/output/conf_output.py +235 -172
- contentctl/output/conf_writer.py +201 -125
- contentctl/output/data_source_writer.py +38 -26
- contentctl/output/doc_md_output.py +53 -27
- contentctl/output/jinja_writer.py +19 -15
- contentctl/output/json_writer.py +21 -11
- contentctl/output/svg_output.py +56 -38
- contentctl/output/templates/analyticstories_detections.j2 +2 -2
- contentctl/output/templates/analyticstories_stories.j2 +1 -1
- contentctl/output/templates/collections.j2 +1 -1
- contentctl/output/templates/doc_detections.j2 +0 -5
- contentctl/output/templates/es_investigations_investigations.j2 +1 -1
- contentctl/output/templates/es_investigations_stories.j2 +1 -1
- contentctl/output/templates/savedsearches_baselines.j2 +2 -2
- contentctl/output/templates/savedsearches_detections.j2 +10 -11
- contentctl/output/templates/savedsearches_investigations.j2 +2 -2
- contentctl/output/templates/transforms.j2 +6 -8
- contentctl/output/yml_writer.py +29 -20
- contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +16 -34
- contentctl/templates/stories/cobalt_strike.yml +1 -0
- {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/METADATA +5 -4
- contentctl-5.0.0.dist-info/RECORD +168 -0
- {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/WHEEL +1 -1
- contentctl/actions/initialize_old.py +0 -245
- contentctl/objects/event_source.py +0 -11
- contentctl/objects/observable.py +0 -37
- contentctl/output/detection_writer.py +0 -28
- contentctl/output/new_content_yml_output.py +0 -56
- contentctl/output/yml_output.py +0 -66
- contentctl-4.4.7.dist-info/RECORD +0 -173
- {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/LICENSE.md +0 -0
- {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/entry_points.txt +0 -0
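The per-file diff shown below covers `contentctl/objects/correlation_search.py`, and one change repeats throughout it: enums declared as `class Foo(str, Enum)` / `class Foo(int, Enum)` in 4.4.7 become Python 3.11's `StrEnum` / `IntEnum` in 5.0.0, which lets call sites drop the explicit `.value` accessor. A minimal sketch of the behavioral difference (illustrative only, not code from the package):

```python
from enum import Enum, StrEnum


class OldIndexes(str, Enum):  # 4.4.7-style mixin
    RISK_INDEX = "risk"


class NewIndexes(StrEnum):  # 5.0.0-style, Python 3.11+
    RISK_INDEX = "risk"


# Both compare equal to the plain string...
assert OldIndexes.RISK_INDEX == "risk" and NewIndexes.RISK_INDEX == "risk"

# ...but only the StrEnum member renders as the plain string, so the
# `.value` calls removed throughout this diff are no longer needed.
assert str(NewIndexes.RISK_INDEX) == "risk"
assert f"{NewIndexes.RISK_INDEX}" == "risk"
assert str(OldIndexes.RISK_INDEX) == "OldIndexes.RISK_INDEX"
```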
contentctl/objects/correlation_search.py

@@ -2,34 +2,33 @@ import logging
 import time
 import json
 from typing import Any
-from enum import Enum
+from enum import StrEnum, IntEnum
 from functools import cached_property

 from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
-from splunklib.results import JSONResultsReader, Message
-from splunklib.binding import HTTPError, ResponseReader
-import splunklib.client as splunklib
-from tqdm import tqdm
+from splunklib.results import JSONResultsReader, Message  # type: ignore
+from splunklib.binding import HTTPError, ResponseReader  # type: ignore
+import splunklib.client as splunklib  # type: ignore
+from tqdm import tqdm  # type: ignore

 from contentctl.objects.risk_analysis_action import RiskAnalysisAction
 from contentctl.objects.notable_action import NotableAction
 from contentctl.objects.base_test_result import TestResultStatus
 from contentctl.objects.integration_test_result import IntegrationTestResult
 from contentctl.actions.detection_testing.progress_bar import (
-    format_pbar_string,
+    format_pbar_string,  # type: ignore
     TestReportingType,
-    TestingStates
+    TestingStates,
 )
 from contentctl.objects.errors import (
     IntegrationTestingError,
     ServerError,
     ClientError,
-    ValidationFailed
+    ValidationFailed,
 )
 from contentctl.objects.detection import Detection
 from contentctl.objects.risk_event import RiskEvent
 from contentctl.objects.notable_event import NotableEvent
-from contentctl.objects.observable import Observable


 # Suppress logging by default; enable for local testing
@@ -66,7 +65,9 @@ def get_logger() -> logging.Logger:
     handler = logging.NullHandler()

     # Format our output
-    formatter = logging.Formatter("%(asctime)s - %(levelname)s:%(name)s - %(message)s")
+    formatter = logging.Formatter(
+        "%(asctime)s - %(levelname)s:%(name)s - %(message)s"
+    )
     handler.setFormatter(formatter)

     # Set handler level and add to logger
@@ -76,10 +77,11 @@ def get_logger() -> logging.Logger:
     return logger


-class SavedSearchKeys(str, Enum):
+class SavedSearchKeys(StrEnum):
     """
     Various keys into the SavedSearch content
     """
+
     # setup the names of the keys we expect to access in content
     EARLIEST_TIME_KEY = "dispatch.earliest_time"
     LATEST_TIME_KEY = "dispatch.latest_time"
@@ -89,19 +91,21 @@ class SavedSearchKeys(str, Enum):
     DISBALED_KEY = "disabled"


-class Indexes(str, Enum):
+class Indexes(StrEnum):
     """
     Indexes we search against
     """
+
     # setup the names of the risk and notable indexes
     RISK_INDEX = "risk"
     NOTABLE_INDEX = "notable"


-class TimeoutConfig(int, Enum):
+class TimeoutConfig(IntEnum):
     """
     Configuration values for the exponential backoff timer
     """
+
     # base amount to sleep for before beginning exponential backoff during testing
     BASE_SLEEP = 60

@@ -115,10 +119,11 @@ class TimeoutConfig(int, Enum):

 # TODO (#226): evaluate sane defaults for timeframe for integration testing (e.g. 5y is good
 # now, but maybe not always...); maybe set latest/earliest to None?
-class ScheduleConfig(str, Enum):
+class ScheduleConfig(StrEnum):
     """
     Configuraton values for the saved search schedule
     """
+
     EARLIEST_TIME = "-5y@y"
     LATEST_TIME = "-1m@m"
     CRON_SCHEDULE = "*/1 * * * *"
@@ -133,11 +138,10 @@ class ResultIterator:
     :param response_reader: a ResponseReader object
     :param logger: a Logger object
     """
+
     def __init__(self, response_reader: ResponseReader) -> None:
         # init the results reader
-        self.results_reader: JSONResultsReader = JSONResultsReader(
-            response_reader
-        )
+        self.results_reader: JSONResultsReader = JSONResultsReader(response_reader)

         # get logger
         self.logger: logging.Logger = get_logger()
@@ -145,24 +149,24 @@ class ResultIterator:
     def __iter__(self) -> "ResultIterator":
         return self

-    def __next__(self) -> dict:
+    def __next__(self) -> dict[Any, Any]:
         # Use a reader for JSON format so we can iterate over our results
         for result in self.results_reader:
             # log messages, or raise if error
             if isinstance(result, Message):
                 # convert level string to level int
-                level_name = result.type.strip().upper()
+                level_name = result.type.strip().upper()  # type: ignore
                 level: int = logging.getLevelName(level_name)

                 # log message at appropriate level and raise if needed
-                message = f"SPLUNK: {result.message}"
+                message = f"SPLUNK: {result.message}"  # type: ignore
                 self.logger.log(level, message)
                 if level == logging.ERROR:
                     raise ServerError(message)

             # if dict, just return
             elif isinstance(result, dict):
-                return result
+                return result  # type: ignore

             # raise for any unexpected types
             else:
@@ -179,14 +183,13 @@ class PbarData(BaseModel):
     :param fq_test_name: the fully qualifed (fq) test name ("<detection_name>:<test_name>") used for logging
     :param start_time: the start time used for logging
     """
-    pbar: tqdm
+
+    pbar: tqdm  # type: ignore
     fq_test_name: str
     start_time: float

     # needed to support the tqdm type
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True)


 class CorrelationSearch(BaseModel):
@@ -199,6 +202,7 @@ class CorrelationSearch(BaseModel):
     :param pbar_data: the encapsulated info needed for logging w/ pbar
     :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
     """
+
     # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
     detection: Detection = Field(...)

@@ -233,10 +237,7 @@ class CorrelationSearch(BaseModel):

     # Need arbitrary types to allow fields w/ types like SavedSearch; we also want to forbid
     # unexpected fields
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-        extra='forbid'
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")

     def model_post_init(self, __context: Any) -> None:
         super().model_post_init(__context)
@@ -310,9 +311,11 @@ class CorrelationSearch(BaseModel):
         The earliest time configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY.value]
+            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY]  # type: ignore
         else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

     @property
     def latest_time(self) -> str:
@@ -320,9 +323,11 @@ class CorrelationSearch(BaseModel):
         The latest time configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY.value]
+            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY]  # type: ignore
         else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

     @property
     def cron_schedule(self) -> str:
@@ -330,9 +335,11 @@ class CorrelationSearch(BaseModel):
         The cron schedule configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY.value]
+            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY]  # type: ignore
         else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

     @property
     def enabled(self) -> bool:
@@ -340,14 +347,16 @@ class CorrelationSearch(BaseModel):
         Whether the saved search is enabled
         """
         if self.saved_search is not None:
-            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY.value]):
+            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY]):  # type: ignore
                 return False
             else:
                 return True
         else:
-            raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
+            raise ClientError(
+                "Something unexpected went wrong in initialization; saved_search was not populated"
+            )

-    @cached_property
+    @property
     def has_risk_analysis_action(self) -> bool:
         """Whether the correlation search has an associated risk analysis Adaptive Response Action
         :return: a boolean indicating whether it has a risk analysis Adaptive Response Action
@@ -368,7 +377,7 @@ class CorrelationSearch(BaseModel):
         :param content: a dict of strings to values
         :returns: a RiskAnalysisAction, or None if none exists
         """
-        if int(content[SavedSearchKeys.RISK_ACTION_KEY.value]):
+        if int(content[SavedSearchKeys.RISK_ACTION_KEY]):
             try:
                 return RiskAnalysisAction.parse_from_dict(content)
             except ValueError as e:
@@ -383,23 +392,10 @@ class CorrelationSearch(BaseModel):
         :returns: a NotableAction, or None if none exists
         """
         # grab notable details if present
-        if int(content[SavedSearchKeys.NOTABLE_ACTION_KEY.value]):
+        if int(content[SavedSearchKeys.NOTABLE_ACTION_KEY]):
             return NotableAction.parse_from_dict(content)
         return None

-    @staticmethod
-    def _get_relevant_observables(observables: list[Observable]) -> list[Observable]:
-        """
-        Given a list of observables, identify the subset of those relevant for risk matching
-        :param observables: the Observable objects to filter
-        :returns: the filtered list of relevant observables
-        """
-        relevant = []
-        for observable in observables:
-            if not RiskEvent.ignore_observable(observable):
-                relevant.append(observable)
-        return relevant
-
     def _parse_risk_and_notable_actions(self) -> None:
         """Parses the risk/notable metadata we care about from self.saved_search.content

@@ -411,11 +407,13 @@ class CorrelationSearch(BaseModel):
         """
         # grab risk details if present
         self._risk_analysis_action = CorrelationSearch._get_risk_analysis_action(
-            self.saved_search.content
+            self.saved_search.content  # type: ignore
         )

         # grab notable details if present
-        self._notable_action = CorrelationSearch._get_notable_action(self.saved_search.content)
+        self._notable_action = CorrelationSearch._get_notable_action(
+            self.saved_search.content
+        )  # type: ignore

     def refresh(self) -> None:
         """Refreshes the metadata in the SavedSearch entity, and re-parses the fields we care about
@@ -423,10 +421,9 @@ class CorrelationSearch(BaseModel):
         After operations we expect to alter the state of the SavedSearch, we call refresh so that we have a local
         representation of the new state; then we extrat what we care about into this instance
         """
-        self.logger.debug(
-            f"Refreshing SavedSearch metadata for {self.name}...")
+        self.logger.debug(f"Refreshing SavedSearch metadata for {self.name}...")
         try:
-            self.saved_search.refresh()
+            self.saved_search.refresh()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered during refresh: {e}")
         self._parse_risk_and_notable_actions()
@@ -440,7 +437,7 @@ class CorrelationSearch(BaseModel):
         """
         self.logger.debug(f"Enabling {self.name}...")
         try:
-            self.saved_search.enable()
+            self.saved_search.enable()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while enabling detection: {e}")
         if refresh:
@@ -455,7 +452,7 @@ class CorrelationSearch(BaseModel):
         """
         self.logger.debug(f"Disabling {self.name}...")
         try:
-            self.saved_search.disable()
+            self.saved_search.disable()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while disabling detection: {e}")
         if refresh:
@@ -463,10 +460,10 @@ class CorrelationSearch(BaseModel):

     def update_timeframe(
         self,
-        earliest_time: str = ScheduleConfig.EARLIEST_TIME.value,
-        latest_time: str = ScheduleConfig.LATEST_TIME.value,
-        cron_schedule: str = ScheduleConfig.CRON_SCHEDULE.value,
-        refresh: bool = True
+        earliest_time: str = ScheduleConfig.EARLIEST_TIME,
+        latest_time: str = ScheduleConfig.LATEST_TIME,
+        cron_schedule: str = ScheduleConfig.CRON_SCHEDULE,
+        refresh: bool = True,
     ) -> None:
         """Updates the correlation search timeframe to work with test data

@@ -481,21 +478,21 @@ class CorrelationSearch(BaseModel):
         """
         # update the SavedSearch accordingly
         data = {
-            SavedSearchKeys.EARLIEST_TIME_KEY.value: earliest_time,
-            SavedSearchKeys.LATEST_TIME_KEY.value: latest_time,
-            SavedSearchKeys.CRON_SCHEDULE_KEY.value: cron_schedule,
+            SavedSearchKeys.EARLIEST_TIME_KEY: earliest_time,
+            SavedSearchKeys.LATEST_TIME_KEY: latest_time,
+            SavedSearchKeys.CRON_SCHEDULE_KEY: cron_schedule,
         }
         self.logger.info(data)
         self.logger.info(f"Updating timeframe for '{self.name}': {data}")
         try:
-            self.saved_search.update(**data)
+            self.saved_search.update(**data)  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while updating timeframe: {e}")

         if refresh:
             self.refresh()

-    def force_run(self, refresh=True) -> None:
+    def force_run(self, refresh: bool = True) -> None:
         """Forces a detection run

         Enables the detection, adjusts the cron schedule to run every 1 minute, and widens the earliest/latest window
@@ -506,7 +503,7 @@ class CorrelationSearch(BaseModel):
         if not self.enabled:
             self.enable(refresh=False)
         else:
-            self.logger.info(f"Detection '{self.name}' was already enabled")
+            self.logger.warning(f"Detection '{self.name}' was already enabled")

         if refresh:
             self.refresh()
@@ -537,7 +534,9 @@ class CorrelationSearch(BaseModel):

         # Use the cached risk_events unless we're forcing an update
         if self._risk_events is not None:
-            self.logger.debug(f"Using cached risk events ({len(self._risk_events)} total).")
+            self.logger.debug(
+                f"Using cached risk events ({len(self._risk_events)} total)."
+            )
             return self._risk_events

         # TODO (#248): Refactor risk/notable querying to pin to a single savedsearch ID
@@ -554,12 +553,14 @@ class CorrelationSearch(BaseModel):
         for result in result_iterator:
             # sanity check that this result from the iterator is a risk event and not some
             # other metadata
-            if result["index"] == Indexes.RISK_INDEX.value:
+            if result["index"] == Indexes.RISK_INDEX:
                 try:
                     parsed_raw = json.loads(result["_raw"])
-                    event = RiskEvent.parse_obj(parsed_raw)
+                    event = RiskEvent.model_validate(parsed_raw)
                 except Exception:
-                    self.logger.error(f"Failed to parse RiskEvent from search result: {result}")
+                    self.logger.error(
+                        f"Failed to parse RiskEvent from search result: {result}"
+                    )
                     raise
                 events.append(event)
                 self.logger.debug(f"Found risk event for '{self.name}': {event}")
@@ -603,7 +604,9 @@ class CorrelationSearch(BaseModel):

         # Use the cached notable_events unless we're forcing an update
         if self._notable_events is not None:
-            self.logger.debug(f"Using cached notable events ({len(self._notable_events)} total).")
+            self.logger.debug(
+                f"Using cached notable events ({len(self._notable_events)} total)."
+            )
             return self._notable_events

         # Search for all notable events from a single scheduled search (indicated by orig_sid)
@@ -619,12 +622,14 @@ class CorrelationSearch(BaseModel):
         for result in result_iterator:
             # sanity check that this result from the iterator is a notable event and not some
             # other metadata
-            if result["index"] == Indexes.NOTABLE_INDEX.value:
+            if result["index"] == Indexes.NOTABLE_INDEX:
                 try:
                     parsed_raw = json.loads(result["_raw"])
-                    event = NotableEvent.parse_obj(parsed_raw)
+                    event = NotableEvent.model_validate(parsed_raw)
                 except Exception:
-                    self.logger.error(f"Failed to parse NotableEvent from search result: {result}")
+                    self.logger.error(
+                        f"Failed to parse NotableEvent from search result: {result}"
+                    )
                     raise
                 events.append(event)
                 self.logger.debug(f"Found notable event for '{self.name}': {event}")
@@ -646,24 +651,23 @@ class CorrelationSearch(BaseModel):
         """Validates the existence of any expected risk events

         First ensure the risk event exists, and if it does validate its risk message and make sure
-        any events align with the specified observables. Also adds the risk index to the purge list
+        any events align with the specified risk object. Also adds the risk index to the purge list
         if risk events existed
         :param elapsed_sleep_time: an int representing the amount of time slept thus far waiting to
             check the risks/notables
         :returns: an IntegrationTestResult on failure; None on success
         """
-        # Create a mapping of the relevant observables to counters
-        observables = CorrelationSearch._get_relevant_observables(self.detection.tags.observable)
-        observable_counts: dict[str, int] = {str(x): 0 for x in observables}
-
-        # NOTE: we expect each observable name to be unique;
-        # ultimately this validation should be handled during the build process
-        if len(observables) != len(observable_counts):
-            raise ClientError(
-                f"At least two observables in '{self.detection.name}' have the same name; "
-                "each observable for a detection should be unique."
+        # Ensure the rba object is defined
+        if self.detection.rba is None:
+            raise ValidationFailed(
+                f"Unexpected error: Detection '{self.detection.name}' has no RBA objects associated"
+                " with it; cannot validate."
             )

+        risk_object_counts: dict[int, int] = {
+            id(x): 0 for x in self.detection.rba.risk_objects
+        }
+
         # Get the risk events; note that we use the cached risk events, expecting they were
         # saved by a prior call to risk_event_exists
         events = self.get_risk_events()
@@ -673,63 +677,68 @@ class CorrelationSearch(BaseModel):
         for event in events:
             c += 1
             self.logger.debug(
-                f"Validating risk event ({event.risk_object}, {event.risk_object_type}): "
+                f"Validating risk event ({event.es_risk_object}, {event.es_risk_object_type}): "
                 f"{c}/{len(events)}"
             )
             event.validate_against_detection(self.detection)

-            # Update observable count based on match
-            matched_observable = event.get_matched_observable(observables)
+            # Update risk object count based on match
+            matched_risk_object = event.get_matched_risk_object(
+                self.detection.rba.risk_objects
+            )
             self.logger.debug(
-                f"Matched risk event (object={event.risk_object}, type={event.risk_object_type}) "
-                f"to observable (name={matched_observable.name}, "
-                f"type={matched_observable.type}) using the source field "
+                f"Matched risk event (object={event.es_risk_object}, type={event.es_risk_object_type}) "
+                f"to detection's risk object (name={matched_risk_object.field}, "
+                f"type={matched_risk_object.type.value}) using the source field "
                 f"'{event.source_field_name}'"
            )
-            observable_counts[str(matched_observable)] += 1
+            risk_object_counts[id(matched_risk_object)] += 1

-        # Report any observables which did not have at least one match to a risk event
-        for observable in observables:
+        # Report any risk objects which did not have at least one match to a risk event
+        for risk_object in self.detection.rba.risk_objects:
             self.logger.debug(
-                f"Matched observable (name={observable.name}, type={observable.type} "
-                f"to {observable_counts[str(observable)]} risk events."
+                f"Matched risk object (name={risk_object.field}, type={risk_object.type.value} "
+                f"to {risk_object_counts[id(risk_object)]} risk events."
             )
-            if observable_counts[str(observable)] == 0:
+            if risk_object_counts[id(risk_object)] == 0:
                 raise ValidationFailed(
-                    f"Observable (name={observable.name}, type={observable.type}) "
-                    "was not matched to any risk events."
+                    f"Risk object (name={risk_object.field}, type={risk_object.type.value}) "
+                    "was not matched to any risk events."
                 )

         # TODO (#250): Re-enable and refactor code that validates the specific risk counts
         # Validate risk events in aggregate; we should have an equal amount of risk events for each
-        # relevant observable, and the total count should match the total number of events
+        # relevant risk object, and the total count should match the total number of events
         # individual_count: int | None = None
         # total_count = 0
-        # for observable_str in observable_counts:
+        # for risk_object_id in risk_object_counts:
         #     self.logger.debug(
-        #         f"Observable <{observable_str}> match count: {observable_counts[observable_str]}"
+        #         f"Risk object <{risk_object_id}> match count: {risk_object_counts[risk_object_id]}"
         #     )

         #     # Grab the first value encountered if not set yet
         #     if individual_count is None:
-        #         individual_count = observable_counts[observable_str]
+        #         individual_count = risk_object_counts[risk_object_id]
         #     else:
-        #         # Confirm that the count for the current observable matches the count of the others
-        #         if observable_counts[observable_str] != individual_count:
+        #         # Confirm that the count for the current risk object matches the count of the
+        #         # others
+        #         if risk_object_counts[risk_object_id] != individual_count:
         #             raise ValidationFailed(
-        #                 f"Count of risk events matching observable \"{observable_str}\" "
-        #                 f"({observable_counts[observable_str]}) does not match the count of those "
-        #                 f"matching other observables ({individual_count})."
+        #                 f"Count of risk events matching detection's risk object <\"{risk_object_id}\"> "
+        #                 f"({risk_object_counts[risk_object_id]}) does not match the count of those "
+        #                 f"matching other risk objects ({individual_count})."
         #             )

-        #     # Aggregate total count of events matched to observables
-        #     total_count += observable_counts[observable_str]
+        #     # Aggregate total count of events matched to risk objects
+        #     total_count += risk_object_counts[risk_object_id]

-        # # Raise if the the number of events doesn't match the number of those matched to observables
+        # # Raise if the the number of events doesn't match the number of those matched to risk
+        # # objects
         # if len(events) != total_count:
         #     raise ValidationFailed(
         #         f"The total number of risk events {len(events)} does not match the number of "
-        #         f"risk events we were able to match against observables ({total_count})."
+        #         "risk events we were able to match against risk objects from the detection "
+        #         f"({total_count})."
         #     )

         # TODO (PEX-434): implement deeper notable validation
@@ -746,7 +755,9 @@ class CorrelationSearch(BaseModel):

     # NOTE: it would be more ideal to switch this to a system which gets the handle of the saved search job and polls
     # it for completion, but that seems more tricky
-    def test(self, max_sleep: int = TimeoutConfig.MAX_SLEEP.value, raise_on_exc: bool = False) -> IntegrationTestResult:
+    def test(
+        self, max_sleep: int = TimeoutConfig.MAX_SLEEP, raise_on_exc: bool = False
+    ) -> IntegrationTestResult:
         """Execute the integration test

         Executes an integration test for this CorrelationSearch. First, ensures no matching risk/notables already exist
@@ -760,10 +771,10 @@ class CorrelationSearch(BaseModel):
         """
         # max_sleep must be greater than the base value we must wait for the scheduled searchjob to run (jobs run every
         # 60s)
-        if max_sleep < TimeoutConfig.BASE_SLEEP.value:
+        if max_sleep < TimeoutConfig.BASE_SLEEP:
             raise ClientError(
                 f"max_sleep value of {max_sleep} is less than the base sleep required "
-                f"({TimeoutConfig.BASE_SLEEP.value})"
+                f"({TimeoutConfig.BASE_SLEEP})"
             )

         # initialize result as None
@@ -774,20 +785,18 @@ class CorrelationSearch(BaseModel):
         num_tries = 0

         # set the initial base sleep time
-        time_to_sleep = TimeoutConfig.BASE_SLEEP.value
+        time_to_sleep = TimeoutConfig.BASE_SLEEP

         try:
             # first make sure the indexes are currently empty and the detection is starting from a disabled state
-            self.logger.debug(
-                "Cleaning up any pre-existing risk/notable events..."
-            )
+            self.logger.debug("Cleaning up any pre-existing risk/notable events...")
             self.update_pbar(TestingStates.PRE_CLEANUP)
             if self.risk_event_exists():
-                self.logger.info(
+                self.logger.warning(
                     f"Risk events matching '{self.name}' already exist; marking for deletion"
                 )
             if self.notable_event_exists():
-                self.logger.info(
+                self.logger.warning(
                     f"Notable events matching '{self.name}' already exist; marking for deletion"
                 )
             self.cleanup()
@@ -812,7 +821,9 @@ class CorrelationSearch(BaseModel):
             # loop so long as the elapsed time is less than max_sleep
             while elapsed_sleep_time < max_sleep:
                 # sleep so the detection job can finish
-                self.logger.info(f"Waiting {time_to_sleep} for {self.name} so it can finish")
+                self.logger.info(
+                    f"Waiting {time_to_sleep} for {self.name} so it can finish"
+                )
                 self.update_pbar(TestingStates.VALIDATING)
                 time.sleep(time_to_sleep)
                 elapsed_sleep_time += time_to_sleep
@@ -901,7 +912,7 @@ class CorrelationSearch(BaseModel):
                     wait_duration=elapsed_sleep_time,
                     exception=e,
                 )
-                self.logger.exception(result.message)
+                self.logger.exception(result.message)  # type: ignore
             else:
                 raise e
         except Exception as e:
@@ -911,7 +922,10 @@ class CorrelationSearch(BaseModel):

         # log based on result status
         if result is not None:
-            if result.status == TestResultStatus.PASS or result.status == TestResultStatus.SKIP:
+            if (
+                result.status == TestResultStatus.PASS
+                or result.status == TestResultStatus.SKIP
+            ):
                 self.logger.info(f"{result.status.name}: {result.message}")
             elif result.status == TestResultStatus.FAIL:
                 self.logger.error(f"{result.status.name}: {result.message}")
@@ -934,11 +948,11 @@ class CorrelationSearch(BaseModel):
         :param query: the SPL string to run
         """
         self.logger.debug(f"Executing query: `{query}`")
-        job = self.service.search(query, exec_mode="blocking")
+        job = self.service.search(query, exec_mode="blocking")  # type: ignore

         # query the results, catching any HTTP status code errors
         try:
-            response_reader: ResponseReader = job.results(output_mode="json")
+            response_reader: ResponseReader = job.results(output_mode="json")  # type: ignore
         except HTTPError as e:
             # e.g. -> HTTP 400 Bad Request -- b'{"messages":[{"type":"FATAL","text":"Error in \'delete\' command: You
             # have insufficient privileges to delete events."}]}'
@@ -946,7 +960,7 @@ class CorrelationSearch(BaseModel):
             self.logger.error(message)
             raise ServerError(message)

-        return ResultIterator(response_reader)
+        return ResultIterator(response_reader)  # type: ignore

     def _delete_index(self, index: str) -> None:
         """Deletes events in a given index
@@ -979,7 +993,7 @@ class CorrelationSearch(BaseModel):
             message = f"No result returned showing deletion in index {index}"
             raise ServerError(message)

-    def cleanup(self, delete_test_index=False) -> None:
+    def cleanup(self, delete_test_index: bool = False) -> None:
         """Cleans up after an integration test

         First, disable the detection; then dump the risk, notable, and (optionally) test indexes. The test index is
@@ -997,11 +1011,11 @@ class CorrelationSearch(BaseModel):

         # Add indexes to purge
         if delete_test_index:
-            self.indexes_to_purge.add(self.test_index)
+            self.indexes_to_purge.add(self.test_index)  # type: ignore
         if self._risk_events is not None:
-            self.indexes_to_purge.add(Indexes.RISK_INDEX.value)
+            self.indexes_to_purge.add(Indexes.RISK_INDEX)
         if self._notable_events is not None:
-            self.indexes_to_purge.add(Indexes.NOTABLE_INDEX.value)
+            self.indexes_to_purge.add(Indexes.NOTABLE_INDEX)

         # delete the indexes
         for index in self.indexes_to_purge:
@@ -1025,5 +1039,5 @@ class CorrelationSearch(BaseModel):
             self.pbar_data.fq_test_name,
             state,
             self.pbar_data.start_time,
-            True
+            True,
         )