contentctl 5.0.0a2__py3-none-any.whl → 5.0.0a3__py3-none-any.whl
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
- contentctl/__init__.py +1 -1
- contentctl/actions/build.py +88 -55
- contentctl/actions/deploy_acs.py +29 -24
- contentctl/actions/detection_testing/DetectionTestingManager.py +66 -41
- contentctl/actions/detection_testing/GitService.py +2 -4
- contentctl/actions/detection_testing/generate_detection_coverage_badge.py +48 -30
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +163 -124
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +45 -32
- contentctl/actions/detection_testing/progress_bar.py +3 -0
- contentctl/actions/detection_testing/views/DetectionTestingView.py +15 -18
- contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +1 -5
- contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +2 -2
- contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +1 -4
- contentctl/actions/doc_gen.py +9 -5
- contentctl/actions/initialize.py +45 -33
- contentctl/actions/inspect.py +118 -61
- contentctl/actions/new_content.py +78 -50
- contentctl/actions/release_notes.py +276 -146
- contentctl/actions/reporting.py +23 -19
- contentctl/actions/test.py +31 -25
- contentctl/actions/validate.py +54 -34
- contentctl/api.py +54 -45
- contentctl/contentctl.py +10 -10
- contentctl/enrichments/attack_enrichment.py +112 -72
- contentctl/enrichments/cve_enrichment.py +34 -28
- contentctl/enrichments/splunk_app_enrichment.py +38 -36
- contentctl/helper/link_validator.py +101 -78
- contentctl/helper/splunk_app.py +69 -41
- contentctl/helper/utils.py +58 -39
- contentctl/input/director.py +69 -37
- contentctl/input/new_content_questions.py +26 -34
- contentctl/input/yml_reader.py +22 -17
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +250 -314
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +58 -36
- contentctl/objects/alert_action.py +8 -8
- contentctl/objects/annotated_types.py +1 -1
- contentctl/objects/atomic.py +64 -54
- contentctl/objects/base_test.py +2 -1
- contentctl/objects/base_test_result.py +16 -8
- contentctl/objects/baseline.py +41 -30
- contentctl/objects/baseline_tags.py +29 -22
- contentctl/objects/config.py +1 -1
- contentctl/objects/constants.py +29 -58
- contentctl/objects/correlation_search.py +75 -55
- contentctl/objects/dashboard.py +55 -41
- contentctl/objects/data_source.py +13 -13
- contentctl/objects/deployment.py +44 -37
- contentctl/objects/deployment_email.py +1 -1
- contentctl/objects/deployment_notable.py +2 -1
- contentctl/objects/deployment_phantom.py +5 -5
- contentctl/objects/deployment_rba.py +1 -1
- contentctl/objects/deployment_scheduling.py +1 -1
- contentctl/objects/deployment_slack.py +1 -1
- contentctl/objects/detection.py +5 -2
- contentctl/objects/detection_metadata.py +1 -0
- contentctl/objects/detection_stanza.py +7 -2
- contentctl/objects/detection_tags.py +54 -64
- contentctl/objects/drilldown.py +66 -35
- contentctl/objects/enums.py +61 -43
- contentctl/objects/errors.py +16 -24
- contentctl/objects/integration_test.py +3 -3
- contentctl/objects/integration_test_result.py +1 -0
- contentctl/objects/investigation.py +41 -26
- contentctl/objects/investigation_tags.py +29 -17
- contentctl/objects/lookup.py +234 -113
- contentctl/objects/macro.py +55 -38
- contentctl/objects/manual_test.py +3 -3
- contentctl/objects/manual_test_result.py +1 -0
- contentctl/objects/mitre_attack_enrichment.py +17 -16
- contentctl/objects/notable_action.py +2 -1
- contentctl/objects/notable_event.py +1 -3
- contentctl/objects/playbook.py +37 -35
- contentctl/objects/playbook_tags.py +22 -16
- contentctl/objects/rba.py +14 -8
- contentctl/objects/risk_analysis_action.py +15 -11
- contentctl/objects/risk_event.py +27 -20
- contentctl/objects/risk_object.py +1 -0
- contentctl/objects/savedsearches_conf.py +9 -7
- contentctl/objects/security_content_object.py +5 -2
- contentctl/objects/story.py +45 -44
- contentctl/objects/story_tags.py +56 -44
- contentctl/objects/test_group.py +5 -2
- contentctl/objects/threat_object.py +1 -0
- contentctl/objects/throttling.py +27 -18
- contentctl/objects/unit_test.py +3 -4
- contentctl/objects/unit_test_baseline.py +4 -5
- contentctl/objects/unit_test_result.py +6 -6
- contentctl/output/api_json_output.py +22 -22
- contentctl/output/attack_nav_output.py +21 -21
- contentctl/output/attack_nav_writer.py +29 -37
- contentctl/output/conf_output.py +230 -174
- contentctl/output/data_source_writer.py +38 -25
- contentctl/output/doc_md_output.py +53 -27
- contentctl/output/jinja_writer.py +19 -15
- contentctl/output/json_writer.py +20 -8
- contentctl/output/svg_output.py +56 -38
- contentctl/output/templates/transforms.j2 +2 -2
- contentctl/output/yml_writer.py +18 -24
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.0a3.dist-info}/METADATA +1 -1
- contentctl-5.0.0a3.dist-info/RECORD +168 -0
- contentctl/actions/initialize_old.py +0 -245
- contentctl/objects/observable.py +0 -39
- contentctl-5.0.0a2.dist-info/RECORD +0 -170
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.0a3.dist-info}/LICENSE.md +0 -0
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.0a3.dist-info}/WHEEL +0 -0
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.0a3.dist-info}/entry_points.txt +0 -0
contentctl/objects/correlation_search.py
CHANGED

@@ -6,25 +6,25 @@ from enum import StrEnum, IntEnum
 from functools import cached_property
 
 from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
-from splunklib.results import JSONResultsReader, Message
-from splunklib.binding import HTTPError, ResponseReader
-import splunklib.client as splunklib
-from tqdm import tqdm
+from splunklib.results import JSONResultsReader, Message  # type: ignore
+from splunklib.binding import HTTPError, ResponseReader  # type: ignore
+import splunklib.client as splunklib  # type: ignore
+from tqdm import tqdm  # type: ignore
 
 from contentctl.objects.risk_analysis_action import RiskAnalysisAction
 from contentctl.objects.notable_action import NotableAction
 from contentctl.objects.base_test_result import TestResultStatus
 from contentctl.objects.integration_test_result import IntegrationTestResult
 from contentctl.actions.detection_testing.progress_bar import (
-    format_pbar_string,
+    format_pbar_string,  # type: ignore
     TestReportingType,
-    TestingStates
+    TestingStates,
 )
 from contentctl.objects.errors import (
     IntegrationTestingError,
     ServerError,
     ClientError,
-    ValidationFailed
+    ValidationFailed,
 )
 from contentctl.objects.detection import Detection
 from contentctl.objects.risk_event import RiskEvent
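Most of the changes in this file add `# type: ignore` comments. A minimal sketch of why, assuming a strict checker such as mypy: splunklib ships without type stubs or a `py.typed` marker, so both the imports and attribute access on the untyped objects they return are flagged unless suppressed.

```python
# Sketch only; "splunklib" must be installed, and the mypy message below is the
# stock error for untyped third-party imports, not contentctl output.
from splunklib.results import JSONResultsReader, Message  # type: ignore
# Without the comment, mypy reports:
#   error: Skipping analyzing "splunklib.results": module is installed,
#   but missing library stubs or py.typed marker  [import-untyped]

def message_text(result: Message) -> str:  # type: ignore
    # attributes on untyped objects are "Any"; ignores silence follow-on errors
    return f"SPLUNK: {result.message}"
```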
@@ -65,7 +65,9 @@ def get_logger() -> logging.Logger:
     handler = logging.NullHandler()
 
     # Format our output
-    formatter = logging.Formatter("%(asctime)s - %(levelname)s:%(name)s - %(message)s")
+    formatter = logging.Formatter(
+        "%(asctime)s - %(levelname)s:%(name)s - %(message)s"
+    )
     handler.setFormatter(formatter)
 
     # Set handler level and add to logger
@@ -79,6 +81,7 @@ class SavedSearchKeys(StrEnum):
     """
     Various keys into the SavedSearch content
     """
+
     # setup the names of the keys we expect to access in content
     EARLIEST_TIME_KEY = "dispatch.earliest_time"
     LATEST_TIME_KEY = "dispatch.latest_time"

@@ -92,6 +95,7 @@ class Indexes(StrEnum):
     """
     Indexes we search against
     """
+
     # setup the names of the risk and notable indexes
     RISK_INDEX = "risk"
     NOTABLE_INDEX = "notable"

@@ -101,6 +105,7 @@ class TimeoutConfig(IntEnum):
     """
     Configuration values for the exponential backoff timer
     """
+
     # base amount to sleep for before beginning exponential backoff during testing
     BASE_SLEEP = 60
 

@@ -118,6 +123,7 @@ class ScheduleConfig(StrEnum):
     """
     Configuraton values for the saved search schedule
     """
+
     EARLIEST_TIME = "-5y@y"
     LATEST_TIME = "-1m@m"
     CRON_SCHEDULE = "*/1 * * * *"
@@ -132,11 +138,10 @@ class ResultIterator:
     :param response_reader: a ResponseReader object
     :param logger: a Logger object
     """
+
     def __init__(self, response_reader: ResponseReader) -> None:
         # init the results reader
-        self.results_reader: JSONResultsReader = JSONResultsReader(
-            response_reader
-        )
+        self.results_reader: JSONResultsReader = JSONResultsReader(response_reader)
 
         # get logger
         self.logger: logging.Logger = get_logger()
@@ -150,18 +155,18 @@ class ResultIterator:
             # log messages, or raise if error
             if isinstance(result, Message):
                 # convert level string to level int
-                level_name = result.type.strip().upper()
+                level_name = result.type.strip().upper()  # type: ignore
                 level: int = logging.getLevelName(level_name)
 
                 # log message at appropriate level and raise if needed
-                message = f"SPLUNK: {result.message}"
+                message = f"SPLUNK: {result.message}"  # type: ignore
                 self.logger.log(level, message)
                 if level == logging.ERROR:
                     raise ServerError(message)
 
             # if dict, just return
             elif isinstance(result, dict):
-                return result
+                return result  # type: ignore
 
             # raise for any unexpected types
             else:
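For context, a condensed sketch of the iteration pattern this class wraps, assuming splunklib is installed: `JSONResultsReader` yields `splunklib.results.Message` objects (server diagnostics) interleaved with `dict` results, and the caller branches on the type, as the hunk above does.

```python
import logging
from splunklib.binding import ResponseReader  # type: ignore
from splunklib.results import JSONResultsReader, Message  # type: ignore

def iter_result_dicts(response_reader: ResponseReader):  # type: ignore
    """Yield only dict results, logging server Messages at their own level."""
    logger = logging.getLogger(__name__)
    for result in JSONResultsReader(response_reader):
        if isinstance(result, Message):
            # Message.type is a level string such as "INFO" or "ERROR"
            level = logging.getLevelName(result.type.strip().upper())
            logger.log(level, f"SPLUNK: {result.message}")
        elif isinstance(result, dict):
            yield result
```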
@@ -178,14 +183,13 @@ class PbarData(BaseModel):
     :param fq_test_name: the fully qualifed (fq) test name ("<detection_name>:<test_name>") used for logging
     :param start_time: the start time used for logging
     """
-    pbar: tqdm
+
+    pbar: tqdm  # type: ignore
     fq_test_name: str
     start_time: float
 
     # needed to support the tqdm type
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
 
 class CorrelationSearch(BaseModel):
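The `model_config` one-liners here (and in the `CorrelationSearch` hunk just below) rely on two Pydantic v2 settings. A minimal sketch with a hypothetical model name standing in for `PbarData`:

```python
from pydantic import BaseModel, ConfigDict
from tqdm import tqdm  # type: ignore

class PbarHolder(BaseModel):  # hypothetical stand-in for PbarData
    # tqdm has no Pydantic schema, so validation would normally fail;
    # arbitrary_types_allowed falls back to a plain isinstance() check.
    # extra="forbid" (used by CorrelationSearch) rejects unexpected kwargs.
    model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")

    pbar: tqdm  # type: ignore
    fq_test_name: str

holder = PbarHolder(pbar=tqdm(total=10), fq_test_name="detection:test")  # ok
# PbarHolder(pbar=tqdm(total=10), fq_test_name="x", oops=1)  # ValidationError
```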
@@ -198,6 +202,7 @@ class CorrelationSearch(BaseModel):
     :param pbar_data: the encapsulated info needed for logging w/ pbar
     :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
     """
+
     # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
     detection: Detection = Field(...)
 

@@ -232,10 +237,7 @@ class CorrelationSearch(BaseModel):
 
     # Need arbitrary types to allow fields w/ types like SavedSearch; we also want to forbid
     # unexpected fields
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-        extra='forbid'
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")
 
     def model_post_init(self, __context: Any) -> None:
         super().model_post_init(__context)
@@ -309,7 +311,7 @@ class CorrelationSearch(BaseModel):
         The earliest time configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY]
+            return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY]  # type: ignore
         else:
             raise ClientError(
                 "Something unexpected went wrong in initialization; saved_search was not populated"

@@ -321,7 +323,7 @@ class CorrelationSearch(BaseModel):
         The latest time configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY]
+            return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY]  # type: ignore
         else:
             raise ClientError(
                 "Something unexpected went wrong in initialization; saved_search was not populated"

@@ -333,7 +335,7 @@ class CorrelationSearch(BaseModel):
         The cron schedule configured for the saved search
         """
         if self.saved_search is not None:
-            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY]
+            return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY]  # type: ignore
         else:
             raise ClientError(
                 "Something unexpected went wrong in initialization; saved_search was not populated"

@@ -345,7 +347,7 @@ class CorrelationSearch(BaseModel):
         Whether the saved search is enabled
         """
         if self.saved_search is not None:
-            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY]):
+            if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY]):  # type: ignore
                 return False
             else:
                 return True

@@ -354,7 +356,7 @@ class CorrelationSearch(BaseModel):
                 "Something unexpected went wrong in initialization; saved_search was not populated"
             )
 
-    @
+    @property
     def has_risk_analysis_action(self) -> bool:
         """Whether the correlation search has an associated risk analysis Adaptive Response Action
         :return: a boolean indicating whether it has a risk analysis Adaptive Response Action
@@ -405,11 +407,13 @@ class CorrelationSearch(BaseModel):
         """
         # grab risk details if present
         self._risk_analysis_action = CorrelationSearch._get_risk_analysis_action(
-            self.saved_search.content
+            self.saved_search.content  # type: ignore
         )
 
         # grab notable details if present
-        self._notable_action = CorrelationSearch._get_notable_action(self.saved_search.content)
+        self._notable_action = CorrelationSearch._get_notable_action(
+            self.saved_search.content
+        )  # type: ignore
 
     def refresh(self) -> None:
         """Refreshes the metadata in the SavedSearch entity, and re-parses the fields we care about

@@ -417,10 +421,9 @@ class CorrelationSearch(BaseModel):
         After operations we expect to alter the state of the SavedSearch, we call refresh so that we have a local
         representation of the new state; then we extrat what we care about into this instance
         """
-        self.logger.debug(
-            f"Refreshing SavedSearch metadata for {self.name}...")
+        self.logger.debug(f"Refreshing SavedSearch metadata for {self.name}...")
         try:
-            self.saved_search.refresh()
+            self.saved_search.refresh()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered during refresh: {e}")
         self._parse_risk_and_notable_actions()

@@ -434,7 +437,7 @@ class CorrelationSearch(BaseModel):
         """
         self.logger.debug(f"Enabling {self.name}...")
         try:
-            self.saved_search.enable()
+            self.saved_search.enable()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while enabling detection: {e}")
         if refresh:

@@ -449,7 +452,7 @@ class CorrelationSearch(BaseModel):
         """
         self.logger.debug(f"Disabling {self.name}...")
         try:
-            self.saved_search.disable()
+            self.saved_search.disable()  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while disabling detection: {e}")
         if refresh:

@@ -460,7 +463,7 @@ class CorrelationSearch(BaseModel):
         earliest_time: str = ScheduleConfig.EARLIEST_TIME,
         latest_time: str = ScheduleConfig.LATEST_TIME,
         cron_schedule: str = ScheduleConfig.CRON_SCHEDULE,
-        refresh: bool = True
+        refresh: bool = True,
     ) -> None:
         """Updates the correlation search timeframe to work with test data
 
@@ -477,12 +480,12 @@ class CorrelationSearch(BaseModel):
         data = {
             SavedSearchKeys.EARLIEST_TIME_KEY: earliest_time,
             SavedSearchKeys.LATEST_TIME_KEY: latest_time,
-            SavedSearchKeys.CRON_SCHEDULE_KEY: cron_schedule
+            SavedSearchKeys.CRON_SCHEDULE_KEY: cron_schedule,
         }
         self.logger.info(data)
         self.logger.info(f"Updating timeframe for '{self.name}': {data}")
         try:
-            self.saved_search.update(**data)
+            self.saved_search.update(**data)  # type: ignore
         except HTTPError as e:
             raise ServerError(f"HTTP error encountered while updating timeframe: {e}")
 
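The `update()`/`refresh()` calls being annotated in these hunks follow splunklib's standard saved-search entity workflow. A minimal sketch, with hypothetical connection values and search name (the dispatch keys mirror `SavedSearchKeys` above):

```python
import splunklib.client as splunklib  # type: ignore

# hypothetical host/credentials/search name, for illustration only
service = splunklib.connect(
    host="127.0.0.1", port=8089, username="admin", password="changeme"
)
saved_search = service.saved_searches["ESCU - My Detection - Rule"]
saved_search.update(
    **{
        "dispatch.earliest_time": "-5y@y",
        "dispatch.latest_time": "-1m@m",
        "cron_schedule": "*/1 * * * *",
    }
)
saved_search.refresh()  # re-fetch so .content reflects the server-side state
```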
@@ -531,7 +534,9 @@ class CorrelationSearch(BaseModel):
 
         # Use the cached risk_events unless we're forcing an update
         if self._risk_events is not None:
-            self.logger.debug(f"Using cached risk events ({len(self._risk_events)} total).")
+            self.logger.debug(
+                f"Using cached risk events ({len(self._risk_events)} total)."
+            )
             return self._risk_events
 
         # TODO (#248): Refactor risk/notable querying to pin to a single savedsearch ID

@@ -553,7 +558,9 @@ class CorrelationSearch(BaseModel):
                 parsed_raw = json.loads(result["_raw"])
                 event = RiskEvent.model_validate(parsed_raw)
             except Exception:
-                self.logger.error(f"Failed to parse RiskEvent from search result: {result}")
+                self.logger.error(
+                    f"Failed to parse RiskEvent from search result: {result}"
+                )
                 raise
             events.append(event)
             self.logger.debug(f"Found risk event for '{self.name}': {event}")

@@ -597,7 +604,9 @@ class CorrelationSearch(BaseModel):
 
         # Use the cached notable_events unless we're forcing an update
         if self._notable_events is not None:
-            self.logger.debug(f"Using cached notable events ({len(self._notable_events)} total).")
+            self.logger.debug(
+                f"Using cached notable events ({len(self._notable_events)} total)."
+            )
             return self._notable_events
 
         # Search for all notable events from a single scheduled search (indicated by orig_sid)

@@ -618,7 +627,9 @@ class CorrelationSearch(BaseModel):
                 parsed_raw = json.loads(result["_raw"])
                 event = NotableEvent.model_validate(parsed_raw)
             except Exception:
-                self.logger.error(f"Failed to parse NotableEvent from search result: {result}")
+                self.logger.error(
+                    f"Failed to parse NotableEvent from search result: {result}"
+                )
                 raise
             events.append(event)
             self.logger.debug(f"Found notable event for '{self.name}': {event}")

@@ -653,7 +664,9 @@ class CorrelationSearch(BaseModel):
             " with it; cannot validate."
         )
 
-        risk_object_counts: dict[int, int] = {id(x): 0 for x in self.detection.rba.risk_objects}
+        risk_object_counts: dict[int, int] = {
+            id(x): 0 for x in self.detection.rba.risk_objects
+        }
 
         # Get the risk events; note that we use the cached risk events, expecting they were
         # saved by a prior call to risk_event_exists

@@ -670,7 +683,9 @@ class CorrelationSearch(BaseModel):
             event.validate_against_detection(self.detection)
 
             # Update risk object count based on match
-            matched_risk_object = event.get_matched_risk_object(self.detection.rba.risk_objects)
+            matched_risk_object = event.get_matched_risk_object(
+                self.detection.rba.risk_objects
+            )
             self.logger.debug(
                 f"Matched risk event (object={event.es_risk_object}, type={event.es_risk_object_type}) "
                 f"to detection's risk object (name={matched_risk_object.field}, "

@@ -740,7 +755,9 @@ class CorrelationSearch(BaseModel):
 
     # NOTE: it would be more ideal to switch this to a system which gets the handle of the saved search job and polls
     # it for completion, but that seems more tricky
-    def test(self, max_sleep: int = TimeoutConfig.MAX_SLEEP, raise_on_exc: bool = False) -> IntegrationTestResult:
+    def test(
+        self, max_sleep: int = TimeoutConfig.MAX_SLEEP, raise_on_exc: bool = False
+    ) -> IntegrationTestResult:
         """Execute the integration test
 
         Executes an integration test for this CorrelationSearch. First, ensures no matching risk/notables already exist

@@ -772,9 +789,7 @@ class CorrelationSearch(BaseModel):
 
         try:
             # first make sure the indexes are currently empty and the detection is starting from a disabled state
-            self.logger.debug(
-                "Cleaning up any pre-existing risk/notable events..."
-            )
+            self.logger.debug("Cleaning up any pre-existing risk/notable events...")
             self.update_pbar(TestingStates.PRE_CLEANUP)
             if self.risk_event_exists():
                 self.logger.warning(

@@ -806,7 +821,9 @@ class CorrelationSearch(BaseModel):
             # loop so long as the elapsed time is less than max_sleep
             while elapsed_sleep_time < max_sleep:
                 # sleep so the detection job can finish
-                self.logger.info(f"Waiting {time_to_sleep} for {self.name} so it can finish")
+                self.logger.info(
+                    f"Waiting {time_to_sleep} for {self.name} so it can finish"
+                )
                 self.update_pbar(TestingStates.VALIDATING)
                 time.sleep(time_to_sleep)
                 elapsed_sleep_time += time_to_sleep
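The loop in the hunk above drives the exponential backoff that `TimeoutConfig` parameterizes. A condensed sketch of the control flow, assuming a `check()` callable standing in for the risk/notable queries and placeholder constants (the real values live in `TimeoutConfig`):

```python
import time

def wait_for_events(check, base_sleep: int = 60, max_sleep: int = 300) -> bool:
    """Sleep, poll, and double the sleep until the time budget is exhausted."""
    elapsed, time_to_sleep = 0, base_sleep
    while elapsed < max_sleep:
        time.sleep(time_to_sleep)
        elapsed += time_to_sleep
        if check():
            return True
        # back off exponentially, but never sleep past the remaining budget
        time_to_sleep = min(time_to_sleep * 2, max_sleep - elapsed)
    return False
```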
@@ -895,7 +912,7 @@ class CorrelationSearch(BaseModel):
                         wait_duration=elapsed_sleep_time,
                         exception=e,
                     )
-                    self.logger.exception(result.message)
+                    self.logger.exception(result.message)  # type: ignore
                 else:
                     raise e
         except Exception as e:

@@ -905,7 +922,10 @@ class CorrelationSearch(BaseModel):
 
         # log based on result status
         if result is not None:
-            if result.status == TestResultStatus.PASS or result.status == TestResultStatus.SKIP:
+            if (
+                result.status == TestResultStatus.PASS
+                or result.status == TestResultStatus.SKIP
+            ):
                 self.logger.info(f"{result.status.name}: {result.message}")
             elif result.status == TestResultStatus.FAIL:
                 self.logger.error(f"{result.status.name}: {result.message}")

@@ -928,11 +948,11 @@ class CorrelationSearch(BaseModel):
         :param query: the SPL string to run
         """
         self.logger.debug(f"Executing query: `{query}`")
-        job = self.service.search(query, exec_mode="blocking")
+        job = self.service.search(query, exec_mode="blocking")  # type: ignore
 
         # query the results, catching any HTTP status code errors
         try:
-            response_reader: ResponseReader = job.results(output_mode="json")
+            response_reader: ResponseReader = job.results(output_mode="json")  # type: ignore
         except HTTPError as e:
             # e.g. -> HTTP 400 Bad Request -- b'{"messages":[{"type":"FATAL","text":"Error in \'delete\' command: You
             # have insufficient privileges to delete events."}]}'

@@ -940,7 +960,7 @@ class CorrelationSearch(BaseModel):
             self.logger.error(message)
             raise ServerError(message)
 
-        return ResultIterator(response_reader)
+        return ResultIterator(response_reader)  # type: ignore
 
     def _delete_index(self, index: str) -> None:
         """Deletes events in a given index

@@ -991,7 +1011,7 @@ class CorrelationSearch(BaseModel):
 
         # Add indexes to purge
         if delete_test_index:
-            self.indexes_to_purge.add(self.test_index)
+            self.indexes_to_purge.add(self.test_index)  # type: ignore
         if self._risk_events is not None:
             self.indexes_to_purge.add(Indexes.RISK_INDEX)
         if self._notable_events is not None:

@@ -1019,5 +1039,5 @@ class CorrelationSearch(BaseModel):
             self.pbar_data.fq_test_name,
             state,
             self.pbar_data.start_time,
-            True
+            True,
         )
contentctl/objects/dashboard.py
CHANGED
@@ -8,7 +8,7 @@ from contentctl.objects.security_content_object import SecurityContentObject
 from contentctl.objects.config import build
 from enum import StrEnum
 
-DEFAULT_DASHBAORD_JINJA2_TEMPLATE = '''<dashboard version="2" theme="{{ dashboard.theme }}">
+DEFAULT_DASHBAORD_JINJA2_TEMPLATE = """<dashboard version="2" theme="{{ dashboard.theme }}">
 <label>{{ dashboard.label(config) }}</label>
 <description></description>
 <definition><![CDATA[
@@ -21,28 +21,40 @@ DEFAULT_DASHBAORD_JINJA2_TEMPLATE = '''<dashboard version="2" theme="{{ dashboard.theme }}">
     "hideExport": false
 }
 ]]></meta>
-</dashboard>'''
+</dashboard>"""
+
 
 class DashboardTheme(StrEnum):
     light = "light"
     dark = "dark"
 
+
 class Dashboard(SecurityContentObject):
-    j2_template: str = Field(
-
-
-
-
-
-
-
-
+    j2_template: str = Field(
+        default=DEFAULT_DASHBAORD_JINJA2_TEMPLATE,
+        description="Jinja2 Template used to construct the dashboard",
+    )
+    description: str = Field(
+        ...,
+        description="A description of the dashboard. This does not have to match "
+        "the description of the dashboard in the JSON file.",
+        max_length=10000,
+    )
+    theme: DashboardTheme = Field(
+        default=DashboardTheme.light,
+        description="The theme of the dashboard. Choose between 'light' and 'dark'.",
+    )
+    json_obj: Json[dict[str, Any]] = Field(
+        ..., description="Valid JSON object that describes the dashboard"
+    )
+
+    def label(self, config: build) -> str:
         return f"{config.app.label} - {self.name}"
-
+
     @model_validator(mode="before")
     @classmethod
-    def validate_fields_from_json(cls, data:Any)->Any:
-        yml_file_name:str|None = data.get("file_path", None)
+    def validate_fields_from_json(cls, data: Any) -> Any:
+        yml_file_name: str | None = data.get("file_path", None)
         if yml_file_name is None:
             raise ValueError("File name not passed to dashboard constructor")
         yml_file_path = pathlib.Path(yml_file_name)
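The new `json_obj: Json[dict[str, Any]]` field explains the `data["json_obj"] = json.dumps(json_obj)` line in the following hunk: Pydantic's `Json` type takes a JSON *string* as input and parses it during validation. A minimal sketch with a hypothetical model:

```python
import json
from typing import Any
from pydantic import BaseModel, Json

class HoldsJson(BaseModel):  # hypothetical stand-in for Dashboard's field
    json_obj: Json[dict[str, Any]]

# input must be a JSON-encoded string, hence json.dumps() in the validator
m = HoldsJson(json_obj=json.dumps({"title": "My Dashboard", "description": "d"}))
assert m.json_obj["title"] == "My Dashboard"  # parsed back into a dict
```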
@@ -50,51 +62,53 @@ class Dashboard(SecurityContentObject):
 
         if not json_file_path.is_file():
             raise ValueError(f"Required file {json_file_path} does not exist.")
-
-        with open(json_file_path, 'r') as jsonFilePointer:
+
+        with open(json_file_path, "r") as jsonFilePointer:
             try:
-                json_obj:dict[str,Any] = json.load(jsonFilePointer)
+                json_obj: dict[str, Any] = json.load(jsonFilePointer)
             except Exception as e:
                 raise ValueError(f"Unable to load data from {json_file_path}: {str(e)}")
 
-        name_from_file = data.get("name",None)
-        name_from_json = json_obj.get("title",None)
+        name_from_file = data.get("name", None)
+        name_from_json = json_obj.get("title", None)
 
-        errors:list[str] = []
+        errors: list[str] = []
         if name_from_json is None:
             errors.append(f"'title' field is missing from {json_file_path}")
         elif name_from_json != name_from_file:
-            errors.append(
-
-
-
-
+            errors.append(
+                f"The 'title' field in the JSON file [{json_file_path}] does not match the 'name' field in the YML object [{yml_file_path}]. These two MUST match:\n "
+                f"title in JSON : {name_from_json}\n "
+                f"title in YML : {name_from_file}\n "
+            )
+
+        description_from_json = json_obj.get("description", None)
         if description_from_json is None:
             errors.append("'description' field is missing from field 'json_object'")
-
-        if len(errors) > 0:
+
+        if len(errors) > 0:
             err_string = "\n - ".join(errors)
             raise ValueError(f"Error(s) validating dashboard:\n - {err_string}")
-
-        data['name'] = name_from_file
-        data['json_obj'] = json.dumps(json_obj)
+
+        data["name"] = name_from_file
+        data["json_obj"] = json.dumps(json_obj)
         return data
 
-
     def pretty_print_json_obj(self):
         return json.dumps(self.json_obj, indent=4)
-
-    def getOutputFilepathRelativeToAppRoot(self, config:build)->pathlib.Path:
+
+    def getOutputFilepathRelativeToAppRoot(self, config: build) -> pathlib.Path:
         filename = f"{self.file_path.stem}.xml".lower()
-        return pathlib.Path("default/data/ui/views")/filename
-
-
-    def writeDashboardFile(self, j2_env:Environment, config:build):
+        return pathlib.Path("default/data/ui/views") / filename
+
+    def writeDashboardFile(self, j2_env: Environment, config: build):
         template = j2_env.from_string(self.j2_template)
         dashboard_text = template.render(config=config, dashboard=self)
 
-        with open(
-
+        with open(
+            config.getPackageDirectoryPath()
+            / self.getOutputFilepathRelativeToAppRoot(config),
+            "a",
+        ) as f:
+            output_xml = dashboard_text.encode("utf-8", "ignore").decode("utf-8")
             f.write(output_xml)
-
-

contentctl/objects/data_source.py
CHANGED

@@ -1,6 +1,6 @@
 from __future__ import annotations
 from typing import Optional, Any
-from pydantic import Field, HttpUrl, model_serializer, BaseModel
+from pydantic import Field, HttpUrl, model_serializer, BaseModel
 from contentctl.objects.security_content_object import SecurityContentObject
 
 

@@ -8,6 +8,8 @@ class TA(BaseModel):
     name: str
     url: HttpUrl | None = None
     version: str
+
+
 class DataSource(SecurityContentObject):
     source: str = Field(...)
     sourcetype: str = Field(...)

@@ -19,14 +21,13 @@ class DataSource(SecurityContentObject):
     convert_to_log_source: None | list = None
     example_log: None | str = None
 
-
     @model_serializer
     def serialize_model(self):
-        #Call serializer for parent
+        # Call serializer for parent
         super_fields = super().serialize_model()
-
-        #All fields custom to this model
-        model:dict[str,Any] = {
+
+        # All fields custom to this model
+        model: dict[str, Any] = {
             "source": self.source,
             "sourcetype": self.sourcetype,
             "separator": self.separator,

@@ -35,12 +36,11 @@ class DataSource(SecurityContentObject):
             "fields": self.fields,
             "field_mappings": self.field_mappings,
             "convert_to_log_source": self.convert_to_log_source,
-            "example_log":self.example_log
+            "example_log": self.example_log,
         }
-
-
-        #Combine fields from this model with fields from parent
+
+        # Combine fields from this model with fields from parent
         super_fields.update(model)
-
-        #return the model
-        return super_fields
+
+        # return the model
+        return super_fields