contentctl 4.2.2__py3-none-any.whl → 4.2.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +41 -47
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +1 -1
- contentctl/actions/detection_testing/views/DetectionTestingView.py +1 -4
- contentctl/actions/validate.py +40 -1
- contentctl/enrichments/attack_enrichment.py +6 -8
- contentctl/enrichments/cve_enrichment.py +3 -3
- contentctl/helper/splunk_app.py +263 -0
- contentctl/input/director.py +1 -1
- contentctl/input/ssa_detection_builder.py +8 -6
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +362 -336
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +117 -103
- contentctl/objects/atomic.py +7 -10
- contentctl/objects/base_test.py +1 -1
- contentctl/objects/base_test_result.py +7 -5
- contentctl/objects/baseline_tags.py +2 -30
- contentctl/objects/config.py +5 -4
- contentctl/objects/correlation_search.py +316 -96
- contentctl/objects/data_source.py +7 -2
- contentctl/objects/detection_tags.py +128 -102
- contentctl/objects/errors.py +18 -0
- contentctl/objects/lookup.py +1 -0
- contentctl/objects/mitre_attack_enrichment.py +3 -3
- contentctl/objects/notable_event.py +20 -0
- contentctl/objects/observable.py +20 -26
- contentctl/objects/risk_analysis_action.py +2 -2
- contentctl/objects/risk_event.py +315 -0
- contentctl/objects/ssa_detection_tags.py +1 -1
- contentctl/objects/story_tags.py +2 -2
- contentctl/objects/unit_test.py +1 -9
- contentctl/output/data_source_writer.py +4 -4
- {contentctl-4.2.2.dist-info → contentctl-4.2.4.dist-info}/METADATA +5 -8
- {contentctl-4.2.2.dist-info → contentctl-4.2.4.dist-info}/RECORD +35 -31
- {contentctl-4.2.2.dist-info → contentctl-4.2.4.dist-info}/LICENSE.md +0 -0
- {contentctl-4.2.2.dist-info → contentctl-4.2.4.dist-info}/WHEEL +0 -0
- {contentctl-4.2.2.dist-info → contentctl-4.2.4.dist-info}/entry_points.txt +0 -0
--- a/contentctl/objects/correlation_search.py
+++ b/contentctl/objects/correlation_search.py

@@ -1,9 +1,10 @@
 import logging
 import time
+import json
 from typing import Union, Optional, Any
 from enum import Enum

-from pydantic import BaseModel, validator, Field
+from pydantic import BaseModel, validator, Field, PrivateAttr
 from splunklib.results import JSONResultsReader, Message  # type: ignore
 from splunklib.binding import HTTPError, ResponseReader  # type: ignore
 import splunklib.client as splunklib  # type: ignore
@@ -18,6 +19,16 @@ from contentctl.actions.detection_testing.progress_bar import (
     TestReportingType,
     TestingStates
 )
+from contentctl.objects.errors import (
+    IntegrationTestingError,
+    ServerError,
+    ClientError,
+    ValidationFailed
+)
+from contentctl.objects.detection import Detection
+from contentctl.objects.risk_event import RiskEvent
+from contentctl.objects.notable_event import NotableEvent
+from contentctl.objects.observable import Observable


 # Suppress logging by default; enable for local testing
@@ -64,21 +75,6 @@ def get_logger() -> logging.Logger:
     return logger


-class IntegrationTestingError(Exception):
-    """Base exception class for integration testing"""
-    pass
-
-
-class ServerError(IntegrationTestingError):
-    """An error encounterd during integration testing, as provided by the server (Splunk instance)"""
-    pass
-
-
-class ClientError(IntegrationTestingError):
-    """An error encounterd during integration testing, on the client's side (locally)"""
-    pass
-
-
 class SavedSearchKeys(str, Enum):
     """
     Various keys into the SavedSearch content
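These three exception classes now live in the new `contentctl/objects/errors.py` (+18 lines in the file list above), together with the `ValidationFailed` class that the new import block pulls in. A plausible sketch of the relocated module, reconstructed from the names and docstrings visible in this diff (the actual file may differ):

```python
# Sketch of contentctl/objects/errors.py as implied by this diff. The first three
# docstrings come from the removed code above (spelling fixed); ValidationFailed
# and its docstring are inferred from the new imports in correlation_search.py.
class IntegrationTestingError(Exception):
    """Base exception class for integration testing"""


class ServerError(IntegrationTestingError):
    """An error encountered during integration testing, as provided by the server (Splunk instance)"""


class ClientError(IntegrationTestingError):
    """An error encountered during integration testing, on the client's side (locally)"""


class ValidationFailed(IntegrationTestingError):
    """Raised when validation of risk/notable events fails (assumed)"""
```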
@@ -112,15 +108,15 @@ class TimeoutConfig(int, Enum):
     MAX_SLEEP = 210


-# TODO (
+# TODO (#226): evaluate sane defaults for timeframe for integration testing (e.g. 5y is good
 # now, but maybe not always...); maybe set latest/earliest to None?
 class ScheduleConfig(str, Enum):
     """
     Configuraton values for the saved search schedule
     """
-    EARLIEST_TIME
-    LATEST_TIME
-    CRON_SCHEDULE
+    EARLIEST_TIME = "-5y@y"
+    LATEST_TIME = "-1m@m"
+    CRON_SCHEDULE = "*/1 * * * *"


 class ResultIterator:
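In Splunk relative-time syntax, `-5y@y` is five years back snapped to the start of the year and `-1m@m` is one minute ago snapped to the minute; the cron expression `*/1 * * * *` fires every minute. A minimal sketch of how these values would typically be applied to a splunklib saved search (the keyword names mirror `savedsearches.conf` keys; the actual call site is not shown in this hunk):

```python
# Assumed usage of the ScheduleConfig values via splunklib's generic
# Entity.update(), which passes kwargs through as savedsearches.conf settings.
saved_search.update(
    is_scheduled=1,
    cron_schedule=ScheduleConfig.CRON_SCHEDULE.value,  # run every minute
    **{
        "dispatch.earliest_time": ScheduleConfig.EARLIEST_TIME.value,  # -5y@y
        "dispatch.latest_time": ScheduleConfig.LATEST_TIME.value,      # -1m@m
    },
)
```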
@@ -154,7 +150,7 @@ class ResultIterator:
             level: int = logging.getLevelName(level_name)

             # log message at appropriate level and raise if needed
-            message = f"
+            message = f"SPLUNK: {result.message}"
             self.logger.log(level, message)
             if level == logging.ERROR:
                 raise ServerError(message)
@@ -192,15 +188,15 @@ class CorrelationSearch(BaseModel):

    In Enterprise Security, a correlation search is wrapper around the saved search entity. This search represents a
    detection rule for our purposes.
-    :param
+    :param detection: a Detection model
    :param service: a Service instance representing a connection to a Splunk instance
    :param pbar_data: the encapsulated info needed for logging w/ pbar
    :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
    """
    ## The following three fields are explicitly needed at instantiation  # noqa: E266

-    # the
-
+    # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
+    detection: Detection

    # a Service instance representing a connection to a Splunk instance
    service: splunklib.Service
@@ -240,25 +236,33 @@ class CorrelationSearch(BaseModel):
    # The notable adaptive response action (if defined)
    notable_action: Union[NotableAction, None] = None

+    # The list of risk events found
+    _risk_events: Optional[list[RiskEvent]] = PrivateAttr(default=None)
+
+    # The list of notable events found
+    _notable_events: Optional[list[NotableEvent]] = PrivateAttr(default=None)
+
    class Config:
        # needed to allow fields w/ types like SavedSearch
        arbitrary_types_allowed = True
+        # We want to have more ridgid typing
+        extra = 'forbid'

    @validator("name", always=True)
    @classmethod
    def _convert_detection_to_search_name(cls, v, values) -> str:
        """
-        Validate
+        Validate name and derive if None
        """
-        if "
-        raise ValueError("
+        if "detection" not in values:
+            raise ValueError("detection missing; name is dependent on detection")

-        expected_name = f"ESCU - {values['
+        expected_name = f"ESCU - {values['detection'].name} - Rule"
        if v is not None and v != expected_name:
            raise ValueError(
-                "name must be derived from
+                "name must be derived from detection; leave as None and it will be derived automatically"
            )
-        return
+        return expected_name

    @validator("splunk_path", always=True)
    @classmethod
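Two details in this hunk are easy to miss: pydantic `PrivateAttr` fields are excluded from validation and serialization, so `_risk_events`/`_notable_events` behave as plain per-instance caches, and `extra = 'forbid'` turns unknown constructor arguments into hard errors. An illustrative sketch of the resulting behavior (the `detection`, `service`, and `pbar_data` values are placeholders assumed to be valid):

```python
# Illustrative only; field values are placeholders.
search = CorrelationSearch(detection=detection, service=service, pbar_data=pbar_data)

# `name` was omitted, so the always=True validator derives it from the detection:
assert search.name == f"ESCU - {detection.name} - Rule"

# Private attrs start at their declared default and bypass validation entirely:
assert search._risk_events is None

# With Config.extra = 'forbid', a typo'd or unexpected field now raises:
CorrelationSearch(detection=detection, service=service, pbar_data=pbar_data, nmae="x")
# -> pydantic.ValidationError (extra fields are forbidden)
```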
@@ -410,7 +414,20 @@ class CorrelationSearch(BaseModel):
            return NotableAction.parse_from_dict(content)
        return None

-
+    @staticmethod
+    def _get_relevant_observables(observables: list[Observable]) -> list[Observable]:
+        """
+        Given a list of observables, identify the subset of those relevant for risk matching
+        :param observables: the Observable objects to filter
+        :returns: the filtered list of relevant observables
+        """
+        relevant = []
+        for observable in observables:
+            if not RiskEvent.ignore_observable(observable):
+                relevant.append(observable)
+        return relevant
+
+    # TODO (PEX-484): ideally, we could handle this and the following init w/ a call to
    # model_post_init, so that all the logic is encapsulated w/in _parse_risk_and_notable_actions
    # but that is a pydantic v2 feature (see the init validators for risk/notable actions):
    # https://docs.pydantic.dev/latest/api/base_model/#pydantic.main.BaseModel.model_post_init
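`RiskEvent.ignore_observable` comes from the new `contentctl/objects/risk_event.py` (+315 lines); its filtering criteria are not visible in this diff. As of this version the helper is only exercised by the commented-out observable matching in `validate_risk_events` further down, along the lines of:

```python
# Hypothetical call site, mirroring the commented-out code in validate_risk_events:
observables = CorrelationSearch._get_relevant_observables(self.detection.tags.observable)
```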
@@ -418,7 +435,7 @@ class CorrelationSearch(BaseModel):
        """Parses the risk/notable metadata we care about from self.saved_search.content

        :raises KeyError: if self.saved_search.content does not contain a required key
-        :raises json.JSONDecodeError: if the value at self.saved_search.content['
+        :raises json.JSONDecodeError: if the value at self.saved_search.content['action3.risk.param._risk'] can't be
            decoded from JSON into a dict
        :raises IntegrationTestingError: if the value at self.saved_search.content['action.risk.param._risk'] is
            unpacked to be anything other than a singleton
@@ -525,57 +542,231 @@ class CorrelationSearch(BaseModel):
        if refresh:
            self.refresh()

-    # TODO (cmcginley): make the search for risk/notable events a more specific query based on the
-    # search in question (and update the docstring to relfect when you do)
    def risk_event_exists(self) -> bool:
-        """Whether
+        """Whether at least one matching risk event exists
+
+        Queries the `risk` index and returns True if at least one matching risk event exists for
+        this search
+        :return: a bool indicating whether a risk event for this search exists in the risk index
+        """
+        # We always force an update on the cache when checking if events exist
+        events = self.get_risk_events(force_update=True)
+        return len(events) > 0
+
+    def get_risk_events(self, force_update: bool = False) -> list[RiskEvent]:
+        """Get risk events from the Splunk instance

-        Queries the `risk` index and returns
-        :
+        Queries the `risk` index and returns any matching risk events
+        :param force_update: whether the cached _risk_events should be forcibly updated if already
+            set
+        :return: a list of risk events
        """
-        #
-
+        # Reset the list of risk events if we're forcing an update
+        if force_update:
+            self.logger.debug("Resetting risk event cache.")
+            self._risk_events = None
+
+        # Use the cached risk_events unless we're forcing an update
+        if self._risk_events is not None:
+            self.logger.debug(f"Using cached risk events ({len(self._risk_events)} total).")
+            return self._risk_events
+
+        # Search for all risk events from a single scheduled search (indicated by orig_sid)
+        query = (
+            f'search index=risk search_name="{self.name}" [search index=risk search '
+            f'search_name="{self.name}" | head 1 | fields orig_sid] | tojson'
+        )
        result_iterator = self._search(query)
+
+        # Iterate over the events, storing them in a list and checking for any errors
+        events: list[RiskEvent] = []
        try:
            for result in result_iterator:
-                #
-                #
+                # sanity check that this result from the iterator is a risk event and not some
+                # other metadata
                if result["index"] == Indexes.RISK_INDEX.value:
-
-
-
+                    try:
+                        parsed_raw = json.loads(result["_raw"])
+                        event = RiskEvent.parse_obj(parsed_raw)
+                    except Exception:
+                        self.logger.error(f"Failed to parse RiskEvent from search result: {result}")
+                        raise
+                    events.append(event)
+                    self.logger.debug(f"Found risk event for '{self.name}': {event}")
        except ServerError as e:
            self.logger.error(f"Error returned from Splunk instance: {e}")
            raise e
-
-
+
+        # Log if no events were found
+        if len(events) < 1:
+            self.logger.debug(f"No risk events found for '{self.name}'")
+        else:
+            # Set the cache if we found events
+            self._risk_events = events
+            self.logger.debug(f"Caching {len(self._risk_events)} risk events.")
+
+        return events

    def notable_event_exists(self) -> bool:
        """Whether a notable event exists

-        Queries the `notable` index and returns True if a
-        :return: a bool indicating whether a
+        Queries the `notable` index and returns True if a notble event exists
+        :return: a bool indicating whether a notable event exists in the notable index
        """
-        # construct our query and issue our search job on the
-
+        # construct our query and issue our search job on the notsble index
+        # We always force an update on the cache when checking if events exist
+        events = self.get_notable_events(force_update=True)
+        return len(events) > 0
+
+    def get_notable_events(self, force_update: bool = False) -> list[NotableEvent]:
+        """Get notable events from the Splunk instance
+
+        Queries the `notable` index and returns any matching notable events
+        :param force_update: whether the cached _notable_events should be forcibly updated if
+            already set
+        :return: a list of notable events
+        """
+        # Reset the list of notable events if we're forcing an update
+        if force_update:
+            self.logger.debug("Resetting notable event cache.")
+            self._notable_events = None
+
+        # Use the cached notable_events unless we're forcing an update
+        if self._notable_events is not None:
+            self.logger.debug(f"Using cached notable events ({len(self._notable_events)} total).")
+            return self._notable_events
+
+        # Search for all notable events from a single scheduled search (indicated by orig_sid)
+        query = (
+            f'search index=notable search_name="{self.name}" [search index=notable search '
+            f'search_name="{self.name}" | head 1 | fields orig_sid] | tojson'
+        )
        result_iterator = self._search(query)
+
+        # Iterate over the events, storing them in a list and checking for any errors
+        events: list[NotableEvent] = []
        try:
            for result in result_iterator:
-                #
+                # sanity check that this result from the iterator is a notable event and not some
+                # other metadata
                if result["index"] == Indexes.NOTABLE_INDEX.value:
-
-
-
+                    try:
+                        parsed_raw = json.loads(result["_raw"])
+                        event = NotableEvent.parse_obj(parsed_raw)
+                    except Exception:
+                        self.logger.error(f"Failed to parse NotableEvent from search result: {result}")
+                        raise
+                    events.append(event)
+                    self.logger.debug(f"Found notable event for '{self.name}': {event}")
        except ServerError as e:
            self.logger.error(f"Error returned from Splunk instance: {e}")
            raise e
-        self.logger.debug(f"No notable event found for '{self.name}'")
-        return False

-
-
-
-
+        # Log if no events were found
+        if len(events) < 1:
+            self.logger.debug(f"No notable events found for '{self.name}'")
+        else:
+            # Set the cache if we found events
+            self._notable_events = events
+            self.logger.debug(f"Caching {len(self._notable_events)} notable events.")
+
+        return events
+
+    def validate_risk_events(self) -> None:
+        """Validates the existence of any expected risk events
+
+        First ensure the risk event exists, and if it does validate its risk message and make sure
+        any events align with the specified observables. Also adds the risk index to the purge list
+        if risk events existed
+        :param elapsed_sleep_time: an int representing the amount of time slept thus far waiting to
+            check the risks/notables
+        :returns: an IntegrationTestResult on failure; None on success
+        """
+        # TODO (PEX-433): Re-enable this check once we have refined the logic and reduced the false
+        #   positive rate in risk/obseravble matching
+        # Create a mapping of the relevant observables to counters
+        # observables = CorrelationSearch._get_relevant_observables(self.detection.tags.observable)
+        # observable_counts: dict[str, int] = {str(x): 0 for x in observables}
+        # if len(observables) != len(observable_counts):
+        #     raise ClientError(
+        #         f"At least two observables in '{self.detection.name}' have the same name."
+        #     )
+
+        # Get the risk events; note that we use the cached risk events, expecting they were
+        #   saved by a prior call to risk_event_exists
+        events = self.get_risk_events()
+
+        # Validate each risk event individually and record some aggregate counts
+        c = 0
+        for event in events:
+            c += 1
+            self.logger.debug(
+                f"Validating risk event ({event.risk_object}, {event.risk_object_type}): "
+                f"{c}/{len(events)}"
+            )
+            event.validate_against_detection(self.detection)
+
+            # TODO (PEX-433): Re-enable this check once we have refined the logic and reduced the
+            #   false positive rate in risk/obseravble matching
+            # Update observable count based on match
+            # matched_observable = event.get_matched_observable(self.detection.tags.observable)
+            # self.logger.debug(
+            #     f"Matched risk event ({event.risk_object}, {event.risk_object_type}) to observable "
+            #     f"({matched_observable.name}, {matched_observable.type}, {matched_observable.role})"
+            # )
+            # observable_counts[str(matched_observable)] += 1
+
+        # TODO (PEX-433): test my new contentctl logic against an old ESCU build; my logic should
+        #   detect the faulty attacker events -> this was the issue from the 4.28/4.27 release;
+        #   recreate by testing against one of those old builds w/ the bad config
+        # TODO (PEX-433): Re-enable this check once we have refined the logic and reduced the false
+        #   positive
+        #   rate in risk/obseravble matching
+        # TODO (PEX-433): I foresee issues here if for example a parent and child process share a
+        #   name (matched observable could be either) -> these issues are confirmed to exist, e.g.
+        #   `Windows Steal Authentication Certificates Export Certificate`
+        # Validate risk events in aggregate; we should have an equal amount of risk events for each
+        #   relevant observable, and the total count should match the total number of events
+        # individual_count: Optional[int] = None
+        # total_count = 0
+        # for observable_str in observable_counts:
+        #     self.logger.debug(
+        #         f"Observable <{observable_str}> match count: {observable_counts[observable_str]}"
+        #     )

+        #     # Grab the first value encountered if not set yet
+        #     if individual_count is None:
+        #         individual_count = observable_counts[observable_str]
+        #     else:
+        #         # Confirm that the count for the current observable matches the count of the others
+        #         if observable_counts[observable_str] != individual_count:
+        #             raise ValidationFailed(
+        #                 f"Count of risk events matching observable <\"{observable_str}\"> "
+        #                 f"({observable_counts[observable_str]}) does not match the count of those "
+        #                 f"matching other observables ({individual_count})."
+        #             )

+        #     # Aggregate total count of events matched to observables
+        #     total_count += observable_counts[observable_str]

+        # # Raise if the the number of events doesn't match the number of those matched to observables
+        # if len(events) != total_count:
+        #     raise ValidationFailed(
+        #         f"The total number of risk events {len(events)} does not match the number of "
+        #         f"risk events we were able to match against observables ({total_count})."
+        #     )

+    # TODO (PEX-434): implement deeper notable validation
+    def validate_notable_events(self) -> None:
+        """Validates the existence of any expected notables
+
+        Ensures the notable exists. Also adds the notable index to the purge list if notables
+        existed
+        :param elapsed_sleep_time: an int representing the amount of time slept thus far waiting to
+            check the risks/notables
+        :returns: an IntegrationTestResult on failure; None on success
+        """
+        raise NotImplementedError()

    # NOTE: it would be more ideal to switch this to a system which gets the handle of the saved search job and polls
    # it for completion, but that seems more tricky
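The new getters cache their results in the `PrivateAttr` fields and use an SPL subsearch to pin `orig_sid`, so only events produced by a single scheduled run of the search are counted. Rendered for a hypothetical detection name, the risk query produced by the f-strings above looks like this:

```python
# The subsearch returns the orig_sid of one event, which becomes a filter on the
# outer search, restricting results to events from that same scheduled run;
# `| tojson` then packs each event into a JSON _raw field for parsing.
query = (
    'search index=risk search_name="ESCU - Example Detection - Rule" '
    '[search index=risk search search_name="ESCU - Example Detection - Rule" '
    '| head 1 | fields orig_sid] | tojson'
)
```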
@@ -617,12 +808,12 @@ class CorrelationSearch(BaseModel):
            self.update_pbar(TestingStates.PRE_CLEANUP)
            if self.risk_event_exists():
                self.logger.warn(
-                    f"Risk events matching '{self.name}' already exist; marking for deletion"
-
+                    f"Risk events matching '{self.name}' already exist; marking for deletion"
+                )
            if self.notable_event_exists():
                self.logger.warn(
-                    f"Notable events matching '{self.name}' already exist; marking for deletion"
-
+                    f"Notable events matching '{self.name}' already exist; marking for deletion"
+                )
            self.cleanup()

            # skip test if no risk or notable action defined
@@ -641,7 +832,6 @@ class CorrelationSearch(BaseModel):
            self.logger.info(f"Forcing a run on {self.name}")
            self.update_pbar(TestingStates.FORCE_RUN)
            self.force_run()
-            time.sleep(TimeoutConfig.BASE_SLEEP.value)

            # loop so long as the elapsed time is less than max_sleep
            while elapsed_sleep_time < max_sleep:
@@ -659,34 +849,49 @@ class CorrelationSearch(BaseModel):
                # reset the result to None on each loop iteration
                result = None

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                try:
+                    # Validate risk events
+                    self.logger.debug("Checking for matching risk events")
+                    if self.has_risk_analysis_action:
+                        if self.risk_event_exists():
+                            # TODO (PEX-435): should this in the retry loop? or outside it?
+                            #   -> I've observed there being a missing risk event (15/16) on
+                            #   the first few tries, so this does help us check for true
+                            #   positives; BUT, if we have lots of failing detections, this
+                            #   will definitely add to the total wait time
+                            #   -> certain types of failures (e.g. risk message, or any value
+                            #   checking) should fail testing automatically
+                            #   -> other types, like those based on counts of risk events,
+                            #   should happen should fail more slowly as more events may be
+                            #   produced
+                            self.validate_risk_events()
+                        else:
+                            raise ValidationFailed(
+                                f"TEST FAILED: No matching risk event created for: {self.name}"
+                            )
+
+                    # Validate notable events
+                    self.logger.debug("Checking for matching notable events")
+                    if self.has_notable_action:
                        # NOTE: because we check this last, if both fail, the error message about notables will
-                        # always be the last to be added and thus the one surfaced to the user
-
-
-
-
-
-
-
-
+                        #   always be the last to be added and thus the one surfaced to the user
+                        if self.notable_event_exists():
+                            # TODO (PEX-435): should this in the retry loop? or outside it?
+                            # TODO (PEX-434): implement deeper notable validation (the method
+                            #   commented out below is unimplemented)
+                            # self.validate_notable_events(elapsed_sleep_time)
+                            pass
+                        else:
+                            raise ValidationFailed(
+                                f"TEST FAILED: No matching notable event created for: {self.name}"
+                            )
+                except ValidationFailed as e:
+                    self.logger.error(f"Risk/notable validation failed: {e}")
+                    result = IntegrationTestResult(
+                        status=TestResultStatus.FAIL,
+                        message=f"TEST FAILED: {e}",
+                        wait_duration=elapsed_sleep_time,
+                    )

                # if result is still None, then all checks passed and we can break the loop
                if result is None:
@@ -707,6 +912,7 @@ class CorrelationSearch(BaseModel):
                if (elapsed_sleep_time + time_to_sleep) > max_sleep:
                    time_to_sleep = max_sleep - elapsed_sleep_time

+        # TODO (PEX-436): should cleanup be in a finally block so it runs even on exception?
        # cleanup the created events, disable the detection and return the result
        self.logger.debug("Cleaning up any created risk/notable events...")
        self.update_pbar(TestingStates.POST_CLEANUP)
@@ -719,9 +925,13 @@ class CorrelationSearch(BaseModel):
                    wait_duration=elapsed_sleep_time,
                    exception=e,
                )
-                self.logger.exception(
+                self.logger.exception(result.message)  # type: ignore
            else:
                raise e
+        except Exception as e:
+            # Log any exceptions locally and raise to the caller
+            self.logger.exception(f"Unhandled exception during testing: {e}")
+            raise e

        # log based on result status
        if result is not None:
@@ -769,8 +979,8 @@ class CorrelationSearch(BaseModel):
        :param index: index to delete all events from (e.g. 'risk')
        """
        # construct our query and issue our delete job on the index
-        self.logger.debug(f"Deleting index '{index}")
-        query = f
+        self.logger.debug(f"Deleting index '{index}'")
+        query = f'search index={index} search_name="{self.name}" | delete'
        result_iterator = self._search(query)

        # we should get two results, one for "__ALL__" and one for the index; iterate until we find the one for the
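Two operational notes on the fixed query (both standard Splunk behavior, not specific to this package): the `delete` command only masks events from search rather than reclaiming disk space, and the account issuing it must hold the `can_delete` role. Rendered for the risk index and a hypothetical detection name, the cleanup query is:

```python
# Hypothetical rendered form of the cleanup query above:
query = 'search index=risk search_name="ESCU - Example Detection - Rule" | delete'
```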
@@ -809,13 +1019,23 @@ class CorrelationSearch(BaseModel):
        # disable the detection
        self.disable()

-        #
+        # Add indexes to purge
        if delete_test_index:
            self.indexes_to_purge.add(self.test_index)  # type: ignore
+        if self._risk_events is not None:
+            self.indexes_to_purge.add(Indexes.RISK_INDEX.value)
+        if self._notable_events is not None:
+            self.indexes_to_purge.add(Indexes.NOTABLE_INDEX.value)
+
+        # delete the indexes
        for index in self.indexes_to_purge:
            self._delete_index(index)
        self.indexes_to_purge.clear()

+        # reset caches
+        self._risk_events = None
+        self._notable_events = None
+
    def update_pbar(self, state: str) -> str:
        """
        Instance specific function to log integrtation testing information via pbar
--- a/contentctl/objects/data_source.py
+++ b/contentctl/objects/data_source.py

@@ -1,15 +1,20 @@
 from __future__ import annotations
 from typing import Optional, Any
-from pydantic import Field,
+from pydantic import Field, HttpUrl, model_serializer, BaseModel
 from contentctl.objects.security_content_object import SecurityContentObject
 from contentctl.objects.event_source import EventSource

+
+class TA(BaseModel):
+    name: str
+    url: HttpUrl | None = None
+    version: str
 class DataSource(SecurityContentObject):
     source: str = Field(...)
     sourcetype: str = Field(...)
     separator: Optional[str] = None
     configuration: Optional[str] = None
-    supported_TA:
+    supported_TA: list[TA] = []
     fields: Optional[list] = None
     field_mappings: Optional[list] = None
     convert_to_log_source: Optional[list] = None