contentctl 4.2.2__py3-none-any.whl → 4.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +41 -47
  2. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +1 -1
  3. contentctl/actions/detection_testing/views/DetectionTestingView.py +1 -4
  4. contentctl/actions/initialize.py +3 -2
  5. contentctl/actions/validate.py +40 -1
  6. contentctl/contentctl.py +4 -1
  7. contentctl/enrichments/attack_enrichment.py +6 -8
  8. contentctl/enrichments/cve_enrichment.py +3 -3
  9. contentctl/helper/splunk_app.py +263 -0
  10. contentctl/input/director.py +1 -1
  11. contentctl/input/ssa_detection_builder.py +8 -6
  12. contentctl/objects/abstract_security_content_objects/detection_abstract.py +362 -336
  13. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +117 -103
  14. contentctl/objects/atomic.py +7 -10
  15. contentctl/objects/base_test.py +1 -1
  16. contentctl/objects/base_test_result.py +7 -5
  17. contentctl/objects/baseline_tags.py +2 -30
  18. contentctl/objects/config.py +5 -4
  19. contentctl/objects/correlation_search.py +322 -98
  20. contentctl/objects/data_source.py +7 -2
  21. contentctl/objects/detection_tags.py +128 -102
  22. contentctl/objects/errors.py +18 -0
  23. contentctl/objects/lookup.py +1 -0
  24. contentctl/objects/mitre_attack_enrichment.py +3 -3
  25. contentctl/objects/notable_event.py +20 -0
  26. contentctl/objects/observable.py +20 -26
  27. contentctl/objects/risk_analysis_action.py +2 -2
  28. contentctl/objects/risk_event.py +315 -0
  29. contentctl/objects/ssa_detection_tags.py +1 -1
  30. contentctl/objects/story_tags.py +2 -2
  31. contentctl/objects/unit_test.py +1 -9
  32. contentctl/output/conf_output.py +2 -9
  33. contentctl/output/data_source_writer.py +4 -4
  34. contentctl/templates/README.md +10 -0
  35. {contentctl-4.2.2.dist-info → contentctl-4.2.5.dist-info}/METADATA +6 -9
  36. {contentctl-4.2.2.dist-info → contentctl-4.2.5.dist-info}/RECORD +39 -35
  37. contentctl/templates/README +0 -2
  38. {contentctl-4.2.2.dist-info → contentctl-4.2.5.dist-info}/LICENSE.md +0 -0
  39. {contentctl-4.2.2.dist-info → contentctl-4.2.5.dist-info}/WHEEL +0 -0
  40. {contentctl-4.2.2.dist-info → contentctl-4.2.5.dist-info}/entry_points.txt +0 -0
contentctl/objects/correlation_search.py
@@ -1,9 +1,10 @@
 import logging
 import time
+import json
 from typing import Union, Optional, Any
 from enum import Enum

-from pydantic import BaseModel, validator, Field
+from pydantic import BaseModel, validator, Field, PrivateAttr
 from splunklib.results import JSONResultsReader, Message  # type: ignore
 from splunklib.binding import HTTPError, ResponseReader  # type: ignore
 import splunklib.client as splunklib  # type: ignore
@@ -18,6 +19,16 @@ from contentctl.actions.detection_testing.progress_bar import (
     TestReportingType,
     TestingStates
 )
+from contentctl.objects.errors import (
+    IntegrationTestingError,
+    ServerError,
+    ClientError,
+    ValidationFailed
+)
+from contentctl.objects.detection import Detection
+from contentctl.objects.risk_event import RiskEvent
+from contentctl.objects.notable_event import NotableEvent
+from contentctl.objects.observable import Observable


 # Suppress logging by default; enable for local testing
@@ -64,21 +75,6 @@ def get_logger() -> logging.Logger:
     return logger


-class IntegrationTestingError(Exception):
-    """Base exception class for integration testing"""
-    pass
-
-
-class ServerError(IntegrationTestingError):
-    """An error encounterd during integration testing, as provided by the server (Splunk instance)"""
-    pass
-
-
-class ClientError(IntegrationTestingError):
-    """An error encounterd during integration testing, on the client's side (locally)"""
-    pass
-
-
 class SavedSearchKeys(str, Enum):
     """
     Various keys into the SavedSearch content
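The three exception classes deleted above are not gone; they move into contentctl/objects/errors.py (+18 lines, item 22 in the file list) alongside the new ValidationFailed imported earlier. A minimal sketch of the plausible shape of that module, with the docstring typo fixed; ValidationFailed's base class is an assumption, since errors.py itself is not shown in this diff:

class IntegrationTestingError(Exception):
    """Base exception class for integration testing"""


class ServerError(IntegrationTestingError):
    """An error encountered during integration testing, as provided by the server (Splunk instance)"""


class ClientError(IntegrationTestingError):
    """An error encountered during integration testing, on the client's side (locally)"""


class ValidationFailed(IntegrationTestingError):  # assumed parent class
    """Raised when a risk/notable event fails validation"""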
@@ -108,19 +104,23 @@ class TimeoutConfig(int, Enum):
     # base amount to sleep for before beginning exponential backoff during testing
     BASE_SLEEP = 60

-    # max amount to wait before timing out during exponential backoff
-    MAX_SLEEP = 210
+    # NOTE: Some detections take longer to generate their risk/notables than others; testing has
+    #   shown 270s to likely be sufficient for all detections in 99% of runs; however, we have
+    #   encountered a handful of transient failures in the last few months. Since our success rate
+    #   is at 100% now, we will round this up to a flat 300s to accommodate these outliers.
+    # Max amount to wait before timing out during exponential backoff
+    MAX_SLEEP = 300


-# TODO (cmcginley): evaluate sane defaults for timeframe for integration testing (e.g. 5y is good
+# TODO (#226): evaluate sane defaults for timeframe for integration testing (e.g. 5y is good
 #   now, but maybe not always...); maybe set latest/earliest to None?
 class ScheduleConfig(str, Enum):
     """
-    Configuration values for the saved search schedule
+    Configuration values for the saved search schedule
     """
-    EARLIEST_TIME: str = "-5y@y"
-    LATEST_TIME: str = "-1m@m"
-    CRON_SCHEDULE: str = "*/1 * * * *"
+    EARLIEST_TIME = "-5y@y"
+    LATEST_TIME = "-1m@m"
+    CRON_SCHEDULE = "*/1 * * * *"


 class ResultIterator:
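The bumped MAX_SLEEP feeds the capped exponential backoff used later in test() (see the hunk below that clamps time_to_sleep against max_sleep). A minimal sketch of that pattern, assuming illustrative names; wait_with_backoff and check are not contentctl APIs:

import time

BASE_SLEEP = 60   # first wait before checking for events
MAX_SLEEP = 300   # total wait budget across all retries

def wait_with_backoff(check) -> bool:
    """Poll check() with capped exponential backoff until it passes or the budget runs out."""
    elapsed = 0
    time_to_sleep = BASE_SLEEP
    while elapsed < MAX_SLEEP:
        # never sleep past the total budget
        if (elapsed + time_to_sleep) > MAX_SLEEP:
            time_to_sleep = MAX_SLEEP - elapsed
        time.sleep(time_to_sleep)
        elapsed += time_to_sleep
        if check():
            return True
        time_to_sleep *= 2  # back off exponentially
    return False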
@@ -154,7 +154,7 @@ class ResultIterator:
                 level: int = logging.getLevelName(level_name)

                 # log message at appropriate level and raise if needed
-                message = f"{result.type}: {result.message}"
+                message = f"SPLUNK: {result.message}"
                 self.logger.log(level, message)
                 if level == logging.ERROR:
                     raise ServerError(message)
@@ -192,15 +192,15 @@ class CorrelationSearch(BaseModel):

     In Enterprise Security, a correlation search is a wrapper around the saved search entity. This search represents a
     detection rule for our purposes.
-    :param detection_name: the name of the search/detection (e.g. "Windows Modify Registry EnableLinkedConnections")
+    :param detection: a Detection model
     :param service: a Service instance representing a connection to a Splunk instance
     :param pbar_data: the encapsulated info needed for logging w/ pbar
     :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
     """
     ## The following three fields are explicitly needed at instantiation  # noqa: E266

-    # the name of the search/detection (e.g. "Windows Modify Registry EnableLinkedConnections")
-    detection_name: str
+    # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
+    detection: Detection

     # a Service instance representing a connection to a Splunk instance
     service: splunklib.Service
@@ -240,25 +240,33 @@ class CorrelationSearch(BaseModel):
     # The notable adaptive response action (if defined)
     notable_action: Union[NotableAction, None] = None

+    # The list of risk events found
+    _risk_events: Optional[list[RiskEvent]] = PrivateAttr(default=None)
+
+    # The list of notable events found
+    _notable_events: Optional[list[NotableEvent]] = PrivateAttr(default=None)
+
     class Config:
         # needed to allow fields w/ types like SavedSearch
         arbitrary_types_allowed = True
+        # We want more rigid typing
+        extra = 'forbid'

     @validator("name", always=True)
     @classmethod
     def _convert_detection_to_search_name(cls, v, values) -> str:
         """
-        Validate detection name and derive if None
+        Validate name and derive if None
         """
-        if "detection_name" not in values:
-            raise ValueError("detection_name missing; name is dependent on detection_name")
+        if "detection" not in values:
+            raise ValueError("detection missing; name is dependent on detection")

-        expected_name = f"ESCU - {values['detection_name']} - Rule"
+        expected_name = f"ESCU - {values['detection'].name} - Rule"
         if v is not None and v != expected_name:
             raise ValueError(
-                "name must be derived from detection_name; leave as None and it will be derived automatically"
+                "name must be derived from detection; leave as None and it will be derived automatically"
             )
-        return f"ESCU - {values['detection_name']} - Rule"
+        return expected_name

     @validator("splunk_path", always=True)
     @classmethod
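The two PrivateAttr fields above implement a simple cache with an explicit invalidation flag, the same pattern get_risk_events and get_notable_events follow in a later hunk. A minimal, self-contained sketch of that pattern (pydantic v1 style, matching the validator/PrivateAttr imports above); CachedSearch and its fields are illustrative, not contentctl classes:

from typing import Optional
from pydantic import BaseModel, PrivateAttr


class CachedSearch(BaseModel):
    name: str

    # per-instance state; private attrs are not model fields, so they
    # survive `extra = 'forbid'` and are excluded from serialization
    _events: Optional[list[str]] = PrivateAttr(default=None)

    def get_events(self, force_update: bool = False) -> list[str]:
        # drop the cache when the caller wants fresh results
        if force_update:
            self._events = None
        # reuse the cache when present
        if self._events is not None:
            return self._events
        events = [f"event for {self.name}"]  # stand-in for a real Splunk search
        if events:
            # cache only non-empty results, as the diff does
            self._events = events
        return events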
@@ -410,7 +418,20 @@ class CorrelationSearch(BaseModel):
             return NotableAction.parse_from_dict(content)
         return None

-    # TODO (cmcginley): ideally, we could handle this and the following init w/ a call to
+    @staticmethod
+    def _get_relevant_observables(observables: list[Observable]) -> list[Observable]:
+        """
+        Given a list of observables, identify the subset of those relevant for risk matching
+        :param observables: the Observable objects to filter
+        :returns: the filtered list of relevant observables
+        """
+        relevant = []
+        for observable in observables:
+            if not RiskEvent.ignore_observable(observable):
+                relevant.append(observable)
+        return relevant
+
+    # TODO (PEX-484): ideally, we could handle this and the following init w/ a call to
     #   model_post_init, so that all the logic is encapsulated w/in _parse_risk_and_notable_actions
     #   but that is a pydantic v2 feature (see the init validators for risk/notable actions):
     #   https://docs.pydantic.dev/latest/api/base_model/#pydantic.main.BaseModel.model_post_init
@@ -418,7 +439,7 @@ class CorrelationSearch(BaseModel):
         """Parses the risk/notable metadata we care about from self.saved_search.content

         :raises KeyError: if self.saved_search.content does not contain a required key
-        :raises json.JSONDecodeError: if the value at self.saved_search.content['action.risk.param._risk'] can't be
+        :raises json.JSONDecodeError: if the value at self.saved_search.content['action.risk.param._risk'] can't be
             decoded from JSON into a dict
         :raises IntegrationTestingError: if the value at self.saved_search.content['action.risk.param._risk'] is
             unpacked to be anything other than a singleton
@@ -525,57 +546,231 @@ class CorrelationSearch(BaseModel):
         if refresh:
             self.refresh()

-    # TODO (cmcginley): make the search for risk/notable events a more specific query based on the
-    #   search in question (and update the docstring to relfect when you do)
     def risk_event_exists(self) -> bool:
-        """Whether a risk event exists
+        """Whether at least one matching risk event exists
+
+        Queries the `risk` index and returns True if at least one matching risk event exists for
+        this search
+        :return: a bool indicating whether a risk event for this search exists in the risk index
+        """
+        # We always force an update on the cache when checking if events exist
+        events = self.get_risk_events(force_update=True)
+        return len(events) > 0
+
+    def get_risk_events(self, force_update: bool = False) -> list[RiskEvent]:
+        """Get risk events from the Splunk instance

-        Queries the `risk` index and returns True if a risk event exists
-        :return: a bool indicating whether a risk event exists in the risk index
+        Queries the `risk` index and returns any matching risk events
+        :param force_update: whether the cached _risk_events should be forcibly updated if already
+            set
+        :return: a list of risk events
         """
-        # construct our query and issue our search job on the risk index
-        query = "search index=risk | head 1"
+        # Reset the list of risk events if we're forcing an update
+        if force_update:
+            self.logger.debug("Resetting risk event cache.")
+            self._risk_events = None
+
+        # Use the cached risk_events unless we're forcing an update
+        if self._risk_events is not None:
+            self.logger.debug(f"Using cached risk events ({len(self._risk_events)} total).")
+            return self._risk_events
+
+        # Search for all risk events from a single scheduled search (indicated by orig_sid)
+        query = (
+            f'search index=risk search_name="{self.name}" [search index=risk search '
+            f'search_name="{self.name}" | head 1 | fields orig_sid] | tojson'
+        )
         result_iterator = self._search(query)
+
+        # Iterate over the events, storing them in a list and checking for any errors
+        events: list[RiskEvent] = []
         try:
             for result in result_iterator:
-                # we return True if we find at least one risk object
-                #   (e.g. users vs systems) and we may want to do more confirmational testing
+                # sanity check that this result from the iterator is a risk event and not some
+                #   other metadata
                 if result["index"] == Indexes.RISK_INDEX.value:
-                    self.logger.debug(
-                        f"Found risk event for '{self.name}': {result}")
-                    return True
+                    try:
+                        parsed_raw = json.loads(result["_raw"])
+                        event = RiskEvent.parse_obj(parsed_raw)
+                    except Exception:
+                        self.logger.error(f"Failed to parse RiskEvent from search result: {result}")
+                        raise
+                    events.append(event)
+                    self.logger.debug(f"Found risk event for '{self.name}': {event}")
         except ServerError as e:
             self.logger.error(f"Error returned from Splunk instance: {e}")
             raise e
-        self.logger.debug(f"No risk event found for '{self.name}'")
-        return False
+
+        # Log if no events were found
+        if len(events) < 1:
+            self.logger.debug(f"No risk events found for '{self.name}'")
+        else:
+            # Set the cache if we found events
+            self._risk_events = events
+            self.logger.debug(f"Caching {len(self._risk_events)} risk events.")
+
+        return events

     def notable_event_exists(self) -> bool:
         """Whether a notable event exists

-        Queries the `notable` index and returns True if a risk event exists
-        :return: a bool indicating whether a risk event exists in the risk index
+        Queries the `notable` index and returns True if a notable event exists
+        :return: a bool indicating whether a notable event exists in the notable index
         """
-        # construct our query and issue our search job on the risk index
-        query = "search index=notable | head 1"
+        # construct our query and issue our search job on the notable index
+        # We always force an update on the cache when checking if events exist
+        events = self.get_notable_events(force_update=True)
+        return len(events) > 0
+
+    def get_notable_events(self, force_update: bool = False) -> list[NotableEvent]:
+        """Get notable events from the Splunk instance
+
+        Queries the `notable` index and returns any matching notable events
+        :param force_update: whether the cached _notable_events should be forcibly updated if
+            already set
+        :return: a list of notable events
+        """
+        # Reset the list of notable events if we're forcing an update
+        if force_update:
+            self.logger.debug("Resetting notable event cache.")
+            self._notable_events = None
+
+        # Use the cached notable_events unless we're forcing an update
+        if self._notable_events is not None:
+            self.logger.debug(f"Using cached notable events ({len(self._notable_events)} total).")
+            return self._notable_events
+
+        # Search for all notable events from a single scheduled search (indicated by orig_sid)
+        query = (
+            f'search index=notable search_name="{self.name}" [search index=notable search '
+            f'search_name="{self.name}" | head 1 | fields orig_sid] | tojson'
+        )
         result_iterator = self._search(query)
+
+        # Iterate over the events, storing them in a list and checking for any errors
+        events: list[NotableEvent] = []
         try:
             for result in result_iterator:
-                # we return True if we find at least one notable object
+                # sanity check that this result from the iterator is a notable event and not some
+                #   other metadata
                 if result["index"] == Indexes.NOTABLE_INDEX.value:
-                    self.logger.debug(
-                        f"Found notable event for '{self.name}': {result}")
-                    return True
+                    try:
+                        parsed_raw = json.loads(result["_raw"])
+                        event = NotableEvent.parse_obj(parsed_raw)
+                    except Exception:
+                        self.logger.error(f"Failed to parse NotableEvent from search result: {result}")
+                        raise
+                    events.append(event)
+                    self.logger.debug(f"Found notable event for '{self.name}': {event}")
         except ServerError as e:
             self.logger.error(f"Error returned from Splunk instance: {e}")
             raise e
-        self.logger.debug(f"No notable event found for '{self.name}'")
-        return False

-    def risk_message_is_valid(self) -> bool:
-        """Validates the observed risk message against the expected risk message"""
-        # TODO
-        raise NotImplementedError
+        # Log if no events were found
+        if len(events) < 1:
+            self.logger.debug(f"No notable events found for '{self.name}'")
+        else:
+            # Set the cache if we found events
+            self._notable_events = events
+            self.logger.debug(f"Caching {len(self._notable_events)} notable events.")
+
+        return events
+
+    def validate_risk_events(self) -> None:
+        """Validates the existence of any expected risk events
+
+        First ensure the risk event exists, and if it does validate its risk message and make sure
+        any events align with the specified observables. Also adds the risk index to the purge list
+        if risk events existed
+        Note: expects the risk event cache to have been populated by a prior call to
+            risk_event_exists
+        :raises ValidationFailed: if a risk event fails validation against the detection
+        """
+        # TODO (PEX-433): Re-enable this check once we have refined the logic and reduced the false
+        #   positive rate in risk/observable matching
+        # Create a mapping of the relevant observables to counters
+        # observables = CorrelationSearch._get_relevant_observables(self.detection.tags.observable)
+        # observable_counts: dict[str, int] = {str(x): 0 for x in observables}
+        # if len(observables) != len(observable_counts):
+        #     raise ClientError(
+        #         f"At least two observables in '{self.detection.name}' have the same name."
+        #     )
+
+        # Get the risk events; note that we use the cached risk events, expecting they were
+        # saved by a prior call to risk_event_exists
+        events = self.get_risk_events()
+
+        # Validate each risk event individually and record some aggregate counts
+        c = 0
+        for event in events:
+            c += 1
+            self.logger.debug(
+                f"Validating risk event ({event.risk_object}, {event.risk_object_type}): "
+                f"{c}/{len(events)}"
+            )
+            event.validate_against_detection(self.detection)
+
+            # TODO (PEX-433): Re-enable this check once we have refined the logic and reduced the
+            #   false positive rate in risk/observable matching
+            # Update observable count based on match
+            # matched_observable = event.get_matched_observable(self.detection.tags.observable)
+            # self.logger.debug(
+            #     f"Matched risk event ({event.risk_object}, {event.risk_object_type}) to observable "
+            #     f"({matched_observable.name}, {matched_observable.type}, {matched_observable.role})"
+            # )
+            # observable_counts[str(matched_observable)] += 1
+
+        # TODO (PEX-433): test my new contentctl logic against an old ESCU build; my logic should
+        #   detect the faulty attacker events -> this was the issue from the 4.28/4.27 release;
+        #   recreate by testing against one of those old builds w/ the bad config
+        # TODO (PEX-433): Re-enable this check once we have refined the logic and reduced the false
+        #   positive
+        #   rate in risk/observable matching
+        # TODO (PEX-433): I foresee issues here if for example a parent and child process share a
+        #   name (matched observable could be either) -> these issues are confirmed to exist, e.g.
+        #   `Windows Steal Authentication Certificates Export Certificate`
+        # Validate risk events in aggregate; we should have an equal amount of risk events for each
+        # relevant observable, and the total count should match the total number of events
+        # individual_count: Optional[int] = None
+        # total_count = 0
+        # for observable_str in observable_counts:
+        #     self.logger.debug(
+        #         f"Observable <{observable_str}> match count: {observable_counts[observable_str]}"
+        #     )
+
+        #     # Grab the first value encountered if not set yet
+        #     if individual_count is None:
+        #         individual_count = observable_counts[observable_str]
+        #     else:
+        #         # Confirm that the count for the current observable matches the count of the others
+        #         if observable_counts[observable_str] != individual_count:
+        #             raise ValidationFailed(
+        #                 f"Count of risk events matching observable <\"{observable_str}\"> "
+        #                 f"({observable_counts[observable_str]}) does not match the count of those "
+        #                 f"matching other observables ({individual_count})."
+        #             )
+
+        #     # Aggregate total count of events matched to observables
+        #     total_count += observable_counts[observable_str]
+
+        # # Raise if the number of events doesn't match the number of those matched to observables
+        # if len(events) != total_count:
+        #     raise ValidationFailed(
+        #         f"The total number of risk events {len(events)} does not match the number of "
+        #         f"risk events we were able to match against observables ({total_count})."
+        #     )
+
+    # TODO (PEX-434): implement deeper notable validation
+    def validate_notable_events(self) -> None:
+        """Validates the existence of any expected notables
+
+        Ensures the notable exists. Also adds the notable index to the purge list if notables
+        existed
+        Note: the existence check itself happens in notable_event_exists; deeper validation
+            (PEX-434) is not yet implemented
+        :raises NotImplementedError: always, until deeper notable validation is implemented
+        """
+        raise NotImplementedError()

     # NOTE: it would be more ideal to switch this to a system which gets the handle of the saved search job and polls
     #   it for completion, but that seems more tricky
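Two things changed in these queries: a subsearch pins results to a single scheduled run via orig_sid, and `| tojson` serializes each event so its `_raw` field can be fed straight into a pydantic model. A sketch of that parse step; MiniRiskEvent and its fields are stand-ins, since the real RiskEvent lives in the new objects/risk_event.py (+315 lines, item 28 above) and is not shown here:

import json
from pydantic import BaseModel


class MiniRiskEvent(BaseModel):
    # stand-in for contentctl's RiskEvent (objects/risk_event.py)
    risk_object: str
    risk_object_type: str

    class Config:
        extra = "ignore"  # real events carry many more fields than modeled here


# the shape of a `| tojson` result row: the event itself is JSON in _raw
result = {
    "index": "risk",
    "_raw": '{"risk_object": "user@example.com", "risk_object_type": "user"}',
}

if result["index"] == "risk":
    event = MiniRiskEvent.parse_obj(json.loads(result["_raw"]))
    print(event.risk_object, event.risk_object_type)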
@@ -617,12 +812,12 @@ class CorrelationSearch(BaseModel):
         self.update_pbar(TestingStates.PRE_CLEANUP)
         if self.risk_event_exists():
             self.logger.warn(
-                f"Risk events matching '{self.name}' already exist; marking for deletion")
-            self.indexes_to_purge.add(Indexes.RISK_INDEX.value)
+                f"Risk events matching '{self.name}' already exist; marking for deletion"
+            )
         if self.notable_event_exists():
             self.logger.warn(
-                f"Notable events matching '{self.name}' already exist; marking for deletion")
-            self.indexes_to_purge.add(Indexes.NOTABLE_INDEX.value)
+                f"Notable events matching '{self.name}' already exist; marking for deletion"
+            )
         self.cleanup()

     # skip test if no risk or notable action defined
@@ -641,7 +836,6 @@ class CorrelationSearch(BaseModel):
         self.logger.info(f"Forcing a run on {self.name}")
         self.update_pbar(TestingStates.FORCE_RUN)
         self.force_run()
-        time.sleep(TimeoutConfig.BASE_SLEEP.value)

         # loop so long as the elapsed time is less than max_sleep
         while elapsed_sleep_time < max_sleep:
@@ -659,34 +853,49 @@ class CorrelationSearch(BaseModel):
             # reset the result to None on each loop iteration
             result = None

-            # TODO (cmcginley): add more granular error messaging that can show success in
-            #   finding a notable, but failure in finding a risk and vice-versa
-            # check for risk events
-            self.logger.debug("Checking for matching risk events")
-            if self.has_risk_analysis_action:
-                if not self.risk_event_exists():
-                    result = IntegrationTestResult(
-                        status=TestResultStatus.FAIL,
-                        message=f"TEST FAILED: No matching risk event created for: {self.name}",
-                        wait_duration=elapsed_sleep_time,
-                    )
-                else:
-                    self.indexes_to_purge.add(Indexes.RISK_INDEX.value)
-
-            # check for notable events
-            self.logger.debug("Checking for matching notable events")
-            if self.has_notable_action:
-                if not self.notable_event_exists():
+            try:
+                # Validate risk events
+                self.logger.debug("Checking for matching risk events")
+                if self.has_risk_analysis_action:
+                    if self.risk_event_exists():
+                        # TODO (PEX-435): should this be in the retry loop? or outside it?
+                        #   -> I've observed there being a missing risk event (15/16) on
+                        #      the first few tries, so this does help us check for true
+                        #      positives; BUT, if we have lots of failing detections, this
+                        #      will definitely add to the total wait time
+                        #   -> certain types of failures (e.g. risk message, or any value
+                        #      checking) should fail testing automatically
+                        #   -> other types, like those based on counts of risk events,
+                        #      should fail more slowly as more events may be
+                        #      produced
+                        self.validate_risk_events()
+                    else:
+                        raise ValidationFailed(
+                            f"TEST FAILED: No matching risk event created for: {self.name}"
+                        )
+
+                # Validate notable events
+                self.logger.debug("Checking for matching notable events")
+                if self.has_notable_action:
                     # NOTE: because we check this last, if both fail, the error message about notables will
-                    #   always be the last to be added and thus the one surfaced to the user; good case for
-                    #   adding more descriptive test results
-                    result = IntegrationTestResult(
-                        status=TestResultStatus.FAIL,
-                        message=f"TEST FAILED: No matching notable event created for: {self.name}",
-                        wait_duration=elapsed_sleep_time,
-                    )
-                else:
-                    self.indexes_to_purge.add(Indexes.NOTABLE_INDEX.value)
+                    #   always be the last to be added and thus the one surfaced to the user
+                    if self.notable_event_exists():
+                        # TODO (PEX-435): should this be in the retry loop? or outside it?
+                        # TODO (PEX-434): implement deeper notable validation (the method
+                        #   commented out below is unimplemented)
+                        # self.validate_notable_events(elapsed_sleep_time)
+                        pass
+                    else:
+                        raise ValidationFailed(
+                            f"TEST FAILED: No matching notable event created for: {self.name}"
+                        )
+            except ValidationFailed as e:
+                self.logger.error(f"Risk/notable validation failed: {e}")
+                result = IntegrationTestResult(
+                    status=TestResultStatus.FAIL,
+                    message=f"TEST FAILED: {e}",
+                    wait_duration=elapsed_sleep_time,
+                )

             # if result is still None, then all checks passed and we can break the loop
             if result is None:
@@ -707,6 +916,7 @@ class CorrelationSearch(BaseModel):
                 if (elapsed_sleep_time + time_to_sleep) > max_sleep:
                     time_to_sleep = max_sleep - elapsed_sleep_time

+        # TODO (PEX-436): should cleanup be in a finally block so it runs even on exception?
         # cleanup the created events, disable the detection and return the result
         self.logger.debug("Cleaning up any created risk/notable events...")
         self.update_pbar(TestingStates.POST_CLEANUP)
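The new TODO (PEX-436) asks whether cleanup belongs in a finally block; the shape would be roughly the following, with run_detection_test as an illustrative stand-in for the body of test():

def run_detection_test(search) -> None:
    # illustrative shape for PEX-436: cleanup runs even if polling or validation raises
    try:
        search.force_run()
        # ... wait, then validate risk/notable events ...
    finally:
        search.cleanup()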
@@ -719,9 +929,13 @@ class CorrelationSearch(BaseModel):
                    wait_duration=elapsed_sleep_time,
                    exception=e,
                )
-                self.logger.exception(f"{result.status.name}: {result.message}")  # type: ignore
+                self.logger.exception(result.message)  # type: ignore
            else:
                raise e
+        except Exception as e:
+            # Log any exceptions locally and raise to the caller
+            self.logger.exception(f"Unhandled exception during testing: {e}")
+            raise e

        # log based on result status
        if result is not None:
@@ -769,8 +983,8 @@ class CorrelationSearch(BaseModel):
        :param index: index to delete all events from (e.g. 'risk')
        """
        # construct our query and issue our delete job on the index
-        self.logger.debug(f"Deleting index '{index}")
-        query = f"search index={index} | delete"
+        self.logger.debug(f"Deleting index '{index}'")
+        query = f'search index={index} search_name="{self.name}" | delete'
        result_iterator = self._search(query)

        # we should get two results, one for "__ALL__" and one for the index; iterate until we find the one for the
@@ -809,13 +1023,23 @@ class CorrelationSearch(BaseModel):
        # disable the detection
        self.disable()

-        # delete the indexes
+        # Add indexes to purge
        if delete_test_index:
            self.indexes_to_purge.add(self.test_index)  # type: ignore
+        if self._risk_events is not None:
+            self.indexes_to_purge.add(Indexes.RISK_INDEX.value)
+        if self._notable_events is not None:
+            self.indexes_to_purge.add(Indexes.NOTABLE_INDEX.value)
+
+        # delete the indexes
        for index in self.indexes_to_purge:
            self._delete_index(index)
        self.indexes_to_purge.clear()

+        # reset caches
+        self._risk_events = None
+        self._notable_events = None
+
    def update_pbar(self, state: str) -> str:
        """
        Instance specific function to log integration testing information via pbar
contentctl/objects/data_source.py
@@ -1,15 +1,20 @@
 from __future__ import annotations
 from typing import Optional, Any
-from pydantic import Field, FilePath, model_serializer
+from pydantic import Field, HttpUrl, model_serializer, BaseModel
 from contentctl.objects.security_content_object import SecurityContentObject
 from contentctl.objects.event_source import EventSource

+
+class TA(BaseModel):
+    name: str
+    url: HttpUrl | None = None
+    version: str
 class DataSource(SecurityContentObject):
     source: str = Field(...)
     sourcetype: str = Field(...)
     separator: Optional[str] = None
     configuration: Optional[str] = None
-    supported_TA: Optional[list] = None
+    supported_TA: list[TA] = []
     fields: Optional[list] = None
     field_mappings: Optional[list] = None
     convert_to_log_source: Optional[list] = None
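With supported_TA now typed as list[TA] rather than an untyped Optional[list], each entry is validated: name and version are required, and url must parse as an HTTP(S) URL. A small demonstration with hypothetical values (data_source.py imports model_serializer, so this is pydantic v2); the TA class is redefined here only to keep the sketch self-contained:

from pydantic import BaseModel, HttpUrl, ValidationError


class TA(BaseModel):
    # mirror of the TA model added above
    name: str
    url: HttpUrl | None = None
    version: str


# hypothetical entry; real values come from a data source's supported_TA YAML
ta = TA(name="Example Add-on", url="https://example.com/ta", version="1.2.0")
print(ta.url)

# a malformed URL now fails validation instead of slipping through an untyped list
try:
    TA(name="Example Add-on", url="not-a-url", version="1.0.0")
except ValidationError as err:
    print(err)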