contentctl 4.4.7__py3-none-any.whl → 5.0.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. contentctl/actions/build.py +39 -27
  2. contentctl/actions/detection_testing/DetectionTestingManager.py +0 -1
  3. contentctl/actions/detection_testing/GitService.py +132 -72
  4. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +32 -26
  5. contentctl/actions/detection_testing/progress_bar.py +6 -6
  6. contentctl/actions/detection_testing/views/DetectionTestingView.py +4 -4
  7. contentctl/actions/new_content.py +98 -81
  8. contentctl/actions/test.py +4 -5
  9. contentctl/actions/validate.py +2 -1
  10. contentctl/contentctl.py +114 -80
  11. contentctl/helper/utils.py +0 -14
  12. contentctl/input/director.py +5 -5
  13. contentctl/input/new_content_questions.py +2 -2
  14. contentctl/input/yml_reader.py +11 -6
  15. contentctl/objects/abstract_security_content_objects/detection_abstract.py +228 -120
  16. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +5 -7
  17. contentctl/objects/alert_action.py +2 -1
  18. contentctl/objects/atomic.py +1 -0
  19. contentctl/objects/base_test.py +4 -3
  20. contentctl/objects/base_test_result.py +3 -3
  21. contentctl/objects/baseline.py +26 -6
  22. contentctl/objects/baseline_tags.py +2 -3
  23. contentctl/objects/config.py +789 -596
  24. contentctl/objects/constants.py +4 -1
  25. contentctl/objects/correlation_search.py +89 -95
  26. contentctl/objects/data_source.py +5 -6
  27. contentctl/objects/deployment.py +2 -10
  28. contentctl/objects/deployment_email.py +2 -1
  29. contentctl/objects/deployment_notable.py +2 -1
  30. contentctl/objects/deployment_phantom.py +2 -1
  31. contentctl/objects/deployment_rba.py +2 -1
  32. contentctl/objects/deployment_scheduling.py +2 -1
  33. contentctl/objects/deployment_slack.py +2 -1
  34. contentctl/objects/detection_tags.py +7 -42
  35. contentctl/objects/drilldown.py +1 -0
  36. contentctl/objects/enums.py +21 -58
  37. contentctl/objects/investigation.py +6 -5
  38. contentctl/objects/investigation_tags.py +2 -3
  39. contentctl/objects/lookup.py +145 -63
  40. contentctl/objects/macro.py +2 -3
  41. contentctl/objects/mitre_attack_enrichment.py +2 -2
  42. contentctl/objects/observable.py +3 -1
  43. contentctl/objects/playbook_tags.py +5 -1
  44. contentctl/objects/rba.py +90 -0
  45. contentctl/objects/risk_event.py +87 -144
  46. contentctl/objects/story_tags.py +1 -2
  47. contentctl/objects/test_attack_data.py +2 -1
  48. contentctl/objects/unit_test_baseline.py +2 -1
  49. contentctl/output/api_json_output.py +233 -220
  50. contentctl/output/conf_output.py +51 -44
  51. contentctl/output/conf_writer.py +201 -125
  52. contentctl/output/data_source_writer.py +0 -1
  53. contentctl/output/json_writer.py +2 -4
  54. contentctl/output/svg_output.py +1 -1
  55. contentctl/output/templates/analyticstories_detections.j2 +1 -1
  56. contentctl/output/templates/collections.j2 +1 -1
  57. contentctl/output/templates/doc_detections.j2 +0 -5
  58. contentctl/output/templates/savedsearches_detections.j2 +8 -3
  59. contentctl/output/templates/transforms.j2 +4 -4
  60. contentctl/output/yml_writer.py +15 -0
  61. contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +16 -34
  62. {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/METADATA +5 -4
  63. {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/RECORD +66 -69
  64. {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/WHEEL +1 -1
  65. contentctl/objects/event_source.py +0 -11
  66. contentctl/output/detection_writer.py +0 -28
  67. contentctl/output/new_content_yml_output.py +0 -56
  68. contentctl/output/yml_output.py +0 -66
  69. {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/LICENSE.md +0 -0
  70. {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/entry_points.txt +0 -0
@@ -79,6 +79,7 @@ SES_KILL_CHAIN_MAPPINGS = {
79
79
  "Actions on Objectives": 7
80
80
  }
81
81
 
82
+ # TODO (cmcginley): @ljstella should this be removed? also referenced in new_content.py
82
83
  SES_OBSERVABLE_ROLE_MAPPING = {
83
84
  "Other": -1,
84
85
  "Unknown": 0,
@@ -93,6 +94,7 @@ SES_OBSERVABLE_ROLE_MAPPING = {
93
94
  "Observer": 9
94
95
  }
95
96
 
97
+ # TODO (cmcginley): @ljstella should this be removed? also referenced in new_content.py
96
98
  SES_OBSERVABLE_TYPE_MAPPING = {
97
99
  "Unknown": 0,
98
100
  "Hostname": 1,
@@ -135,6 +137,7 @@ SES_ATTACK_TACTICS_ID_MAPPING = {
135
137
  "Impact": "TA0040"
136
138
  }
137
139
 
140
+ # TODO (cmcginley): is this just for the transition testing?
138
141
  RBA_OBSERVABLE_ROLE_MAPPING = {
139
142
  "Attacker": 0,
140
143
  "Victim": 1
@@ -149,7 +152,7 @@ DOWNLOADS_DIRECTORY = "downloads"
149
152
  # errors, if its name is longer than 99 characters.
150
153
  # When an saved search is cloned in Enterprise Security User Interface,
151
154
  # it is wrapped in the following:
152
- # {Detection.tags.security_domain.value} - {SEARCH_STANZA_NAME} - Rule
155
+ # {Detection.tags.security_domain} - {SEARCH_STANZA_NAME} - Rule
153
156
  # Similarly, when we generate the search stanza name in contentctl, it
154
157
  # is app.label - detection.name - Rule
155
158
  # However, in product the search name is:
@@ -2,7 +2,7 @@ import logging
2
2
  import time
3
3
  import json
4
4
  from typing import Any
5
- from enum import Enum
5
+ from enum import StrEnum, IntEnum
6
6
  from functools import cached_property
7
7
 
8
8
  from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
@@ -29,7 +29,6 @@ from contentctl.objects.errors import (
29
29
  from contentctl.objects.detection import Detection
30
30
  from contentctl.objects.risk_event import RiskEvent
31
31
  from contentctl.objects.notable_event import NotableEvent
32
- from contentctl.objects.observable import Observable
33
32
 
34
33
 
35
34
  # Suppress logging by default; enable for local testing
@@ -76,7 +75,7 @@ def get_logger() -> logging.Logger:
76
75
  return logger
77
76
 
78
77
 
79
- class SavedSearchKeys(str, Enum):
78
+ class SavedSearchKeys(StrEnum):
80
79
  """
81
80
  Various keys into the SavedSearch content
82
81
  """
@@ -89,7 +88,7 @@ class SavedSearchKeys(str, Enum):
89
88
  DISBALED_KEY = "disabled"
90
89
 
91
90
 
92
- class Indexes(str, Enum):
91
+ class Indexes(StrEnum):
93
92
  """
94
93
  Indexes we search against
95
94
  """
@@ -98,7 +97,7 @@ class Indexes(str, Enum):
98
97
  NOTABLE_INDEX = "notable"
99
98
 
100
99
 
101
- class TimeoutConfig(int, Enum):
100
+ class TimeoutConfig(IntEnum):
102
101
  """
103
102
  Configuration values for the exponential backoff timer
104
103
  """
@@ -115,7 +114,7 @@ class TimeoutConfig(int, Enum):
115
114
 
116
115
  # TODO (#226): evaluate sane defaults for timeframe for integration testing (e.g. 5y is good
117
116
  # now, but maybe not always...); maybe set latest/earliest to None?
118
- class ScheduleConfig(str, Enum):
117
+ class ScheduleConfig(StrEnum):
119
118
  """
120
119
  Configuraton values for the saved search schedule
121
120
  """
@@ -145,24 +144,24 @@ class ResultIterator:
145
144
  def __iter__(self) -> "ResultIterator":
146
145
  return self
147
146
 
148
- def __next__(self) -> dict:
147
+ def __next__(self) -> dict[Any, Any]:
149
148
  # Use a reader for JSON format so we can iterate over our results
150
149
  for result in self.results_reader:
151
150
  # log messages, or raise if error
152
151
  if isinstance(result, Message):
153
152
  # convert level string to level int
154
- level_name = result.type.strip().upper()
153
+ level_name = result.type.strip().upper() # type: ignore
155
154
  level: int = logging.getLevelName(level_name)
156
155
 
157
156
  # log message at appropriate level and raise if needed
158
- message = f"SPLUNK: {result.message}"
157
+ message = f"SPLUNK: {result.message}" # type: ignore
159
158
  self.logger.log(level, message)
160
159
  if level == logging.ERROR:
161
160
  raise ServerError(message)
162
161
 
163
162
  # if dict, just return
164
163
  elif isinstance(result, dict):
165
- return result
164
+ return result # type: ignore
166
165
 
167
166
  # raise for any unexpected types
168
167
  else:
@@ -310,9 +309,11 @@ class CorrelationSearch(BaseModel):
310
309
  The earliest time configured for the saved search
311
310
  """
312
311
  if self.saved_search is not None:
313
- return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY.value]
312
+ return self.saved_search.content[SavedSearchKeys.EARLIEST_TIME_KEY] # type: ignore
314
313
  else:
315
- raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
314
+ raise ClientError(
315
+ "Something unexpected went wrong in initialization; saved_search was not populated"
316
+ )
316
317
 
317
318
  @property
318
319
  def latest_time(self) -> str:
@@ -320,9 +321,11 @@ class CorrelationSearch(BaseModel):
320
321
  The latest time configured for the saved search
321
322
  """
322
323
  if self.saved_search is not None:
323
- return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY.value]
324
+ return self.saved_search.content[SavedSearchKeys.LATEST_TIME_KEY] # type: ignore
324
325
  else:
325
- raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
326
+ raise ClientError(
327
+ "Something unexpected went wrong in initialization; saved_search was not populated"
328
+ )
326
329
 
327
330
  @property
328
331
  def cron_schedule(self) -> str:
@@ -330,9 +333,11 @@ class CorrelationSearch(BaseModel):
330
333
  The cron schedule configured for the saved search
331
334
  """
332
335
  if self.saved_search is not None:
333
- return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY.value]
336
+ return self.saved_search.content[SavedSearchKeys.CRON_SCHEDULE_KEY] # type: ignore
334
337
  else:
335
- raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
338
+ raise ClientError(
339
+ "Something unexpected went wrong in initialization; saved_search was not populated"
340
+ )
336
341
 
337
342
  @property
338
343
  def enabled(self) -> bool:
@@ -340,12 +345,14 @@ class CorrelationSearch(BaseModel):
340
345
  Whether the saved search is enabled
341
346
  """
342
347
  if self.saved_search is not None:
343
- if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY.value]):
348
+ if int(self.saved_search.content[SavedSearchKeys.DISBALED_KEY]): # type: ignore
344
349
  return False
345
350
  else:
346
351
  return True
347
352
  else:
348
- raise ClientError("Something unexpected went wrong in initialization; saved_search was not populated")
353
+ raise ClientError(
354
+ "Something unexpected went wrong in initialization; saved_search was not populated"
355
+ )
349
356
 
350
357
  @ property
351
358
  def has_risk_analysis_action(self) -> bool:
@@ -368,7 +375,7 @@ class CorrelationSearch(BaseModel):
368
375
  :param content: a dict of strings to values
369
376
  :returns: a RiskAnalysisAction, or None if none exists
370
377
  """
371
- if int(content[SavedSearchKeys.RISK_ACTION_KEY.value]):
378
+ if int(content[SavedSearchKeys.RISK_ACTION_KEY]):
372
379
  try:
373
380
  return RiskAnalysisAction.parse_from_dict(content)
374
381
  except ValueError as e:
@@ -383,23 +390,10 @@ class CorrelationSearch(BaseModel):
383
390
  :returns: a NotableAction, or None if none exists
384
391
  """
385
392
  # grab notable details if present
386
- if int(content[SavedSearchKeys.NOTABLE_ACTION_KEY.value]):
393
+ if int(content[SavedSearchKeys.NOTABLE_ACTION_KEY]):
387
394
  return NotableAction.parse_from_dict(content)
388
395
  return None
389
396
 
390
- @staticmethod
391
- def _get_relevant_observables(observables: list[Observable]) -> list[Observable]:
392
- """
393
- Given a list of observables, identify the subset of those relevant for risk matching
394
- :param observables: the Observable objects to filter
395
- :returns: the filtered list of relevant observables
396
- """
397
- relevant = []
398
- for observable in observables:
399
- if not RiskEvent.ignore_observable(observable):
400
- relevant.append(observable)
401
- return relevant
402
-
403
397
  def _parse_risk_and_notable_actions(self) -> None:
404
398
  """Parses the risk/notable metadata we care about from self.saved_search.content
405
399
 
@@ -463,9 +457,9 @@ class CorrelationSearch(BaseModel):
463
457
 
464
458
  def update_timeframe(
465
459
  self,
466
- earliest_time: str = ScheduleConfig.EARLIEST_TIME.value,
467
- latest_time: str = ScheduleConfig.LATEST_TIME.value,
468
- cron_schedule: str = ScheduleConfig.CRON_SCHEDULE.value,
460
+ earliest_time: str = ScheduleConfig.EARLIEST_TIME,
461
+ latest_time: str = ScheduleConfig.LATEST_TIME,
462
+ cron_schedule: str = ScheduleConfig.CRON_SCHEDULE,
469
463
  refresh: bool = True
470
464
  ) -> None:
471
465
  """Updates the correlation search timeframe to work with test data
@@ -481,9 +475,9 @@ class CorrelationSearch(BaseModel):
481
475
  """
482
476
  # update the SavedSearch accordingly
483
477
  data = {
484
- SavedSearchKeys.EARLIEST_TIME_KEY.value: earliest_time,
485
- SavedSearchKeys.LATEST_TIME_KEY.value: latest_time,
486
- SavedSearchKeys.CRON_SCHEDULE_KEY.value: cron_schedule
478
+ SavedSearchKeys.EARLIEST_TIME_KEY: earliest_time,
479
+ SavedSearchKeys.LATEST_TIME_KEY: latest_time,
480
+ SavedSearchKeys.CRON_SCHEDULE_KEY: cron_schedule
487
481
  }
488
482
  self.logger.info(data)
489
483
  self.logger.info(f"Updating timeframe for '{self.name}': {data}")
@@ -495,7 +489,7 @@ class CorrelationSearch(BaseModel):
495
489
  if refresh:
496
490
  self.refresh()
497
491
 
498
- def force_run(self, refresh=True) -> None:
492
+ def force_run(self, refresh: bool = True) -> None:
499
493
  """Forces a detection run
500
494
 
501
495
  Enables the detection, adjusts the cron schedule to run every 1 minute, and widens the earliest/latest window
@@ -506,7 +500,7 @@ class CorrelationSearch(BaseModel):
506
500
  if not self.enabled:
507
501
  self.enable(refresh=False)
508
502
  else:
509
- self.logger.warn(f"Detection '{self.name}' was already enabled")
503
+ self.logger.warning(f"Detection '{self.name}' was already enabled")
510
504
 
511
505
  if refresh:
512
506
  self.refresh()
@@ -554,10 +548,10 @@ class CorrelationSearch(BaseModel):
554
548
  for result in result_iterator:
555
549
  # sanity check that this result from the iterator is a risk event and not some
556
550
  # other metadata
557
- if result["index"] == Indexes.RISK_INDEX.value:
551
+ if result["index"] == Indexes.RISK_INDEX:
558
552
  try:
559
553
  parsed_raw = json.loads(result["_raw"])
560
- event = RiskEvent.parse_obj(parsed_raw)
554
+ event = RiskEvent.model_validate(parsed_raw)
561
555
  except Exception:
562
556
  self.logger.error(f"Failed to parse RiskEvent from search result: {result}")
563
557
  raise
@@ -619,10 +613,10 @@ class CorrelationSearch(BaseModel):
619
613
  for result in result_iterator:
620
614
  # sanity check that this result from the iterator is a notable event and not some
621
615
  # other metadata
622
- if result["index"] == Indexes.NOTABLE_INDEX.value:
616
+ if result["index"] == Indexes.NOTABLE_INDEX:
623
617
  try:
624
618
  parsed_raw = json.loads(result["_raw"])
625
- event = NotableEvent.parse_obj(parsed_raw)
619
+ event = NotableEvent.model_validate(parsed_raw)
626
620
  except Exception:
627
621
  self.logger.error(f"Failed to parse NotableEvent from search result: {result}")
628
622
  raise
@@ -646,24 +640,21 @@ class CorrelationSearch(BaseModel):
646
640
  """Validates the existence of any expected risk events
647
641
 
648
642
  First ensure the risk event exists, and if it does validate its risk message and make sure
649
- any events align with the specified observables. Also adds the risk index to the purge list
643
+ any events align with the specified risk object. Also adds the risk index to the purge list
650
644
  if risk events existed
651
645
  :param elapsed_sleep_time: an int representing the amount of time slept thus far waiting to
652
646
  check the risks/notables
653
647
  :returns: an IntegrationTestResult on failure; None on success
654
648
  """
655
- # Create a mapping of the relevant observables to counters
656
- observables = CorrelationSearch._get_relevant_observables(self.detection.tags.observable)
657
- observable_counts: dict[str, int] = {str(x): 0 for x in observables}
658
-
659
- # NOTE: we intentionally want this to be an error state and not a failure state, as
660
- # ultimately this validation should be handled during the build process
661
- if len(observables) != len(observable_counts):
662
- raise ClientError(
663
- f"At least two observables in '{self.detection.name}' have the same name; "
664
- "each observable for a detection should be unique."
649
+ # Ensure the rba object is defined
650
+ if self.detection.rba is None:
651
+ raise ValidationFailed(
652
+ f"Unexpected error: Detection '{self.detection.name}' has no RBA objects associated"
653
+ " with it; cannot validate."
665
654
  )
666
655
 
656
+ risk_object_counts: dict[int, int] = {id(x): 0 for x in self.detection.rba.risk_objects}
657
+
667
658
  # Get the risk events; note that we use the cached risk events, expecting they were
668
659
  # saved by a prior call to risk_event_exists
669
660
  events = self.get_risk_events()
@@ -673,63 +664,66 @@ class CorrelationSearch(BaseModel):
673
664
  for event in events:
674
665
  c += 1
675
666
  self.logger.debug(
676
- f"Validating risk event ({event.risk_object}, {event.risk_object_type}): "
667
+ f"Validating risk event ({event.es_risk_object}, {event.es_risk_object_type}): "
677
668
  f"{c}/{len(events)}"
678
669
  )
679
670
  event.validate_against_detection(self.detection)
680
671
 
681
- # Update observable count based on match
682
- matched_observable = event.get_matched_observable(self.detection.tags.observable)
672
+ # Update risk object count based on match
673
+ matched_risk_object = event.get_matched_risk_object(self.detection.rba.risk_objects)
683
674
  self.logger.debug(
684
- f"Matched risk event (object={event.risk_object}, type={event.risk_object_type}) "
685
- f"to observable (name={matched_observable.name}, type={matched_observable.type}, "
686
- f"role={matched_observable.role}) using the source field "
675
+ f"Matched risk event (object={event.es_risk_object}, type={event.es_risk_object_type}) "
676
+ f"to detection's risk object (name={matched_risk_object.field}, "
677
+ f"type={matched_risk_object.type.value}) using the source field "
687
678
  f"'{event.source_field_name}'"
688
679
  )
689
- observable_counts[str(matched_observable)] += 1
680
+ risk_object_counts[id(matched_risk_object)] += 1
690
681
 
691
- # Report any observables which did not have at least one match to a risk event
692
- for observable in observables:
682
+ # Report any risk objects which did not have at least one match to a risk event
683
+ for risk_object in self.detection.rba.risk_objects:
693
684
  self.logger.debug(
694
- f"Matched observable (name={observable.name}, type={observable.type}, "
695
- f"role={observable.role}) to {observable_counts[str(observable)]} risk events."
685
+ f"Matched risk object (name={risk_object.field}, type={risk_object.type.value} "
686
+ f"to {risk_object_counts[id(risk_object)]} risk events."
696
687
  )
697
- if observable_counts[str(observable)] == 0:
688
+ if risk_object_counts[id(risk_object)] == 0:
698
689
  raise ValidationFailed(
699
- f"Observable (name={observable.name}, type={observable.type}, "
700
- f"role={observable.role}) was not matched to any risk events."
690
+ f"Risk object (name={risk_object.field}, type={risk_object.type.value}) "
691
+ "was not matched to any risk events."
701
692
  )
702
693
 
703
694
  # TODO (#250): Re-enable and refactor code that validates the specific risk counts
704
695
  # Validate risk events in aggregate; we should have an equal amount of risk events for each
705
- # relevant observable, and the total count should match the total number of events
696
+ # relevant risk object, and the total count should match the total number of events
706
697
  # individual_count: int | None = None
707
698
  # total_count = 0
708
- # for observable_str in observable_counts:
699
+ # for risk_object_id in risk_object_counts:
709
700
  # self.logger.debug(
710
- # f"Observable <{observable_str}> match count: {observable_counts[observable_str]}"
701
+ # f"Risk object <{risk_object_id}> match count: {risk_object_counts[risk_object_id]}"
711
702
  # )
712
703
 
713
704
  # # Grab the first value encountered if not set yet
714
705
  # if individual_count is None:
715
- # individual_count = observable_counts[observable_str]
706
+ # individual_count = risk_object_counts[risk_object_id]
716
707
  # else:
717
- # # Confirm that the count for the current observable matches the count of the others
718
- # if observable_counts[observable_str] != individual_count:
708
+ # # Confirm that the count for the current risk object matches the count of the
709
+ # # others
710
+ # if risk_object_counts[risk_object_id] != individual_count:
719
711
  # raise ValidationFailed(
720
- # f"Count of risk events matching observable <\"{observable_str}\"> "
721
- # f"({observable_counts[observable_str]}) does not match the count of those "
722
- # f"matching other observables ({individual_count})."
712
+ # f"Count of risk events matching detection's risk object <\"{risk_object_id}\"> "
713
+ # f"({risk_object_counts[risk_object_id]}) does not match the count of those "
714
+ # f"matching other risk objects ({individual_count})."
723
715
  # )
724
716
 
725
- # # Aggregate total count of events matched to observables
726
- # total_count += observable_counts[observable_str]
717
+ # # Aggregate total count of events matched to risk objects
718
+ # total_count += risk_object_counts[risk_object_id]
727
719
 
728
- # # Raise if the the number of events doesn't match the number of those matched to observables
720
+ # # Raise if the the number of events doesn't match the number of those matched to risk
721
+ # # objects
729
722
  # if len(events) != total_count:
730
723
  # raise ValidationFailed(
731
724
  # f"The total number of risk events {len(events)} does not match the number of "
732
- # f"risk events we were able to match against observables ({total_count})."
725
+ # "risk events we were able to match against risk objects from the detection "
726
+ # f"({total_count})."
733
727
  # )
734
728
 
735
729
  # TODO (PEX-434): implement deeper notable validation
@@ -746,7 +740,7 @@ class CorrelationSearch(BaseModel):
746
740
 
747
741
  # NOTE: it would be more ideal to switch this to a system which gets the handle of the saved search job and polls
748
742
  # it for completion, but that seems more tricky
749
- def test(self, max_sleep: int = TimeoutConfig.MAX_SLEEP.value, raise_on_exc: bool = False) -> IntegrationTestResult:
743
+ def test(self, max_sleep: int = TimeoutConfig.MAX_SLEEP, raise_on_exc: bool = False) -> IntegrationTestResult:
750
744
  """Execute the integration test
751
745
 
752
746
  Executes an integration test for this CorrelationSearch. First, ensures no matching risk/notables already exist
@@ -760,10 +754,10 @@ class CorrelationSearch(BaseModel):
760
754
  """
761
755
  # max_sleep must be greater than the base value we must wait for the scheduled searchjob to run (jobs run every
762
756
  # 60s)
763
- if max_sleep < TimeoutConfig.BASE_SLEEP.value:
757
+ if max_sleep < TimeoutConfig.BASE_SLEEP:
764
758
  raise ClientError(
765
759
  f"max_sleep value of {max_sleep} is less than the base sleep required "
766
- f"({TimeoutConfig.BASE_SLEEP.value})"
760
+ f"({TimeoutConfig.BASE_SLEEP})"
767
761
  )
768
762
 
769
763
  # initialize result as None
@@ -774,7 +768,7 @@ class CorrelationSearch(BaseModel):
774
768
  num_tries = 0
775
769
 
776
770
  # set the initial base sleep time
777
- time_to_sleep = TimeoutConfig.BASE_SLEEP.value
771
+ time_to_sleep = TimeoutConfig.BASE_SLEEP
778
772
 
779
773
  try:
780
774
  # first make sure the indexes are currently empty and the detection is starting from a disabled state
@@ -783,11 +777,11 @@ class CorrelationSearch(BaseModel):
783
777
  )
784
778
  self.update_pbar(TestingStates.PRE_CLEANUP)
785
779
  if self.risk_event_exists():
786
- self.logger.warn(
780
+ self.logger.warning(
787
781
  f"Risk events matching '{self.name}' already exist; marking for deletion"
788
782
  )
789
783
  if self.notable_event_exists():
790
- self.logger.warn(
784
+ self.logger.warning(
791
785
  f"Notable events matching '{self.name}' already exist; marking for deletion"
792
786
  )
793
787
  self.cleanup()
@@ -934,11 +928,11 @@ class CorrelationSearch(BaseModel):
934
928
  :param query: the SPL string to run
935
929
  """
936
930
  self.logger.debug(f"Executing query: `{query}`")
937
- job = self.service.search(query, exec_mode="blocking")
931
+ job = self.service.search(query, exec_mode="blocking") # type: ignore
938
932
 
939
933
  # query the results, catching any HTTP status code errors
940
934
  try:
941
- response_reader: ResponseReader = job.results(output_mode="json")
935
+ response_reader: ResponseReader = job.results(output_mode="json") # type: ignore
942
936
  except HTTPError as e:
943
937
  # e.g. -> HTTP 400 Bad Request -- b'{"messages":[{"type":"FATAL","text":"Error in \'delete\' command: You
944
938
  # have insufficient privileges to delete events."}]}'
@@ -946,7 +940,7 @@ class CorrelationSearch(BaseModel):
946
940
  self.logger.error(message)
947
941
  raise ServerError(message)
948
942
 
949
- return ResultIterator(response_reader)
943
+ return ResultIterator(response_reader) # type: ignore
950
944
 
951
945
  def _delete_index(self, index: str) -> None:
952
946
  """Deletes events in a given index
@@ -979,7 +973,7 @@ class CorrelationSearch(BaseModel):
979
973
  message = f"No result returned showing deletion in index {index}"
980
974
  raise ServerError(message)
981
975
 
982
- def cleanup(self, delete_test_index=False) -> None:
976
+ def cleanup(self, delete_test_index: bool = False) -> None:
983
977
  """Cleans up after an integration test
984
978
 
985
979
  First, disable the detection; then dump the risk, notable, and (optionally) test indexes. The test index is
@@ -999,9 +993,9 @@ class CorrelationSearch(BaseModel):
999
993
  if delete_test_index:
1000
994
  self.indexes_to_purge.add(self.test_index) # type: ignore
1001
995
  if self._risk_events is not None:
1002
- self.indexes_to_purge.add(Indexes.RISK_INDEX.value)
996
+ self.indexes_to_purge.add(Indexes.RISK_INDEX)
1003
997
  if self._notable_events is not None:
1004
- self.indexes_to_purge.add(Indexes.NOTABLE_INDEX.value)
998
+ self.indexes_to_purge.add(Indexes.NOTABLE_INDEX)
1005
999
 
1006
1000
  # delete the indexes
1007
1001
  for index in self.indexes_to_purge:
@@ -1,8 +1,7 @@
1
1
  from __future__ import annotations
2
2
  from typing import Optional, Any
3
- from pydantic import Field, HttpUrl, model_serializer, BaseModel
3
+ from pydantic import Field, HttpUrl, model_serializer, BaseModel, ConfigDict
4
4
  from contentctl.objects.security_content_object import SecurityContentObject
5
- from contentctl.objects.event_source import EventSource
6
5
 
7
6
 
8
7
  class TA(BaseModel):
@@ -15,10 +14,10 @@ class DataSource(SecurityContentObject):
15
14
  separator: Optional[str] = None
16
15
  configuration: Optional[str] = None
17
16
  supported_TA: list[TA] = []
18
- fields: Optional[list] = None
19
- field_mappings: Optional[list] = None
20
- convert_to_log_source: Optional[list] = None
21
- example_log: Optional[str] = None
17
+ fields: None | list = None
18
+ field_mappings: None | list = None
19
+ convert_to_log_source: None | list = None
20
+ example_log: None | str = None
22
21
 
23
22
 
24
23
  @model_serializer
@@ -1,5 +1,5 @@
1
1
  from __future__ import annotations
2
- from pydantic import Field, computed_field,ValidationInfo, model_serializer, NonNegativeInt
2
+ from pydantic import Field, computed_field,ValidationInfo, model_serializer, NonNegativeInt, ConfigDict
3
3
  from typing import Any
4
4
  import uuid
5
5
  import datetime
@@ -10,14 +10,7 @@ from contentctl.objects.alert_action import AlertAction
10
10
  from contentctl.objects.enums import DeploymentType
11
11
 
12
12
 
13
- class Deployment(SecurityContentObject):
14
- #id: str = None
15
- #date: str = None
16
- #author: str = None
17
- #description: str = None
18
- #contentType: SecurityContentType = SecurityContentType.deployments
19
-
20
-
13
+ class Deployment(SecurityContentObject):
21
14
  scheduling: DeploymentScheduling = Field(...)
22
15
  alert_action: AlertAction = AlertAction()
23
16
  type: DeploymentType = Field(...)
@@ -72,7 +65,6 @@ class Deployment(SecurityContentObject):
72
65
  "tags": self.tags
73
66
  }
74
67
 
75
-
76
68
  #Combine fields from this model with fields from parent
77
69
  model.update(super_fields)
78
70
 
@@ -1,8 +1,9 @@
1
1
  from __future__ import annotations
2
- from pydantic import BaseModel
2
+ from pydantic import BaseModel, ConfigDict
3
3
 
4
4
 
5
5
  class DeploymentEmail(BaseModel):
6
+ model_config = ConfigDict(extra="forbid")
6
7
  message: str
7
8
  subject: str
8
9
  to: str
@@ -1,8 +1,9 @@
1
1
  from __future__ import annotations
2
- from pydantic import BaseModel
2
+ from pydantic import BaseModel, ConfigDict
3
3
  from typing import List
4
4
 
5
5
  class DeploymentNotable(BaseModel):
6
+ model_config = ConfigDict(extra="forbid")
6
7
  rule_description: str
7
8
  rule_title: str
8
9
  nes_fields: List[str]
@@ -1,8 +1,9 @@
1
1
  from __future__ import annotations
2
- from pydantic import BaseModel
2
+ from pydantic import BaseModel, ConfigDict
3
3
 
4
4
 
5
5
  class DeploymentPhantom(BaseModel):
6
+ model_config = ConfigDict(extra="forbid")
6
7
  cam_workers : str
7
8
  label : str
8
9
  phantom_server : str
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations
2
- from pydantic import BaseModel
2
+ from pydantic import BaseModel, ConfigDict
3
3
 
4
4
 
5
5
  class DeploymentRBA(BaseModel):
6
+ model_config = ConfigDict(extra="forbid")
6
7
  enabled: bool = False
@@ -1,8 +1,9 @@
1
1
  from __future__ import annotations
2
- from pydantic import BaseModel
2
+ from pydantic import BaseModel, ConfigDict
3
3
 
4
4
 
5
5
  class DeploymentScheduling(BaseModel):
6
+ model_config = ConfigDict(extra="forbid")
6
7
  cron_schedule: str
7
8
  earliest_time: str
8
9
  latest_time: str
@@ -1,7 +1,8 @@
1
1
  from __future__ import annotations
2
- from pydantic import BaseModel
2
+ from pydantic import BaseModel, ConfigDict
3
3
 
4
4
 
5
5
  class DeploymentSlack(BaseModel):
6
+ model_config = ConfigDict(extra="forbid")
6
7
  channel: str
7
8
  message: str