contentctl 4.3.4__py3-none-any.whl → 4.4.0__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
Files changed (63)
  1. contentctl/actions/build.py +1 -0
  2. contentctl/actions/detection_testing/GitService.py +10 -10
  3. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +68 -38
  4. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +5 -1
  5. contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +10 -8
  6. contentctl/actions/initialize.py +28 -12
  7. contentctl/actions/inspect.py +191 -91
  8. contentctl/actions/new_content.py +10 -2
  9. contentctl/actions/validate.py +3 -6
  10. contentctl/api.py +1 -1
  11. contentctl/contentctl.py +3 -0
  12. contentctl/enrichments/attack_enrichment.py +49 -81
  13. contentctl/enrichments/cve_enrichment.py +6 -7
  14. contentctl/helper/splunk_app.py +141 -10
  15. contentctl/input/director.py +19 -24
  16. contentctl/input/new_content_questions.py +9 -42
  17. contentctl/objects/abstract_security_content_objects/detection_abstract.py +155 -13
  18. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +17 -9
  19. contentctl/objects/atomic.py +51 -77
  20. contentctl/objects/base_test_result.py +7 -7
  21. contentctl/objects/baseline.py +12 -18
  22. contentctl/objects/baseline_tags.py +2 -5
  23. contentctl/objects/config.py +154 -26
  24. contentctl/objects/constants.py +34 -1
  25. contentctl/objects/correlation_search.py +79 -114
  26. contentctl/objects/dashboard.py +100 -0
  27. contentctl/objects/deployment.py +20 -5
  28. contentctl/objects/detection_metadata.py +71 -0
  29. contentctl/objects/detection_stanza.py +79 -0
  30. contentctl/objects/detection_tags.py +28 -26
  31. contentctl/objects/drilldown.py +70 -0
  32. contentctl/objects/enums.py +26 -24
  33. contentctl/objects/errors.py +187 -0
  34. contentctl/objects/investigation.py +23 -15
  35. contentctl/objects/investigation_tags.py +4 -3
  36. contentctl/objects/lookup.py +8 -1
  37. contentctl/objects/macro.py +16 -7
  38. contentctl/objects/notable_event.py +6 -5
  39. contentctl/objects/risk_analysis_action.py +4 -4
  40. contentctl/objects/risk_event.py +8 -7
  41. contentctl/objects/savedsearches_conf.py +196 -0
  42. contentctl/objects/story.py +4 -16
  43. contentctl/objects/throttling.py +46 -0
  44. contentctl/output/conf_output.py +4 -0
  45. contentctl/output/conf_writer.py +24 -4
  46. contentctl/output/new_content_yml_output.py +4 -9
  47. contentctl/output/templates/analyticstories_detections.j2 +2 -2
  48. contentctl/output/templates/analyticstories_investigations.j2 +5 -5
  49. contentctl/output/templates/analyticstories_stories.j2 +1 -1
  50. contentctl/output/templates/savedsearches_baselines.j2 +2 -3
  51. contentctl/output/templates/savedsearches_detections.j2 +12 -7
  52. contentctl/output/templates/savedsearches_investigations.j2 +3 -4
  53. contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +10 -1
  54. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/METADATA +6 -5
  55. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/RECORD +58 -57
  56. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/WHEEL +1 -1
  57. contentctl/objects/ssa_detection.py +0 -157
  58. contentctl/objects/ssa_detection_tags.py +0 -138
  59. contentctl/objects/unit_test_old.py +0 -10
  60. contentctl/objects/unit_test_ssa.py +0 -31
  61. contentctl/output/templates/finding_report.j2 +0 -30
  62. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/LICENSE.md +0 -0
  63. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/entry_points.txt +0 -0
contentctl/objects/abstract_security_content_objects/detection_abstract.py

@@ -20,7 +20,8 @@ from contentctl.objects.lookup import Lookup
 if TYPE_CHECKING:
     from contentctl.input.director import DirectorOutputDto
     from contentctl.objects.baseline import Baseline
-
+    from contentctl.objects.config import CustomApp
+
 from contentctl.objects.security_content_object import SecurityContentObject
 from contentctl.objects.enums import AnalyticsType
 from contentctl.objects.enums import DataModel
@@ -35,11 +36,17 @@ from contentctl.objects.test_group import TestGroup
 from contentctl.objects.integration_test import IntegrationTest
 from contentctl.objects.data_source import DataSource
 from contentctl.objects.base_test_result import TestResultStatus
-
-# from contentctl.objects.playbook import Playbook
+from contentctl.objects.drilldown import Drilldown, DRILLDOWN_SEARCH_PLACEHOLDER
 from contentctl.objects.enums import ProvidingTechnology
 from contentctl.enrichments.cve_enrichment import CveEnrichmentObj
 import datetime
+from contentctl.objects.constants import (
+    ES_MAX_STANZA_LENGTH,
+    ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE,
+    CONTENTCTL_MAX_SEARCH_NAME_LENGTH,
+    CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE
+)
+
 MISSING_SOURCES: set[str] = set()

 # Those AnalyticsTypes that we do not test via contentctl
@@ -51,8 +58,8 @@ SKIPPED_ANALYTICS_TYPES: set[str] = {
 # TODO (#266): disable the use_enum_values configuration
 class Detection_Abstract(SecurityContentObject):
     model_config = ConfigDict(use_enum_values=True)
-
-    # contentType: SecurityContentType = SecurityContentType.detections
+    name:str = Field(...,max_length=CONTENTCTL_MAX_SEARCH_NAME_LENGTH)
+    #contentType: SecurityContentType = SecurityContentType.detections
     type: AnalyticsType = Field(...)
     status: DetectionStatus = Field(...)
     data_source: list[str] = []
@@ -60,6 +67,16 @@ class Detection_Abstract(SecurityContentObject):
     search: str = Field(...)
     how_to_implement: str = Field(..., min_length=4)
     known_false_positives: str = Field(..., min_length=4)
+    explanation: None | str = Field(
+        default=None,
+        exclude=True, #Don't serialize this value when dumping the object
+        description="Provide an explanation to be included "
+        "in the 'Explanation' field of the Detection in "
+        "the Use Case Library. If this field is not "
+        "defined in the YML, it will default to the "
+        "value of the 'description' field when "
+        "serialized in analyticstories_detections.j2",
+    )

     enabled_by_default: bool = False
     file_path: FilePath = Field(...)
@@ -70,9 +87,30 @@ class Detection_Abstract(SecurityContentObject):
     # https://github.com/pydantic/pydantic/issues/9101#issuecomment-2019032541
     tests: List[Annotated[Union[UnitTest, IntegrationTest, ManualTest], Field(union_mode='left_to_right')]] = []
     # A list of groups of tests, relying on the same data
-    test_groups: Union[list[TestGroup], None] = Field(None, validate_default=True)
+    test_groups: list[TestGroup] = []

     data_source_objects: list[DataSource] = []
+    drilldown_searches: list[Drilldown] = Field(default=[], description="A list of Drilldowns that should be included with this search")
+
+    def get_conf_stanza_name(self, app:CustomApp)->str:
+        stanza_name = CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE.format(app_label=app.label, detection_name=self.name)
+        self.check_conf_stanza_max_length(stanza_name)
+        return stanza_name
+
+
+    def get_action_dot_correlationsearch_dot_label(self, app:CustomApp, max_stanza_length:int=ES_MAX_STANZA_LENGTH)->str:
+        stanza_name = self.get_conf_stanza_name(app)
+        stanza_name_after_saving_in_es = ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE.format(
+            security_domain_value = self.tags.security_domain.value,
+            search_name = stanza_name
+        )
+
+
+        if len(stanza_name_after_saving_in_es) > max_stanza_length:
+            raise ValueError(f"label may only be {max_stanza_length} characters to allow updating in-product, "
+                             f"but stanza was actually {len(stanza_name_after_saving_in_es)} characters: '{stanza_name_after_saving_in_es}' ")
+
+        return stanza_name

     @field_validator("search", mode="before")
     @classmethod
@@ -83,15 +121,13 @@ class Detection_Abstract(SecurityContentObject):


         Args:
-            value (Union[str, dict[str,Any]]): The search. It can either be a string (and should be
-                SPL or a dict, in which case it is Sigma-formatted.
+            value (str): The SPL search. It must be an SPL-formatted string.
             info (ValidationInfo): The validation info can contain a number of different objects.
                 Today it only contains the director.

         Returns:
-            Union[str, dict[str,Any]]: The search, either in sigma or SPL format.
-        """
-
+            str: The search, as an SPL formatted string.
+        """

         # Otherwise, the search is SPL.

@@ -132,6 +168,7 @@ class Detection_Abstract(SecurityContentObject):
         the model from the list of unit tests. Also, preemptively skips all manual tests, as well as
         tests for experimental/deprecated detections and Correlation type detections.
         """
+
         # Since ManualTest and UnitTest are not differentiable without looking at the manual_test
         # tag, Pydantic builds all tests as UnitTest objects. If we see the manual_test flag, we
         # convert these to ManualTest
@@ -250,6 +287,7 @@ class Detection_Abstract(SecurityContentObject):
         annotations_dict["cve"] = self.tags.cve
         annotations_dict["impact"] = self.tags.impact
         annotations_dict["type"] = self.type
+        annotations_dict["type_list"] = [self.type]
         # annotations_dict["version"] = self.version

         annotations_dict["data_source"] = self.data_source
@@ -390,7 +428,11 @@ class Detection_Abstract(SecurityContentObject):
         # NOTE: we ignore the type error around self.status because we are using Pydantic's
         # use_enum_values configuration
         # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
-
+
+        # NOTE: The `inspect` action is HIGHLY sensitive to the structure of the metadata line in
+        # the detection stanza in savedsearches.conf. Additive operations (e.g. a new field in the
+        # dict below) should not have any impact, but renaming or removing any of these fields will
+        # break the `inspect` action.
         return {
             'detection_id': str(self.id),
             'deprecated': '1' if self.status == DetectionStatus.deprecated.value else '0', # type: ignore
@@ -516,13 +558,53 @@ class Detection_Abstract(SecurityContentObject):
         self.data_source_objects = matched_data_sources

         for story in self.tags.analytic_story:
-            story.detections.append(self)
+            story.detections.append(self)

         self.cve_enrichment_func(__context)

         # Derive TestGroups and IntegrationTests, adjust for ManualTests, skip as needed
         self.adjust_tests_and_groups()

+        # Ensure that if there is at least 1 drilldown, at least
+        # 1 of the drilldowns contains the string Drilldown.SEARCH_PLACEHOLDER.
+        # This is presently a requirement when 1 or more drilldowns are added to a detection.
+        # Note that this is only required for production searches that are not hunting
+
+        if self.type == AnalyticsType.Hunting.value or self.status != DetectionStatus.production.value:
+            #No additional check need to happen on the potential drilldowns.
+            pass
+        else:
+            found_placeholder = False
+            if len(self.drilldown_searches) < 2:
+                raise ValueError(f"This detection is required to have 2 drilldown_searches, but only has [{len(self.drilldown_searches)}]")
+            for drilldown in self.drilldown_searches:
+                if DRILLDOWN_SEARCH_PLACEHOLDER in drilldown.search:
+                    found_placeholder = True
+            if not found_placeholder:
+                raise ValueError("Detection has one or more drilldown_searches, but none of them "
+                                 f"contained '{DRILLDOWN_SEARCH_PLACEHOLDER}. This is a requirement "
+                                 "if drilldown_searches are defined.'")
+
+        # Update the search fields with the original search, if required
+        for drilldown in self.drilldown_searches:
+            drilldown.perform_search_substitutions(self)
+
+        #For experimental purposes, add the default drilldowns
+        #self.drilldown_searches.extend(Drilldown.constructDrilldownsFromDetection(self))
+
+    @property
+    def drilldowns_in_JSON(self) -> list[dict[str,str]]:
+        """This function is required for proper JSON
+        serializiation of drilldowns to occur in savedsearches.conf.
+        It returns the list[Drilldown] as a list[dict].
+        Without this function, the jinja template is unable
+        to convert list[Drilldown] to JSON
+
+        Returns:
+            list[dict[str,str]]: List of Drilldowns dumped to dict format
+        """
+        return [drilldown.model_dump() for drilldown in self.drilldown_searches]
+
     @field_validator('lookups', mode="before")
     @classmethod
     def getDetectionLookups(cls, v:list[str], info:ValidationInfo) -> list[Lookup]:
@@ -651,6 +733,27 @@ class Detection_Abstract(SecurityContentObject):
         else:
             self.tags.nist = [NistCategory.DE_AE]
         return self
+
+
+    @model_validator(mode="after")
+    def ensureThrottlingFieldsExist(self):
+        '''
+        For throttling to work properly, the fields to throttle on MUST
+        exist in the search itself. If not, then we cannot apply the throttling
+        '''
+        if self.tags.throttling is None:
+            # No throttling configured for this detection
+            return self
+
+        missing_fields:list[str] = [field for field in self.tags.throttling.fields if field not in self.search]
+        if len(missing_fields) > 0:
+            raise ValueError(f"The following throttle fields were missing from the search: {missing_fields}")
+
+        else:
+            # All throttling fields present in search
+            return self
+
+

     @model_validator(mode="after")
     def ensureProperObservablesExist(self):
@@ -728,6 +831,45 @@ class Detection_Abstract(SecurityContentObject):
         # Found everything
         return self

+    @field_validator("tests", mode="before")
+    def ensure_yml_test_is_unittest(cls, v:list[dict]):
+        """The typing for the tests field allows it to be one of
+        a number of different types of tests. However, ONLY
+        UnitTest should be allowed to be defined in the YML
+        file. If part of the UnitTest defined in the YML
+        is incorrect, such as the attack_data file, then
+        it will FAIL to be instantiated as a UnitTest and
+        may instead be instantiated as a different type of
+        test, such as IntegrationTest (since that requires
+        less fields) which is incorrect. Ensure that any
+        raw data read from the YML can actually construct
+        a valid UnitTest and, if not, return errors right
+        away instead of letting Pydantic try to construct
+        it into a different type of test
+
+        Args:
+            v (list[dict]): list of dicts read from the yml.
+            Each one SHOULD be a valid UnitTest. If we cannot
+            construct a valid unitTest from it, a ValueError should be raised
+
+        Returns:
+            _type_: The input of the function, assuming no
+            ValueError is raised.
+        """
+        valueErrors:list[ValueError] = []
+        for unitTest in v:
+            #This raises a ValueError on a failed UnitTest.
+            try:
+                UnitTest.model_validate(unitTest)
+            except ValueError as e:
+                valueErrors.append(e)
+        if len(valueErrors):
+            raise ValueError(valueErrors)
+        # All of these can be constructred as UnitTests with no
+        # Exceptions, so let the normal flow continue
+        return v
+
+
     @field_validator("tests")
     def tests_validate(
         cls,
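
Note on the detection changes above: a production, non-hunting detection now needs at least two drilldown_searches (one containing the drilldown placeholder token), any configured throttling fields must appear verbatim in the SPL, and the stanza name written to savedsearches.conf is length-checked against the longer name Enterprise Security generates when a search is cloned in-product. Below is a minimal sketch of that length check, assuming made-up template strings and an assumed limit; the real values live in contentctl/objects/constants.py and are not shown in this diff.

# Minimal sketch only; template strings and the limit are assumptions, not contentctl's real constants.
DETECTION_STANZA_TEMPLATE = "{app_label} - {detection_name} - Rule"        # assumed format
ES_CLONED_STANZA_TEMPLATE = "{security_domain_value} - {search_name}"      # assumed format
ES_MAX_STANZA_LENGTH = 99                                                  # assumed limit

def correlationsearch_label(app_label: str, detection_name: str, security_domain: str) -> str:
    # Mirrors the idea behind get_action_dot_correlationsearch_dot_label: the length check
    # applies to the longer name Enterprise Security produces when the search is cloned in-product.
    stanza = DETECTION_STANZA_TEMPLATE.format(app_label=app_label, detection_name=detection_name)
    cloned = ES_CLONED_STANZA_TEMPLATE.format(security_domain_value=security_domain, search_name=stanza)
    if len(cloned) > ES_MAX_STANZA_LENGTH:
        raise ValueError(f"label would be {len(cloned)} characters after in-product cloning; max is {ES_MAX_STANZA_LENGTH}")
    return stanza

print(correlationsearch_label("ESCU", "Anomalous usage of 7zip", "endpoint"))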

contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py

@@ -5,8 +5,10 @@ if TYPE_CHECKING:
     from contentctl.objects.deployment import Deployment
     from contentctl.objects.security_content_object import SecurityContentObject
     from contentctl.input.director import DirectorOutputDto
+    from contentctl.objects.config import CustomApp

 from contentctl.objects.enums import AnalyticsType
+from contentctl.objects.constants import CONTENTCTL_MAX_STANZA_LENGTH
 import abc
 import uuid
 import datetime
@@ -31,14 +33,14 @@ NO_FILE_NAME = "NO_FILE_NAME"

 # TODO (#266): disable the use_enum_values configuration
 class SecurityContentObject_Abstract(BaseModel, abc.ABC):
-    model_config = ConfigDict(use_enum_values=True, validate_default=True)
-
-    name: str = Field(...)
-    author: str = Field("Content Author", max_length=255)
-    date: datetime.date = Field(datetime.date.today())
-    version: NonNegativeInt = 1
-    id: uuid.UUID = Field(default_factory=uuid.uuid4) # we set a default here until all content has a uuid
-    description: str = Field("Enter Description Here", max_length=10000)
+    model_config = ConfigDict(use_enum_values=True,validate_default=True)
+
+    name: str = Field(...,max_length=99)
+    author: str = Field(...,max_length=255)
+    date: datetime.date = Field(...)
+    version: NonNegativeInt = Field(...)
+    id: uuid.UUID = Field(...) #we set a default here until all content has a uuid
+    description: str = Field(...,max_length=10000)
     file_path: Optional[FilePath] = None
     references: Optional[List[HttpUrl]] = None

@@ -56,7 +58,13 @@ class SecurityContentObject_Abstract(BaseModel, abc.ABC):
             "description": self.description,
             "references": [str(url) for url in self.references or []]
         }
-
+
+
+    def check_conf_stanza_max_length(self, stanza_name:str, max_stanza_length:int=CONTENTCTL_MAX_STANZA_LENGTH) -> None:
+        if len(stanza_name) > max_stanza_length:
+            raise ValueError(f"conf stanza may only be {max_stanza_length} characters, "
+                             f"but stanza was actually {len(stanza_name)} characters: '{stanza_name}' ")
+
     @staticmethod
     def objectListToNameList(objects: list[SecurityContentObject]) -> list[str]:
         return [object.getName() for object in objects]
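
Note on the SecurityContentObject_Abstract changes above: the defaults for author, date, version, id, and description were removed, so every content YML must now supply them explicitly, and name is capped at 99 characters. A minimal sketch of the resulting behavior, using a hypothetical ExampleContent model rather than the real abstract class:

import datetime, uuid
from pydantic import BaseModel, ConfigDict, Field, NonNegativeInt, ValidationError

class ExampleContent(BaseModel):  # hypothetical stand-in, not the real abstract class
    model_config = ConfigDict(use_enum_values=True, validate_default=True)
    name: str = Field(..., max_length=99)
    author: str = Field(..., max_length=255)
    date: datetime.date = Field(...)
    version: NonNegativeInt = Field(...)
    id: uuid.UUID = Field(...)
    description: str = Field(..., max_length=10000)

try:
    ExampleContent(name="My Detection")
except ValidationError as e:
    # author, date, version, id and description are now required; there are no silent defaults
    print(e.error_count(), "missing fields")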

contentctl/objects/atomic.py

@@ -1,12 +1,15 @@
 from __future__ import annotations
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from contentctl.objects.config import validate
+
 from contentctl.input.yml_reader import YmlReader
 from pydantic import BaseModel, model_validator, ConfigDict, FilePath, UUID4
+import dataclasses
 from typing import List, Optional, Dict, Union, Self
 import pathlib
-
-
 from enum import StrEnum, auto
-
+import uuid

 class SupportedPlatform(StrEnum):
     windows = auto()
@@ -84,15 +87,6 @@ class AtomicTest(BaseModel):
     dependencies: Optional[List[AtomicDependency]] = None
     dependency_executor_name: Optional[DependencyExecutorType] = None

-    @staticmethod
-    def AtomicTestWhenEnrichmentIsDisabled(auto_generated_guid: UUID4) -> AtomicTest:
-        return AtomicTest(name="Placeholder Atomic Test (enrichment disabled)",
-                          auto_generated_guid=auto_generated_guid,
-                          description="This is a placeholder AtomicTest. Because enrichments were not enabled, it has not been validated against the real Atomic Red Team Repo.",
-                          supported_platforms=[],
-                          executor=AtomicExecutor(name="Placeholder Executor (enrichment disabled)",
-                                                  command="Placeholder command (enrichment disabled)"))
-
     @staticmethod
     def AtomicTestWhenTestIsMissing(auto_generated_guid: UUID4) -> AtomicTest:
         return AtomicTest(name="Missing Atomic",
@@ -100,31 +94,16 @@ class AtomicTest(BaseModel):
                           description="This is a placeholder AtomicTest. Either the auto_generated_guid is incorrect or it there was an exception while parsing its AtomicFile.",
                          supported_platforms=[],
                          executor=AtomicExecutor(name="Placeholder Executor (failed to find auto_generated_guid)",
-                                                 command="Placeholder command (failed to find auto_generated_guid)"))
-
-
-    @classmethod
-    def getAtomicByAtomicGuid(cls, guid: UUID4, all_atomics:list[AtomicTest] | None)->AtomicTest:
-        if all_atomics is None:
-            return AtomicTest.AtomicTestWhenEnrichmentIsDisabled(guid)
-        matching_atomics = [atomic for atomic in all_atomics if atomic.auto_generated_guid == guid]
-        if len(matching_atomics) == 0:
-            raise ValueError(f"Unable to find atomic_guid {guid} in {len(all_atomics)} atomic_tests from ART Repo")
-        elif len(matching_atomics) > 1:
-            raise ValueError(f"Found {len(matching_atomics)} matching tests for atomic_guid {guid} in {len(all_atomics)} atomic_tests from ART Repo")
-
-        return matching_atomics[0]
+                                                 command="Placeholder command (failed to find auto_generated_guid)"))

     @classmethod
-    def parseArtRepo(cls, repo_path:pathlib.Path)->List[AtomicFile]:
-        if not repo_path.is_dir():
-            print(f"WARNING: Atomic Red Team repo does NOT exist at {repo_path.absolute()}. You can check it out with:\n * git clone --single-branch https://github.com/redcanaryco/atomic-red-team. This will ONLY throw a validation error if you reference atomid_guids in your detection(s).")
-            return []
+    def parseArtRepo(cls, repo_path:pathlib.Path)->dict[uuid.UUID, AtomicTest]:
+        test_mapping: dict[uuid.UUID, AtomicTest] = {}
         atomics_path = repo_path/"atomics"
         if not atomics_path.is_dir():
-            print(f"WARNING: Atomic Red Team repo exists at {repo_path.absolute}, but atomics directory does NOT exist at {atomics_path.absolute()}. Was it deleted or renamed? This will ONLY throw a validation error if you reference atomid_guids in your detection(s).")
-            return []
-
+            raise FileNotFoundError(f"WARNING: Atomic Red Team repo exists at {repo_path}, "
+                                    f"but atomics directory does NOT exist at {atomics_path}. "
+                                    "Was it deleted or renamed?")

         atomic_files:List[AtomicFile] = []
         error_messages:List[str] = []
@@ -133,6 +112,7 @@ class AtomicTest(BaseModel):
                 atomic_files.append(cls.constructAtomicFile(obj_path))
             except Exception as e:
                 error_messages.append(f"File [{obj_path}]\n{str(e)}")
+
         if len(error_messages) > 0:
             exceptions_string = '\n\n'.join(error_messages)
             print(f"WARNING: The following [{len(error_messages)}] ERRORS were generated when parsing the Atomic Red Team Repo.\n"
@@ -140,38 +120,28 @@ class AtomicTest(BaseModel):
                   "Note that this is only a warning and contentctl will ignore Atomics contained in these files.\n"
                   f"However, if you have written a detection that references them, 'contentctl build --enrichments' will fail:\n\n{exceptions_string}")

-        return atomic_files
+        # Now iterate over all the files, collect all the tests, and return the dict mapping
+        redefined_guids:set[uuid.UUID] = set()
+        for atomic_file in atomic_files:
+            for atomic_test in atomic_file.atomic_tests:
+                if atomic_test.auto_generated_guid in test_mapping:
+                    redefined_guids.add(atomic_test.auto_generated_guid)
+                else:
+                    test_mapping[atomic_test.auto_generated_guid] = atomic_test
+        if len(redefined_guids) > 0:
+            guids_string = '\n\t'.join([str(guid) for guid in redefined_guids])
+            raise Exception(f"The following [{len(redefined_guids)}] Atomic Test"
+                            " auto_generated_guid(s) were defined more than once. "
+                            f"auto_generated_guids MUST be unique:\n\t{guids_string}")
+
+        print(f"Successfully parsed [{len(test_mapping)}] Atomic Red Team Tests!")
+        return test_mapping

     @classmethod
     def constructAtomicFile(cls, file_path:pathlib.Path)->AtomicFile:
         yml_dict = YmlReader.load_file(file_path)
         atomic_file = AtomicFile.model_validate(yml_dict)
         return atomic_file
-
-    @classmethod
-    def getAtomicTestsFromArtRepo(cls, repo_path:pathlib.Path, enabled:bool=True)->list[AtomicTest] | None:
-        # Get all the atomic files. Note that if the ART repo is not found, we will not throw an error,
-        # but will not have any atomics. This means that if atomic_guids are referenced during validation,
-        # validation for those detections will fail
-        if not enabled:
-            return None
-
-        atomic_files = cls.getAtomicFilesFromArtRepo(repo_path)
-
-        atomic_tests:List[AtomicTest] = []
-        for atomic_file in atomic_files:
-            atomic_tests.extend(atomic_file.atomic_tests)
-        print(f"Found [{len(atomic_tests)}] Atomic Simulations in the Atomic Red Team Repo!")
-        return atomic_tests
-
-
-    @classmethod
-    def getAtomicFilesFromArtRepo(cls, repo_path:pathlib.Path)->List[AtomicFile]:
-        return cls.parseArtRepo(repo_path)
-
-
-
-


 class AtomicFile(BaseModel):
@@ -182,27 +152,31 @@ class AtomicFile(BaseModel):
     atomic_tests: List[AtomicTest]


+class AtomicEnrichment(BaseModel):
+    data: dict[uuid.UUID,AtomicTest] = dataclasses.field(default_factory = dict)
+    use_enrichment: bool = False

+    @classmethod
+    def getAtomicEnrichment(cls, config:validate)->AtomicEnrichment:
+        enrichment = AtomicEnrichment(use_enrichment=config.enrichments)
+        if config.enrichments:
+            enrichment.data = AtomicTest.parseArtRepo(config.atomic_red_team_repo_path)
+
+        return enrichment
+
+    def getAtomic(self, atomic_guid: uuid.UUID)->AtomicTest:
+        if self.use_enrichment:
+            if atomic_guid in self.data:
+                return self.data[atomic_guid]
+            else:
+                raise Exception(f"Atomic with GUID {atomic_guid} not found.")
+        else:
+            # If enrichment is not enabled, for the sake of compatability
+            # return a stub test with no useful or meaningful information.
+            return AtomicTest.AtomicTestWhenTestIsMissing(atomic_guid)

-# ATOMICS_PATH = pathlib.Path("./atomics")
-# atomic_objects = []
-# atomic_simulations = []
-# for obj_path in ATOMICS_PATH.glob("**/T*.yaml"):
-# try:
-# with open(obj_path, 'r', encoding="utf-8") as obj_handle:
-# obj_data = yaml.load(obj_handle, Loader=yaml.CSafeLoader)
-# atomic_obj = AtomicFile.model_validate(obj_data)
-# except Exception as e:
-# print(f"Error parsing object at path {obj_path}: {str(e)}")
-# print(f"We have successfully parsed {len(atomic_objects)}, however!")
-# sys.exit(1)
-
-# print(f"Successfully parsed {obj_path}!")
-# atomic_objects.append(atomic_obj)
-# atomic_simulations += atomic_obj.atomic_tests
+

-# print(f"Successfully parsed all {len(atomic_objects)} files!")
-# print(f"Successfully parsed all {len(atomic_simulations)} simulations!")



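Note on the atomic.py changes above: parseArtRepo() now returns a dict keyed by auto_generated_guid and raises on duplicate GUIDs or a missing atomics directory, and the old enrichment-disabled placeholder is gone. AtomicEnrichment.getAtomic() returns the "Missing Atomic" stub whenever enrichment is disabled and raises only when enrichment is enabled and the GUID is unknown. A usage sketch, assuming contentctl 4.4.0 is installed:

import uuid
from contentctl.objects.atomic import AtomicEnrichment

# With enrichment disabled, no Atomic Red Team repo is parsed and any GUID
# resolves to the placeholder test rather than raising.
enrichment = AtomicEnrichment(use_enrichment=False)
stub = enrichment.getAtomic(uuid.uuid4())
print(stub.name)  # "Missing Atomic"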

contentctl/objects/base_test_result.py

@@ -1,8 +1,8 @@
 from typing import Union, Any
 from enum import Enum

-from pydantic import BaseModel
-from splunklib.data import Record
+from pydantic import ConfigDict, BaseModel
+from splunklib.data import Record # type: ignore

 from contentctl.helper.utils import Utils

@@ -53,11 +53,11 @@ class BaseTestResult(BaseModel):
     # The Splunk endpoint URL
     sid_link: Union[None, str] = None

-    class Config:
-        validate_assignment = True
-
-        # Needed to allow for embedding of Exceptions in the model
-        arbitrary_types_allowed = True
+    # Needed to allow for embedding of Exceptions in the model
+    model_config = ConfigDict(
+        validate_assignment=True,
+        arbitrary_types_allowed=True
+    )

     @property
     def passed(self) -> bool:
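
Note on the BaseTestResult change above: the Pydantic v1-style inner Config class is replaced with a ConfigDict, keeping validate_assignment and arbitrary_types_allowed; the latter is what lets an Exception instance live on the model. A minimal sketch with a hypothetical ResultSketch model (not the real BaseTestResult):

from typing import Union
from pydantic import BaseModel, ConfigDict

class ResultSketch(BaseModel):  # hypothetical stand-in
    model_config = ConfigDict(validate_assignment=True, arbitrary_types_allowed=True)
    exception: Union[None, Exception] = None

r = ResultSketch()
r.exception = ValueError("boom")   # validated on assignment, accepted as an arbitrary type
print(type(r.exception).__name__)  # ValueError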

contentctl/objects/baseline.py

@@ -1,33 +1,21 @@

 from __future__ import annotations
-from typing import TYPE_CHECKING, Annotated, Optional, List,Any
+from typing import Annotated, Optional, List,Any
 from pydantic import field_validator, ValidationInfo, Field, model_serializer
-if TYPE_CHECKING:
-    from contentctl.input.director import DirectorOutputDto
-
 from contentctl.objects.deployment import Deployment
 from contentctl.objects.security_content_object import SecurityContentObject
-from contentctl.objects.enums import DataModel, AnalyticsType
+from contentctl.objects.enums import DataModel
 from contentctl.objects.baseline_tags import BaselineTags
-from contentctl.objects.enums import DeploymentType
-#from contentctl.objects.deployment import Deployment

-# from typing import TYPE_CHECKING
-# if TYPE_CHECKING:
-#     from contentctl.input.director import DirectorOutputDto
+from contentctl.objects.config import CustomApp
+

+from contentctl.objects.constants import CONTENTCTL_MAX_SEARCH_NAME_LENGTH,CONTENTCTL_BASELINE_STANZA_NAME_FORMAT_TEMPLATE

 class Baseline(SecurityContentObject):
-    # baseline spec
-    #name: str
-    #id: str
-    #version: int
-    #date: str
-    #author: str
-    #contentType: SecurityContentType = SecurityContentType.baselines
+    name:str = Field(...,max_length=CONTENTCTL_MAX_SEARCH_NAME_LENGTH)
     type: Annotated[str,Field(pattern="^Baseline$")] = Field(...)
     datamodel: Optional[List[DataModel]] = None
-    #description: str
     search: str = Field(..., min_length=4)
     how_to_implement: str = Field(..., min_length=4)
     known_false_positives: str = Field(..., min_length=4)
@@ -35,6 +23,12 @@ class Baseline(SecurityContentObject):

     # enrichment
     deployment: Deployment = Field({})
+
+
+    def get_conf_stanza_name(self, app:CustomApp)->str:
+        stanza_name = CONTENTCTL_BASELINE_STANZA_NAME_FORMAT_TEMPLATE.format(app_label=app.label, detection_name=self.name)
+        self.check_conf_stanza_max_length(stanza_name)
+        return stanza_name

     @field_validator("deployment", mode="before")
     def getDeployment(cls, v:Any, info:ValidationInfo)->Deployment:

contentctl/objects/baseline_tags.py

@@ -1,15 +1,12 @@
 from __future__ import annotations
-from typing import TYPE_CHECKING
 from pydantic import BaseModel, Field, field_validator, ValidationInfo, model_serializer
 from typing import List, Any, Union

 from contentctl.objects.story import Story
-from contentctl.objects.deployment import Deployment
 from contentctl.objects.detection import Detection
 from contentctl.objects.enums import SecurityContentProductName
 from contentctl.objects.enums import SecurityDomain
-if TYPE_CHECKING:
-    from contentctl.input.director import DirectorOutputDto
+



@@ -19,7 +16,7 @@ class BaselineTags(BaseModel):
     #deployment: Deployment = Field('SET_IN_GET_DEPLOYMENT_FUNCTION')
     # TODO (#223): can we remove str from the possible types here?
     detections: List[Union[Detection,str]] = Field(...)
-    product: list[SecurityContentProductName] = Field(...,min_length=1)
+    product: List[SecurityContentProductName] = Field(...,min_length=1)
     required_fields: List[str] = Field(...,min_length=1)
     security_domain: SecurityDomain = Field(...)