contentctl 3.6.0__py3-none-any.whl → 4.0.2__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- contentctl/actions/build.py +89 -0
- contentctl/actions/detection_testing/DetectionTestingManager.py +48 -49
- contentctl/actions/detection_testing/GitService.py +148 -230
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +14 -24
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +43 -17
- contentctl/actions/detection_testing/views/DetectionTestingView.py +3 -2
- contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +0 -8
- contentctl/actions/doc_gen.py +1 -1
- contentctl/actions/initialize.py +28 -65
- contentctl/actions/inspect.py +260 -0
- contentctl/actions/new_content.py +106 -13
- contentctl/actions/release_notes.py +168 -144
- contentctl/actions/reporting.py +24 -13
- contentctl/actions/test.py +39 -20
- contentctl/actions/validate.py +25 -48
- contentctl/contentctl.py +196 -754
- contentctl/enrichments/attack_enrichment.py +69 -19
- contentctl/enrichments/cve_enrichment.py +28 -13
- contentctl/helper/link_validator.py +24 -26
- contentctl/helper/utils.py +7 -3
- contentctl/input/director.py +139 -201
- contentctl/input/new_content_questions.py +63 -61
- contentctl/input/sigma_converter.py +1 -2
- contentctl/input/ssa_detection_builder.py +16 -7
- contentctl/input/yml_reader.py +4 -3
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +487 -154
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +155 -51
- contentctl/objects/alert_action.py +40 -0
- contentctl/objects/atomic.py +212 -0
- contentctl/objects/baseline.py +44 -43
- contentctl/objects/baseline_tags.py +69 -20
- contentctl/objects/config.py +857 -125
- contentctl/objects/constants.py +0 -1
- contentctl/objects/correlation_search.py +1 -1
- contentctl/objects/data_source.py +2 -4
- contentctl/objects/deployment.py +61 -21
- contentctl/objects/deployment_email.py +2 -2
- contentctl/objects/deployment_notable.py +4 -4
- contentctl/objects/deployment_phantom.py +2 -2
- contentctl/objects/deployment_rba.py +3 -4
- contentctl/objects/deployment_scheduling.py +2 -3
- contentctl/objects/deployment_slack.py +2 -2
- contentctl/objects/detection.py +1 -5
- contentctl/objects/detection_tags.py +210 -119
- contentctl/objects/enums.py +312 -24
- contentctl/objects/integration_test.py +1 -1
- contentctl/objects/integration_test_result.py +0 -2
- contentctl/objects/investigation.py +62 -53
- contentctl/objects/investigation_tags.py +30 -6
- contentctl/objects/lookup.py +80 -31
- contentctl/objects/macro.py +29 -45
- contentctl/objects/mitre_attack_enrichment.py +29 -5
- contentctl/objects/observable.py +3 -7
- contentctl/objects/playbook.py +60 -30
- contentctl/objects/playbook_tags.py +45 -8
- contentctl/objects/security_content_object.py +1 -5
- contentctl/objects/ssa_detection.py +8 -4
- contentctl/objects/ssa_detection_tags.py +19 -26
- contentctl/objects/story.py +142 -44
- contentctl/objects/story_tags.py +46 -33
- contentctl/objects/unit_test.py +7 -2
- contentctl/objects/unit_test_attack_data.py +10 -19
- contentctl/objects/unit_test_baseline.py +1 -1
- contentctl/objects/unit_test_old.py +4 -3
- contentctl/objects/unit_test_result.py +5 -3
- contentctl/objects/unit_test_ssa.py +31 -0
- contentctl/output/api_json_output.py +202 -130
- contentctl/output/attack_nav_output.py +20 -9
- contentctl/output/attack_nav_writer.py +3 -3
- contentctl/output/ba_yml_output.py +3 -3
- contentctl/output/conf_output.py +125 -391
- contentctl/output/conf_writer.py +169 -31
- contentctl/output/jinja_writer.py +2 -2
- contentctl/output/json_writer.py +17 -5
- contentctl/output/new_content_yml_output.py +8 -7
- contentctl/output/svg_output.py +17 -27
- contentctl/output/templates/analyticstories_detections.j2 +8 -4
- contentctl/output/templates/analyticstories_investigations.j2 +1 -1
- contentctl/output/templates/analyticstories_stories.j2 +6 -6
- contentctl/output/templates/app.conf.j2 +2 -2
- contentctl/output/templates/app.manifest.j2 +2 -2
- contentctl/output/templates/detection_coverage.j2 +6 -8
- contentctl/output/templates/doc_detection_page.j2 +2 -2
- contentctl/output/templates/doc_detections.j2 +2 -2
- contentctl/output/templates/doc_stories.j2 +1 -1
- contentctl/output/templates/es_investigations_investigations.j2 +1 -1
- contentctl/output/templates/es_investigations_stories.j2 +1 -1
- contentctl/output/templates/header.j2 +2 -1
- contentctl/output/templates/macros.j2 +6 -10
- contentctl/output/templates/savedsearches_baselines.j2 +5 -5
- contentctl/output/templates/savedsearches_detections.j2 +36 -33
- contentctl/output/templates/savedsearches_investigations.j2 +4 -4
- contentctl/output/templates/transforms.j2 +4 -4
- contentctl/output/yml_writer.py +2 -2
- contentctl/templates/app_template/README.md +7 -0
- contentctl/{output/templates/splunk_app → templates/app_template}/default/data/ui/nav/default.xml +1 -0
- contentctl/templates/app_template/lookups/mitre_enrichment.csv +638 -0
- contentctl/templates/deployments/{00_default_anomaly.yml → escu_default_configuration_anomaly.yml} +1 -2
- contentctl/templates/deployments/{00_default_baseline.yml → escu_default_configuration_baseline.yml} +1 -2
- contentctl/templates/deployments/{00_default_correlation.yml → escu_default_configuration_correlation.yml} +2 -2
- contentctl/templates/deployments/{00_default_hunting.yml → escu_default_configuration_hunting.yml} +2 -2
- contentctl/templates/deployments/{00_default_ttp.yml → escu_default_configuration_ttp.yml} +1 -2
- contentctl/templates/detections/anomalous_usage_of_7zip.yml +0 -1
- contentctl/templates/stories/cobalt_strike.yml +0 -1
- {contentctl-3.6.0.dist-info → contentctl-4.0.2.dist-info}/METADATA +36 -15
- contentctl-4.0.2.dist-info/RECORD +168 -0
- contentctl/actions/detection_testing/DataManipulation.py +0 -149
- contentctl/actions/generate.py +0 -91
- contentctl/helper/config_handler.py +0 -75
- contentctl/input/baseline_builder.py +0 -66
- contentctl/input/basic_builder.py +0 -58
- contentctl/input/detection_builder.py +0 -370
- contentctl/input/investigation_builder.py +0 -42
- contentctl/input/new_content_generator.py +0 -95
- contentctl/input/playbook_builder.py +0 -68
- contentctl/input/story_builder.py +0 -106
- contentctl/objects/app.py +0 -214
- contentctl/objects/repo_config.py +0 -163
- contentctl/objects/test_config.py +0 -630
- contentctl/output/templates/macros_detections.j2 +0 -7
- contentctl/output/templates/splunk_app/README.md +0 -7
- contentctl-3.6.0.dist-info/RECORD +0 -176
- /contentctl/{output/templates/splunk_app → templates/app_template}/README/essoc_story_detail.txt +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/README/essoc_summary.txt +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/README/essoc_usage_dashboard.txt +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/analytic_stories.conf +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/app.conf +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/commands.conf +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/content-version.conf +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/data/ui/views/escu_summary.xml +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/data/ui/views/feedback.xml +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/distsearch.conf +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/usage_searches.conf +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/default/use_case_library.conf +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/metadata/default.meta +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/static/appIcon.png +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/static/appIconAlt.png +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/static/appIconAlt_2x.png +0 -0
- /contentctl/{output/templates/splunk_app → templates/app_template}/static/appIcon_2x.png +0 -0
- {contentctl-3.6.0.dist-info → contentctl-4.0.2.dist-info}/LICENSE.md +0 -0
- {contentctl-3.6.0.dist-info → contentctl-4.0.2.dist-info}/WHEEL +0 -0
- {contentctl-3.6.0.dist-info → contentctl-4.0.2.dist-info}/entry_points.txt +0 -0
contentctl/objects/abstract_security_content_objects/detection_abstract.py (+487 -154)

@@ -1,72 +1,62 @@
 from __future__ import annotations
-
+from typing import TYPE_CHECKING,Union, Optional, List, Any, Annotated
+import os.path
 import re
 import pathlib
-from pydantic import
-from typing import Union
+from pydantic import BaseModel, field_validator, model_validator, ValidationInfo, Field, computed_field, model_serializer,ConfigDict, FilePath
 
+from contentctl.objects.macro import Macro
+from contentctl.objects.lookup import Lookup
+if TYPE_CHECKING:
+    from contentctl.input.director import DirectorOutputDto
+    from contentctl.objects.baseline import Baseline
+
 from contentctl.objects.security_content_object import SecurityContentObject
 from contentctl.objects.enums import AnalyticsType
 from contentctl.objects.enums import DataModel
 from contentctl.objects.enums import DetectionStatus
+from contentctl.objects.enums import NistCategory
+
 from contentctl.objects.detection_tags import DetectionTags
-from contentctl.objects.
+from contentctl.objects.deployment import Deployment
 from contentctl.objects.unit_test import UnitTest
-from contentctl.objects.integration_test import IntegrationTest
-from contentctl.objects.macro import Macro
-from contentctl.objects.lookup import Lookup
-from contentctl.objects.baseline import Baseline
-from contentctl.objects.playbook import Playbook
-from contentctl.helper.link_validator import LinkValidator
-from contentctl.objects.enums import SecurityContentType
 from contentctl.objects.test_group import TestGroup
+from contentctl.objects.integration_test import IntegrationTest
 
-class Detection_Abstract(SecurityContentObject):
-    # contentType: SecurityContentType = SecurityContentType.detections
-    # NOTE: because `use_enum_values` is configured, this will actually be type str
-    type: AnalyticsType = ...
-    file_path: str = None
-    # status field is REQUIRED (the way to denote this with pydantic is ...)
-    status: DetectionStatus = ...
-    data_source: list[str]
-    tags: DetectionTags
-    search: Union[str, dict]
-    how_to_implement: str
-    known_false_positives: str
-    check_references: bool = False
-    references: list
-
-    tests: list[Union[UnitTest, IntegrationTest]] = []
-
-    # enrichments
-    datamodel: list = None
-    deployment: ConfigDetectionConfiguration = None
-    annotations: dict = None
-    risk: list = None
-    playbooks: list[Playbook] = []
-    baselines: list[Baseline] = []
-    mappings: dict = None
-    macros: list[Macro] = []
-    lookups: list[Lookup] = []
-    cve_enrichment: list = None
-    splunk_app_enrichment: list = None
-
-    source: str = None
-    nes_fields: str = None
-    providing_technologies: list = None
-    runtime: str = None
-    enabled_by_default: bool = False
 
+#from contentctl.objects.playbook import Playbook
+from contentctl.objects.enums import DataSource,ProvidingTechnology
+from contentctl.enrichments.cve_enrichment import CveEnrichment, CveEnrichmentObj
 
-    class Config:
-        use_enum_values = True
 
+class Detection_Abstract(SecurityContentObject):
+    model_config = ConfigDict(use_enum_values=True)
+
+    #contentType: SecurityContentType = SecurityContentType.detections
+    type: AnalyticsType = Field(...)
+    status: DetectionStatus = Field(...)
+    data_source: Optional[List[str]] = None
+    tags: DetectionTags = Field(...)
+    search: Union[str, dict[str,Any]] = Field(...)
+    how_to_implement: str = Field(..., min_length=4)
+    known_false_positives: str = Field(..., min_length=4)
+    check_references: bool = False
+    #data_source: Optional[List[DataSource]] = None
 
+    enabled_by_default: bool = False
+    file_path: FilePath = Field(...)
+    # For model construction to first attempt construction of the leftmost object.
+    # From a file, this should be UnitTest. Note this is different than the
+    # default mode, 'smart'
+    # https://docs.pydantic.dev/latest/concepts/unions/#left-to-right-mode
+    # https://github.com/pydantic/pydantic/issues/9101#issuecomment-2019032541
+    tests: List[Annotated[Union[UnitTest, IntegrationTest], Field(union_mode='left_to_right')]] = []
     # A list of groups of tests, relying on the same data
-    test_groups: Union[list[TestGroup], None] = None
+    test_groups: Union[list[TestGroup], None] = Field(None,validate_default=True)
 
-    @
-
+    @field_validator("test_groups")
+    @classmethod
+    def validate_test_groups(cls, value:Union[None, List[TestGroup]], info:ValidationInfo) -> Union[List[TestGroup], None]:
         """
         Validates the `test_groups` field and constructs the model from the list of unit tests
         if no explicit construct was provided
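Note on the hunk above: the model drops its pydantic v1 constructs (the inner `class Config`, bare `...` defaults, loose `dict`/`list` fields) in favor of pydantic v2 equivalents. The sketch below is not contentctl code; under assumed stand-in names (`DetectionSketch`, `UnitTestish`, `IntegrationTestish`) it only illustrates the v2 idioms the new lines rely on: `ConfigDict(use_enum_values=True)` replacing `class Config`, `Field(...)` marking required fields, and `union_mode='left_to_right'` so a test entry is tried as the leftmost union member first.

```python
# A minimal sketch with placeholder models; it mirrors the idioms above,
# not contentctl's real classes.
from typing import Annotated, List, Union

from pydantic import BaseModel, ConfigDict, Field


class UnitTestish(BaseModel):
    name: str


class IntegrationTestish(BaseModel):
    name: str
    integration: bool = True


class DetectionSketch(BaseModel):
    # Replaces the removed `class Config: use_enum_values = True`.
    model_config = ConfigDict(use_enum_values=True)

    type: str = Field(...)            # required, like type/status/tags/search above
    enabled_by_default: bool = False
    # Try the leftmost union member first, so a plain dict loaded from a file
    # becomes a UnitTestish rather than whatever 'smart' mode would pick.
    tests: List[
        Annotated[Union[UnitTestish, IntegrationTestish], Field(union_mode="left_to_right")]
    ] = []


print(DetectionSketch(type="TTP", tests=[{"name": "t1"}]).tests[0])
# name='t1'  (constructed as UnitTestish)
```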
@@ -79,24 +69,357 @@ class Detection_Abstract(SecurityContentObject):
 
         # iterate over the unit tests and create a TestGroup (and as a result, an IntegrationTest) for each
         test_groups: list[TestGroup] = []
-        for unit_test in
-            test_group = TestGroup.derive_from_unit_test(unit_test,
+        for unit_test in info.data.get("tests"):
+            test_group = TestGroup.derive_from_unit_test(unit_test, info.data.get("name"))
             test_groups.append(test_group)
 
         # now add each integration test to the list of tests
         for test_group in test_groups:
-
+            info.data.get("tests").append(test_group.integration_test)
         return test_groups
 
 
-
-
+    @computed_field
+    @property
+    def datamodel(self)->List[DataModel]:
+        if isinstance(self.search, str):
+            return [dm for dm in DataModel if dm.value in self.search]
+        else:
+            return []
+
+    @computed_field
+    @property
+    def source(self)->str:
+        if self.file_path is not None:
+            return self.file_path.absolute().parent.name
+        else:
+            raise ValueError(f"Cannot get 'source' for detection {self.name} - 'file_path' was None.")
+
+    deployment: Deployment = Field({})
+
+    @computed_field
+    @property
+    def annotations(self)->dict[str,Union[List[str],int,str]]:
+
+        annotations_dict:dict[str, Union[List[str], int]] = {}
+        annotations_dict["analytic_story"]=[story.name for story in self.tags.analytic_story]
+        annotations_dict["confidence"] = self.tags.confidence
+        if len(self.tags.cve or []) > 0:
+            annotations_dict["cve"] = self.tags.cve
+        annotations_dict["impact"] = self.tags.impact
+        annotations_dict["type"] = self.type
+        #annotations_dict["version"] = self.version
+
+        #The annotations object is a superset of the mappings object.
+        # So start with the mapping object.
+        annotations_dict.update(self.mappings)
+
+        #Make sure that the results are sorted for readability/easier diffs
+        return dict(sorted(annotations_dict.items(), key=lambda item: item[0]))
+
+    #playbooks: list[Playbook] = []
+
+    baselines: list[Baseline] = Field([],validate_default=True)
+
+    @computed_field
+    @property
+    def mappings(self)->dict[str, List[str]]:
+        mappings:dict[str,Any] = {}
+        if len(self.tags.cis20) > 0:
+            mappings["cis20"] = [tag.value for tag in self.tags.cis20]
+        if len(self.tags.kill_chain_phases) > 0:
+            mappings['kill_chain_phases'] = [phase.value for phase in self.tags.kill_chain_phases]
+        if len(self.tags.mitre_attack_id) > 0:
+            mappings['mitre_attack'] = self.tags.mitre_attack_id
+        if len(self.tags.nist) > 0:
+            mappings['nist'] = [category.value for category in self.tags.nist]
+
+
+        # No need to sort the dict! It has been constructed in-order.
+        # However, if this logic is changed, then consider reordering or
+        # adding the sort back!
+        #return dict(sorted(mappings.items(), key=lambda item: item[0]))
+        return mappings
+
+    macros: list[Macro] = Field([],validate_default=True)
+    lookups: list[Lookup] = Field([],validate_default=True)
+
+    @computed_field
+    @property
+    def cve_enrichment(self)->List[CveEnrichmentObj]:
+        raise Exception("CVE Enrichment Functionality not currently supported. It will be re-added at a later time.")
+        enriched_cves = []
+        for cve_id in self.tags.cve:
+            print(f"\nEnriching {cve_id}\n")
+            enriched_cves.append(CveEnrichment.enrich_cve(cve_id))
+
+        return enriched_cves
+
+    splunk_app_enrichment: Optional[List[dict]] = None
+
+    @computed_field
+    @property
+    def nes_fields(self)->Optional[str]:
+        if self.deployment.alert_action.notable is not None:
+            return ','.join(self.deployment.alert_action.notable.nes_fields)
+        else:
+            return None
+
+    @computed_field
+    @property
+    def providing_technologies(self)->List[ProvidingTechnology]:
+        if isinstance(self.search, str):
+            return ProvidingTechnology.getProvidingTechFromSearch(self.search)
+        else:
+            #Dict-formatted searches (sigma) will not have providing technologies
+            return []
+
+    @computed_field
+    @property
+    def risk(self)->list[dict[str,Any]]:
+        risk_objects = []
+        risk_object_user_types = {'user', 'username', 'email address'}
+        risk_object_system_types = {'device', 'endpoint', 'hostname', 'ip address'}
+        process_threat_object_types = {'process name','process'}
+        file_threat_object_types = {'file name','file', 'file hash'}
+        url_threat_object_types = {'url string','url'}
+        ip_threat_object_types = {'ip address'}
+
+
+        for entity in self.tags.observable:
+
+            risk_object = dict()
+            if 'Victim' in entity.role and entity.type.lower() in risk_object_user_types:
+                risk_object['risk_object_type'] = 'user'
+                risk_object['risk_object_field'] = entity.name
+                risk_object['risk_score'] = self.tags.risk_score
+                risk_objects.append(risk_object)
+
+            elif 'Victim' in entity.role and entity.type.lower() in risk_object_system_types:
+                risk_object['risk_object_type'] = 'system'
+                risk_object['risk_object_field'] = entity.name
+                risk_object['risk_score'] = self.tags.risk_score
+                risk_objects.append(risk_object)
+
+            elif 'Attacker' in entity.role and entity.type.lower() in process_threat_object_types:
+                risk_object['threat_object_field'] = entity.name
+                risk_object['threat_object_type'] = "process"
+                risk_objects.append(risk_object)
+
+            elif 'Attacker' in entity.role and entity.type.lower() in file_threat_object_types:
+                risk_object['threat_object_field'] = entity.name
+                risk_object['threat_object_type'] = "file_name"
+                risk_objects.append(risk_object)
+
+            elif 'Attacker' in entity.role and entity.type.lower() in ip_threat_object_types:
+                risk_object['threat_object_field'] = entity.name
+                risk_object['threat_object_type'] = "ip_address"
+                risk_objects.append(risk_object)
+
+            elif 'Attacker' in entity.role and entity.type.lower() in url_threat_object_types:
+                risk_object['threat_object_field'] = entity.name
+                risk_object['threat_object_type'] = "url"
+                risk_objects.append(risk_object)
+
+            else:
+                risk_object['risk_object_type'] = 'other'
+                risk_object['risk_object_field'] = entity.name
+                risk_object['risk_score'] = self.tags.risk_score
+                risk_objects.append(risk_object)
+                continue
+
+
+        return risk_objects
+
+
+
+    @computed_field
+    @property
+    def metadata(self)->dict[str,str]:
+        return {'detection_id':str(self.id),
+                'deprecated':'1' if self.status==DetectionStatus.deprecated.value else '0',
+                'detection_version':str(self.version)}
+
+    @model_serializer
+    def serialize_model(self):
+        #Call serializer for parent
+        super_fields = super().serialize_model()
+
+        #All fields custom to this model
+        model= {
+            "tags": self.tags.model_dump(),
+            "type": self.type,
+            "search": self.search,
+            "how_to_implement":self.how_to_implement,
+            "known_false_positives":self.known_false_positives,
+            "datamodel": self.datamodel,
+            "source": self.source,
+            "nes_fields": self.nes_fields,
+        }
+        #Only a subset of macro fields are required:
+        all_macros = []
+        for macro in self.macros:
+            macro_dump:dict = {
+                "name": macro.name,
+                "definition": macro.definition,
+                "description": macro.description
+            }
+            if len(macro.arguments) > 0:
+                macro_dump['arguments'] = macro.arguments
+
+            all_macros.append(macro_dump)
+        model['macros'] = all_macros
+
+
+        all_lookups = []
+        for lookup in self.lookups:
+            if lookup.collection is not None:
+                all_lookups.append({
+                    "name":lookup.name,
+                    "description":lookup.description,
+                    "collection":lookup.collection,
+                    "case_sensitive_match": None,
+                    "fields_list":lookup.fields_list})
+            elif lookup.filename is not None:
+                all_lookups.append({
+                    "name":lookup.name,
+                    "description":lookup.description,
+                    "filename": lookup.filename.name,
+                    "default_match":"true" if lookup.default_match else "false",
+                    "case_sensitive_match": "true" if lookup.case_sensitive_match else "false",
+                    "match_type":lookup.match_type,
+                    "min_matches":lookup.min_matches,
+                    "fields_list":lookup.fields_list})
+        model['lookups'] = all_lookups
+
+
+        #Combine fields from this model with fields from parent
+        super_fields.update(model)
+
+        #return the model
+        return super_fields
+
+
+    def model_post_init(self, ctx:dict[str,Any]):
+        # director: Optional[DirectorOutputDto] = ctx.get("output_dto",None)
+        # if not isinstance(director,DirectorOutputDto):
+        # raise ValueError("DirectorOutputDto was not passed in context of Detection model_post_init")
+        director: Optional[DirectorOutputDto] = ctx.get("output_dto",None)
+        for story in self.tags.analytic_story:
+            story.detections.append(self)
+
+        #Ensure that all baselines link to this detection
+        for baseline in self.baselines:
+            new_detections = []
+            replaced = False
+            for d in baseline.tags.detections:
+                if isinstance(d,str) and self.name==d:
+                    new_detections.append(self)
+                    replaced = True
+                else:
+                    new_detections.append(d)
+            if replaced is False:
+                raise ValueError(f"Error, failed to replace detection reference in Baseline '{baseline.name}' to detection '{self.name}'")
+            baseline.tags.detections = new_detections
+
+        return self
+
+
+
+
+    @field_validator('lookups',mode="before")
+    @classmethod
+    def getDetectionLookups(cls, v:list[str], info:ValidationInfo)->list[Lookup]:
+        director:DirectorOutputDto = info.context.get("output_dto",None)
+
+        search:Union[str,dict] = info.data.get("search",None)
+        if not isinstance(search,str):
+            #The search was sigma formatted (or failed other validation and was None), so we will not validate macros in it
+            return []
+
+        lookups= Lookup.get_lookups(search, director)
+        return lookups
+
+    @field_validator('baselines',mode="before")
+    @classmethod
+    def mapDetectionNamesToBaselineObjects(cls, v:list[str], info:ValidationInfo)->List[Baseline]:
+        if len(v) > 0:
+            raise ValueError("Error, baselines are constructed automatically at runtime. Please do not include this field.")
+
+
+        name:Union[str,dict] = info.data.get("name",None)
+        if name is None:
+            raise ValueError("Error, cannot get Baselines because the Detection does not have a 'name' defined.")
+
+        director:DirectorOutputDto = info.context.get("output_dto",None)
+        baselines:List[Baseline] = []
+        for baseline in director.baselines:
+            if name in baseline.tags.detections:
+                baselines.append(baseline)
+
+        return baselines
+
+    @field_validator('macros',mode="before")
+    @classmethod
+    def getDetectionMacros(cls, v:list[str], info:ValidationInfo)->list[Macro]:
+        director:DirectorOutputDto = info.context.get("output_dto",None)
+
+        search:Union[str,dict] = info.data.get("search",None)
+        if not isinstance(search,str):
+            #The search was sigma formatted (or failed other validation and was None), so we will not validate macros in it
+            return []
+
+        search_name:Union[str,Any] = info.data.get("name",None)
+        assert isinstance(search_name,str), f"Expected 'search_name' to be a string, instead it was [{type(search_name)}]"
+
+
+
+        filter_macro_name = search_name.replace(' ', '_').replace('-', '_').replace('.', '_').replace('/', '_').lower() + '_filter'
+        try:
+            filter_macro = Macro.mapNamesToSecurityContentObjects([filter_macro_name], director)[0]
+        except:
+            # Filter macro did not exist, so create one at runtime
+            filter_macro = Macro.model_validate({"name":filter_macro_name,
+                                    "definition":'search *',
+                                    "description":'Update this macro to limit the output results to filter out false positives.'})
+            director.macros.append(filter_macro)
+
+        macros_from_search = Macro.get_macros(search, director)
+
+        return macros_from_search + [filter_macro]
+
+    def get_content_dependencies(self)->list[SecurityContentObject]:
+        #Do this separately to satisfy type checker
+        objects: list[SecurityContentObject] = []
+        objects += self.macros
+        objects += self.lookups
+        return objects
+
+
+    @field_validator("deployment", mode="before")
+    def getDeployment(cls, v:Any, info:ValidationInfo)->Deployment:
+        return Deployment.getDeployment(v,info)
+        return SecurityContentObject.getDeploymentFromType(info.data.get("type",None), info)
+        # director: Optional[DirectorOutputDto] = info.context.get("output_dto",None)
+        # if not director:
+        # raise ValueError("Cannot set deployment - DirectorOutputDto not passed to Detection Constructor in context")
+
+
+        # typeField = info.data.get("type",None)
+
+        # deps = [deployment for deployment in director.deployments if deployment.type == typeField]
+        # if len(deps) == 1:
+        # return deps[0]
+        # elif len(deps) == 0:
+        # raise ValueError(f"Failed to find Deployment for type '{typeField}' "\
+        # f"from possible {[deployment.type for deployment in director.deployments]}")
+        # else:
+        # raise ValueError(f"Found more than 1 ({len(deps)}) Deployment for type '{typeField}' "\
+        # f"from possible {[deployment.type for deployment in director.deployments]}")
+
 
     @staticmethod
-    def get_detections_from_filenames(
-        detection_filenames: set[str],
-        all_detections: list[Detection_Abstract]
-    ) -> list[Detection_Abstract]:
+    def get_detections_from_filenames(detection_filenames:set[str], all_detections:list[Detection_Abstract])->list[Detection_Abstract]:
         detection_filenames = set(str(pathlib.Path(filename).absolute()) for filename in detection_filenames)
         detection_dict = SecurityContentObject.create_filename_to_content_dict(all_detections)
 
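Note on the hunk above: fields that were previously stored on the model (`datamodel`, `source`, `annotations`, `mappings`, `risk`, `nes_fields`, ...) become `@computed_field` properties, and macros/lookups/baselines are resolved during validation from a shared `DirectorOutputDto` passed in through pydantic's validation context. Below is a minimal sketch of those two patterns, using assumed stand-in names (`RegistrySketch` plays the role of `DirectorOutputDto`); it is not contentctl's actual API.

```python
# A minimal sketch, assuming stand-in names; field and method names are illustrative.
from typing import Any, List

from pydantic import BaseModel, Field, ValidationInfo, computed_field, field_validator


class RegistrySketch:
    """Holds shared, already-built objects (what the 'output_dto' context carries)."""

    def __init__(self, macros: List[str]):
        self.macros = macros


class DetectionSketch(BaseModel):
    name: str
    search: str
    # validate_default=True makes the before-validator run even when the field
    # is omitted from the input, mirroring the macros/lookups fields above.
    macros: List[str] = Field([], validate_default=True)

    @field_validator("macros", mode="before")
    @classmethod
    def resolve_macros(cls, v: Any, info: ValidationInfo) -> List[str]:
        registry: RegistrySketch = info.context.get("output_dto")  # supplied by the caller
        search = info.data.get("search", "")                       # fields validated earlier
        return [m for m in registry.macros if m in search]

    @computed_field
    @property
    def source(self) -> str:
        # Derived on access and included in model_dump(), like datamodel/source/risk above.
        return f"derived-from-{self.name}"


registry = RegistrySketch(macros=["security_content_ctime", "another_macro"])
det = DetectionSketch.model_validate(
    {"name": "Example Detection", "search": "| `security_content_ctime(firstTime)`"},
    context={"output_dto": registry},
)
print(det.macros)   # ['security_content_ctime']
print(det.source)   # derived-from-Example Detection
```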
@@ -104,6 +427,7 @@ class Detection_Abstract(SecurityContentObject):
             return [detection_dict[detection_filename] for detection_filename in detection_filenames]
         except Exception as e:
             raise Exception(f"Failed to find detection object for modified detection: {str(e)}")
+
 
     # @validator("type")
     # def type_valid(cls, v, values):
@@ -111,25 +435,9 @@ class Detection_Abstract(SecurityContentObject):
     # raise ValueError("not valid analytics type: " + values["name"])
     # return v
 
-
-
-
-        if isinstance(v, dict) and field.name == "search":
-            # This is a special case of the search field. It can be a dict, containing
-            # a sigma search, if we are running the converter. So we will not
-            # validate the field further. Additional validation will be done
-            # during conversion phase later on
-            return v
-        else:
-            # No other fields should contain a non-str type:
-            raise ValueError(
-                f"Error validating field '{field.name}'. Field MUST be be a string, not type '{type(v)}' "
-            )
-
-        return SecurityContentObject.free_text_field_valid(cls, v, values, field)
-
-    @validator('enabled_by_default')
-    def only_enabled_if_production_status(cls,v,values):
+
+    @field_validator("enabled_by_default",mode="before")
+    def only_enabled_if_production_status(cls,v:Any,info:ValidationInfo)->bool:
         '''
         A detection can ONLY be enabled by default if it is a PRODUCTION detection.
         If not (for example, it is EXPERIMENTAL or DEPRECATED) then we will throw an exception.
@@ -138,9 +446,9 @@ class Detection_Abstract(SecurityContentObject):
         '''
         if v == False:
             return v
-
-        status = DetectionStatus(
-        searchType = AnalyticsType(
+
+        status = DetectionStatus(info.data.get("status"))
+        searchType = AnalyticsType(info.data.get("type"))
         errors = []
         if status != DetectionStatus.production:
             errors.append(f"status is '{status.name}'. Detections that are enabled by default MUST be '{DetectionStatus.production.value}'")
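Note: the two hunks above are largely a mechanical pydantic v1-to-v2 validator migration: `@validator` with a `values` dict becomes `@field_validator` receiving a `ValidationInfo`, and previously validated fields are read from `info.data`. A small before/after sketch with illustrative field names, not the real Detection fields:

```python
# A small before/after sketch, assuming illustrative field names.
from pydantic import BaseModel, ValidationInfo, field_validator

# pydantic v1 (the shape of the removed lines):
#
#   @validator("enabled_by_default")
#   def only_enabled_if_production_status(cls, v, values):
#       if v and values.get("status") != "production":
#           raise ValueError(...)
#       return v


class SketchV2(BaseModel):
    status: str
    enabled_by_default: bool = False

    @field_validator("enabled_by_default", mode="before")
    @classmethod
    def only_enabled_if_production_status(cls, v: bool, info: ValidationInfo) -> bool:
        # Previously validated fields now live on info.data instead of `values`.
        if v and info.data.get("status") != "production":
            raise ValueError("enabled_by_default requires status: production")
        return v


print(SketchV2(status="production", enabled_by_default=True))
```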
@@ -152,79 +460,109 @@ class Detection_Abstract(SecurityContentObject):
             raise ValueError(f"Detection is 'enabled_by_default: true' however \n - {error_message}")
 
         return v
-
-
-    @validator("status")
-    def validation_for_ba_only(cls, v, values):
-        # Ensure that only a BA detection can have status: validation
-        p = pathlib.Path(values['file_path'])
-        if v == DetectionStatus.validation.value:
-            if p.name.startswith("ssa___"):
-                pass
-            else:
-                raise ValueError(
-                    f"The following is NOT an ssa_ detection, but has 'status: {v}' which may ONLY be used for "
-                    f"ssa_ detections: {values['file_path']}"
-                )
-
-        return v
+
 
-
-
-
-
-
-
-
-
-
-
-
-
-    # def references_check(cls, v, values):
-    # return LinkValidator.check_references(v, values["name"])
-    # return v
+    @model_validator(mode="after")
+    def addTags_nist(self):
+        if self.type == AnalyticsType.TTP.value:
+            self.tags.nist = [NistCategory.DE_CM]
+        else:
+            self.tags.nist = [NistCategory.DE_AE]
+        return self
+
+    @model_validator(mode="after")
+    def ensureProperObservablesExist(self):
+        """
+        If a detections is PRODUCTION and either TTP or ANOMALY, then it MUST have an Observable with the VICTIM role.
 
-
-
-
-
-
-
+        Returns:
+            self: Returns itself if the valdiation passes
+        """
+        if self.status not in [DetectionStatus.production.value]:
+            # Only perform this validation on production detections
+            return self
 
-
+        if self.type not in [AnalyticsType.TTP.value, AnalyticsType.Anomaly.value]:
+            # Only perform this validation on TTP and Anomaly detections
+            return self
+
+        #Detection is required to have a victim
+        roles = []
+        for observable in self.tags.observable:
+            roles.extend(observable.role)
+
+        if roles.count("Victim") == 0:
+            raise ValueError(f"Error, there must be AT LEAST 1 Observable with the role 'Victim' declared in Detection.tags.observables. However, none were found.")
+
+        # Exactly one victim was found
+        return self
+
 
-
+    @model_validator(mode="after")
+    def search_observables_exist_validate(self):
+
+        if isinstance(self.search, str):
+
+            observable_fields = [ob.name.lower() for ob in self.tags.observable]
+
+            #All $field$ fields from the message must appear in the search
             field_match_regex = r"\$([^\s.]*)\$"
-
-
-
-
-
+
+
+            if self.tags.message:
+                message_fields = [match.replace("$", "").lower() for match in re.findall(field_match_regex, self.tags.message.lower())]
+                missing_fields = set([field for field in observable_fields if field not in self.search.lower()])
+            else:
+                message_fields = []
+                missing_fields = set()
+
 
             error_messages = []
            if len(missing_fields) > 0:
-                error_messages.append(
-                    f"The following fields are declared as observables, but do not exist in the search: "
-                    f"{missing_fields}"
-                )
+                error_messages.append(f"The following fields are declared as observables, but do not exist in the search: {missing_fields}")
 
-
+
+            missing_fields = set([field for field in message_fields if field not in self.search.lower()])
             if len(missing_fields) > 0:
-                error_messages.append(
-
-
-                )
+                error_messages.append(f"The following fields are used as fields in the message, but do not exist in the search: {missing_fields}")
+
+            if len(error_messages) > 0 and self.status == DetectionStatus.production.value:
+                msg = "Use of fields in observables/messages that do not appear in search:\n\t- "+ "\n\t- ".join(error_messages)
+                raise(ValueError(msg))
+
+        # Found everything
+        return self
+
+
+    @model_validator(mode='after')
+    def ensurePresenceOfRequiredTests(self):
+        # TODO (cmcginley): Fix detection_abstract.tests_validate so that it surfaces validation errors
+        # (e.g. a lack of tests) to the final results, instead of just showing a failed detection w/
+        # no tests (maybe have a message propagated at the detection level? do a separate coverage
+        # check as part of validation?):
+
+
+        #Only production analytics require tests
+        if self.status != DetectionStatus.production.value:
+            return self
+
+        # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
+        # Accordingly, we do not need to do additional checks if the type is Correlation
+        if self.type in set([AnalyticsType.Correlation.value]):
+            return self
+
+        if self.tags.manual_test is not None:
+            for test in self.tests:
+                test.skip(f"TEST SKIPPED: Detection marked as 'manual_test' with explanation: '{self.tags.manual_test}'")
 
-
-
-
+        if len(self.tests) == 0:
+            raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}")
+
 
-
-        return v
+        return self
 
-    @
-    def tests_validate(cls, v,
+    @field_validator("tests")
+    def tests_validate(cls, v, info:ValidationInfo):
        # TODO (cmcginley): Fix detection_abstract.tests_validate so that it surfaces validation errors
        # (e.g. a lack of tests) to the final results, instead of just showing a failed detection w/
        # no tests (maybe have a message propagated at the detection level? do a separate coverage
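Note on the hunk above: whole-object rules (`addTags_nist`, `ensureProperObservablesExist`, `search_observables_exist_validate`, `ensurePresenceOfRequiredTests`) are implemented as `@model_validator(mode="after")` methods, which run on the fully constructed model, may derive or adjust fields, and must return `self`. A minimal sketch of that pattern with stand-in names, not the real Detection/Observable models:

```python
# A minimal sketch with stand-in models; the real rules above operate on
# Detection.tags and its Observable objects.
from typing import List

from pydantic import BaseModel, model_validator


class ObservableSketch(BaseModel):
    name: str
    role: List[str] = []


class DetectionSketch(BaseModel):
    status: str = "production"
    type: str = "TTP"
    nist: List[str] = []
    observables: List[ObservableSketch] = []

    @model_validator(mode="after")
    def add_nist_tags(self):
        # Derive one field from another, as addTags_nist does for tags.nist.
        self.nist = ["DE.CM"] if self.type == "TTP" else ["DE.AE"]
        return self

    @model_validator(mode="after")
    def ensure_victim_observable(self):
        # Cross-field rule, as ensureProperObservablesExist enforces for
        # production TTP/Anomaly detections.
        if self.status == "production" and self.type in ("TTP", "Anomaly"):
            if not any("Victim" in ob.role for ob in self.observables):
                raise ValueError("at least one observable must have the 'Victim' role")
        return self


det = DetectionSketch(observables=[ObservableSketch(name="dest", role=["Victim"])])
print(det.nist)  # ['DE.CM']
```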
@@ -232,18 +570,18 @@ class Detection_Abstract(SecurityContentObject):
 
 
         #Only production analytics require tests
-        if
+        if info.data.get("status","") != DetectionStatus.production.value:
             return v
 
         # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
         # Accordingly, we do not need to do additional checks if the type is Correlation
-        if
+        if info.data.get("type","") in set([AnalyticsType.Correlation.value]):
             return v
 
 
         # Ensure that there is at least 1 test
         if len(v) == 0:
-            if
+            if info.data.get("tags",None) and info.data.get("tags").manual_test is not None:
                 # Detections that are manual_test MAY have detections, but it is not required. If they
                 # do not have one, then create one which will be a placeholder.
                 # Note that this fake UnitTest (and by extension, Integration Test) will NOT be generated
@@ -252,19 +590,12 @@ class Detection_Abstract(SecurityContentObject):
                 return [placeholder_test]
 
             else:
-                raise ValueError("At least one test is REQUIRED for production detection: " +
+                raise ValueError("At least one test is REQUIRED for production detection: " + info.data.get("name", "NO NAME FOUND"))
 
 
         #No issues - at least one test provided for production type requiring testing
         return v
-
-    @validator("datamodel")
-    def datamodel_valid(cls, v, values):
-        for datamodel in v:
-            if datamodel not in [el.name for el in DataModel]:
-                raise ValueError("not valid data model: " + values["name"])
-        return v
-
+
     def all_tests_successful(self) -> bool:
         """
         Checks that all tests in the detection succeeded. If no tests are defined, consider that a
@@ -346,6 +677,7 @@ class Detection_Abstract(SecurityContentObject):
             summary_dict["tests"].append(result)
 
         # Return the summary
+
         return summary_dict
 
 
@@ -354,3 +686,4 @@ class Detection_Abstract(SecurityContentObject):
                 'deprecated':'1' if self.status==DetectionStatus.deprecated.value else '0',
                 'detection_version':str(self.version)}
 
+
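Note: the `serialize_model` method added earlier in this diff illustrates the `@model_serializer` override pattern: call the parent's serializer, then layer the subclass's own fields on top so `model_dump()` emits the merged dict. A reduced sketch with placeholder classes (not contentctl's `SecurityContentObject`):

```python
# A reduced sketch with placeholder classes; the real base class serializes
# many more fields.
from typing import Any, Dict

from pydantic import BaseModel, model_serializer


class SecurityContentSketch(BaseModel):
    name: str
    description: str = ""

    @model_serializer
    def serialize_model(self) -> Dict[str, Any]:
        return {"name": self.name, "description": self.description}


class DetectionSketch(SecurityContentSketch):
    search: str = ""

    @model_serializer
    def serialize_model(self) -> Dict[str, Any]:
        fields = super().serialize_model()       # parent's view of the object
        fields.update({"search": self.search})   # fields specific to this model
        return fields


print(DetectionSketch(name="Example", search="| tstats count ...").model_dump())
# {'name': 'Example', 'description': '', 'search': '| tstats count ...'}
```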