contentctl 4.3.4__py3-none-any.whl → 4.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- contentctl/actions/build.py +1 -0
- contentctl/actions/detection_testing/GitService.py +10 -10
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +68 -38
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +5 -1
- contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +10 -8
- contentctl/actions/initialize.py +28 -12
- contentctl/actions/inspect.py +191 -91
- contentctl/actions/new_content.py +10 -2
- contentctl/actions/validate.py +3 -6
- contentctl/api.py +1 -1
- contentctl/contentctl.py +3 -0
- contentctl/enrichments/attack_enrichment.py +49 -81
- contentctl/enrichments/cve_enrichment.py +6 -7
- contentctl/helper/splunk_app.py +141 -10
- contentctl/input/director.py +19 -24
- contentctl/input/new_content_questions.py +9 -42
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +155 -13
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +17 -9
- contentctl/objects/atomic.py +51 -77
- contentctl/objects/base_test_result.py +7 -7
- contentctl/objects/baseline.py +12 -18
- contentctl/objects/baseline_tags.py +2 -5
- contentctl/objects/config.py +154 -26
- contentctl/objects/constants.py +34 -1
- contentctl/objects/correlation_search.py +79 -114
- contentctl/objects/dashboard.py +100 -0
- contentctl/objects/deployment.py +20 -5
- contentctl/objects/detection_metadata.py +71 -0
- contentctl/objects/detection_stanza.py +79 -0
- contentctl/objects/detection_tags.py +28 -26
- contentctl/objects/drilldown.py +70 -0
- contentctl/objects/enums.py +26 -24
- contentctl/objects/errors.py +187 -0
- contentctl/objects/investigation.py +23 -15
- contentctl/objects/investigation_tags.py +4 -3
- contentctl/objects/lookup.py +8 -1
- contentctl/objects/macro.py +16 -7
- contentctl/objects/notable_event.py +6 -5
- contentctl/objects/risk_analysis_action.py +4 -4
- contentctl/objects/risk_event.py +8 -7
- contentctl/objects/savedsearches_conf.py +196 -0
- contentctl/objects/story.py +4 -16
- contentctl/objects/throttling.py +46 -0
- contentctl/output/conf_output.py +4 -0
- contentctl/output/conf_writer.py +24 -4
- contentctl/output/new_content_yml_output.py +4 -9
- contentctl/output/templates/analyticstories_detections.j2 +2 -2
- contentctl/output/templates/analyticstories_investigations.j2 +5 -5
- contentctl/output/templates/analyticstories_stories.j2 +1 -1
- contentctl/output/templates/savedsearches_baselines.j2 +2 -3
- contentctl/output/templates/savedsearches_detections.j2 +12 -7
- contentctl/output/templates/savedsearches_investigations.j2 +3 -4
- contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +10 -1
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/METADATA +6 -5
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/RECORD +58 -57
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/WHEEL +1 -1
- contentctl/objects/ssa_detection.py +0 -157
- contentctl/objects/ssa_detection_tags.py +0 -138
- contentctl/objects/unit_test_old.py +0 -10
- contentctl/objects/unit_test_ssa.py +0 -31
- contentctl/output/templates/finding_report.j2 +0 -30
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/LICENSE.md +0 -0
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/entry_points.txt +0 -0
contentctl/objects/dashboard.py
ADDED
@@ -0,0 +1,100 @@
+from typing import Any
+from pydantic import Field, Json, model_validator
+
+import pathlib
+from jinja2 import Environment
+import json
+from contentctl.objects.security_content_object import SecurityContentObject
+from contentctl.objects.config import build
+from enum import StrEnum
+
+DEFAULT_DASHBAORD_JINJA2_TEMPLATE = '''<dashboard version="2" theme="{{ dashboard.theme }}">
+    <label>{{ dashboard.label(config) }}</label>
+    <description></description>
+    <definition><![CDATA[
+{{ dashboard.pretty_print_json_obj() }}
+    ]]></definition>
+    <meta type="hiddenElements"><![CDATA[
+{
+    "hideEdit": false,
+    "hideOpenInSearch": false,
+    "hideExport": false
+}
+    ]]></meta>
+</dashboard>'''
+
+class DashboardTheme(StrEnum):
+    light = "light"
+    dark = "dark"
+
+class Dashboard(SecurityContentObject):
+    j2_template: str = Field(default=DEFAULT_DASHBAORD_JINJA2_TEMPLATE, description="Jinja2 Template used to construct the dashboard")
+    description: str = Field(...,description="A description of the dashboard. This does not have to match "
+                             "the description of the dashboard in the JSON file.", max_length=10000)
+    theme: DashboardTheme = Field(default=DashboardTheme.light, description="The theme of the dashboard. Choose between 'light' and 'dark'.")
+    json_obj: Json[dict[str,Any]] = Field(..., description="Valid JSON object that describes the dashboard")
+
+
+
+    def label(self, config:build)->str:
+        return f"{config.app.label} - {self.name}"
+
+    @model_validator(mode="before")
+    @classmethod
+    def validate_fields_from_json(cls, data:Any)->Any:
+        yml_file_name:str|None = data.get("file_path", None)
+        if yml_file_name is None:
+            raise ValueError("File name not passed to dashboard constructor")
+        yml_file_path = pathlib.Path(yml_file_name)
+        json_file_path = yml_file_path.with_suffix(".json")
+
+        if not json_file_path.is_file():
+            raise ValueError(f"Required file {json_file_path} does not exist.")
+
+        with open(json_file_path,'r') as jsonFilePointer:
+            try:
+                json_obj:dict[str,Any] = json.load(jsonFilePointer)
+            except Exception as e:
+                raise ValueError(f"Unable to load data from {json_file_path}: {str(e)}")
+
+        name_from_file = data.get("name",None)
+        name_from_json = json_obj.get("title",None)
+
+        errors:list[str] = []
+        if name_from_json is None:
+            errors.append(f"'title' field is missing from {json_file_path}")
+        elif name_from_json != name_from_file:
+            errors.append(f"The 'title' field in the JSON file [{json_file_path}] does not match the 'name' field in the YML object [{yml_file_path}]. These two MUST match:\n "
+                          f"title in JSON : {name_from_json}\n "
+                          f"title in YML : {name_from_file}\n ")
+
+        description_from_json = json_obj.get("description",None)
+        if description_from_json is None:
+            errors.append("'description' field is missing from field 'json_object'")
+
+        if len(errors) > 0 :
+            err_string = "\n - ".join(errors)
+            raise ValueError(f"Error(s) validating dashboard:\n - {err_string}")
+
+        data['name'] = name_from_file
+        data['json_obj'] = json.dumps(json_obj)
+        return data
+
+
+    def pretty_print_json_obj(self):
+        return json.dumps(self.json_obj, indent=4)
+
+    def getOutputFilepathRelativeToAppRoot(self, config:build)->pathlib.Path:
+        filename = f"{self.file_path.stem}.xml".lower()
+        return pathlib.Path("default/data/ui/views")/filename
+
+
+    def writeDashboardFile(self, j2_env:Environment, config:build):
+        template = j2_env.from_string(self.j2_template)
+        dashboard_text = template.render(config=config, dashboard=self)
+
+        with open(config.getPackageDirectoryPath()/self.getOutputFilepathRelativeToAppRoot(config), 'a') as f:
+            output_xml = dashboard_text.encode('utf-8', 'ignore').decode('utf-8')
+            f.write(output_xml)
+
+
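Note: the new Dashboard object requires a JSON definition file next to the dashboard YML; validate_fields_from_json derives its path by swapping the suffix and then cross-checks the JSON 'title' against the YML 'name'. A minimal sketch of the suffix-swap rule only (the path below is hypothetical):

import pathlib

# Hypothetical dashboard YML path; the paired JSON must exist alongside it.
yml_file_path = pathlib.Path("dashboards/example_dashboard.yml")
json_file_path = yml_file_path.with_suffix(".json")
print(json_file_path)  # dashboards/example_dashboard.json
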
contentctl/objects/deployment.py
CHANGED
@@ -1,7 +1,8 @@
 from __future__ import annotations
-from pydantic import Field, computed_field,
-from typing import
-
+from pydantic import Field, computed_field,ValidationInfo, model_serializer, NonNegativeInt
+from typing import Any
+import uuid
+import datetime
 from contentctl.objects.security_content_object import SecurityContentObject
 from contentctl.objects.deployment_scheduling import DeploymentScheduling
 from contentctl.objects.alert_action import AlertAction
@@ -15,9 +16,13 @@ class Deployment(SecurityContentObject):
     #author: str = None
     #description: str = None
     #contentType: SecurityContentType = SecurityContentType.deployments
+
+
     scheduling: DeploymentScheduling = Field(...)
     alert_action: AlertAction = AlertAction()
     type: DeploymentType = Field(...)
+    author: str = Field(...,max_length=255)
+    version: NonNegativeInt = 1
 
     #Type was the only tag exposed and should likely be removed/refactored.
     #For transitional reasons, provide this as a computed_field in prep for removal
@@ -25,7 +30,8 @@ class Deployment(SecurityContentObject):
     @property
     def tags(self)->dict[str,DeploymentType]:
         return {"type": self.type}
-
+
+
     @staticmethod
     def getDeployment(v:dict[str,Any], info:ValidationInfo)->Deployment:
         if v != {}:
@@ -36,8 +42,17 @@ class Deployment(SecurityContentObject):
         detection_name = info.data.get("name", None)
         if detection_name is None:
             raise ValueError("Could not create inline deployment - Baseline or Detection lacking 'name' field,")
+
+        # Add a number of static values
+        v.update({
+            'name': f"{detection_name} - Inline Deployment",
+            'id':uuid.uuid4(),
+            'date': datetime.date.today(),
+            'description': "Inline deployment created at runtime.",
+            'author': "contentctl tool"
+        })
+
 
-        v['name'] = f"{detection_name} - Inline Deployment"
         # This constructs a temporary in-memory deployment,
         # allowing the deployment to be easily defined in the
         # detection on a per detection basis.
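Note: getDeployment now injects a set of static fields before constructing the inline deployment, rather than only overriding 'name'. A minimal sketch of that update step (the incoming dict contents are made up; only the injected keys come from the diff above):

import uuid
import datetime

# Hypothetical per-detection deployment overrides loaded from a YML file.
v = {"scheduling": {"cron_schedule": "0 * * * *"}}
detection_name = "Example Detection"

# Static values injected at runtime, mirroring the update() call above.
v.update({
    'name': f"{detection_name} - Inline Deployment",
    'id': uuid.uuid4(),
    'date': datetime.date.today(),
    'description': "Inline deployment created at runtime.",
    'author': "contentctl tool"
})
print(v['name'])  # Example Detection - Inline Deployment
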
contentctl/objects/detection_metadata.py
ADDED
@@ -0,0 +1,71 @@
+import uuid
+from typing import Any
+
+from pydantic import BaseModel, Field, field_validator
+
+
+class DetectionMetadata(BaseModel):
+    """
+    A model of the metadata line in a detection stanza in savedsearches.conf
+    """
+    # A bool indicating whether the detection is deprecated (serialized as an int, 1 or 0)
+    deprecated: bool = Field(...)
+
+    # A UUID identifying the detection
+    detection_id: uuid.UUID = Field(...)
+
+    # The version of the detection
+    detection_version: int = Field(...)
+
+    # The time the detection was published. **NOTE** This field was added to the metadata in ESCU
+    # as of v4.39.0
+    publish_time: float = Field(...)
+
+    class Config:
+        # Allowing for future fields that may be added to the metadata JSON
+        extra = "allow"
+
+    @field_validator("deprecated", mode="before")
+    @classmethod
+    def validate_deprecated(cls, v: Any) -> Any:
+        """
+        Convert str to int, and then ints to bools for deprecated; raise if not 0 or 1 in the case
+        of an int, or if str cannot be converted to int.
+
+        :param v: the value passed
+        :type v: :class:`typing.Any`
+
+        :returns: the value
+        :rtype: :class:`typing.Any`
+        """
+        if isinstance(v, str):
+            try:
+                v = int(v)
+            except ValueError as e:
+                raise ValueError(f"Cannot convert str value ({v}) to int: {e}") from e
+        if isinstance(v, int):
+            if not (0 <= v <= 1):
+                raise ValueError(
+                    f"Value for field 'deprecated' ({v}) must be 0 or 1, if not a bool."
+                )
+            v = bool(v)
+        return v
+
+    @field_validator("detection_version", mode="before")
+    @classmethod
+    def validate_detection_version(cls, v: Any) -> Any:
+        """
+        Convert str to int; raise if str cannot be converted to int.
+
+        :param v: the value passed
+        :type v: :class:`typing.Any`
+
+        :returns: the value
+        :rtype: :class:`typing.Any`
+        """
+        if isinstance(v, str):
+            try:
+                v = int(v)
+            except ValueError as e:
+                raise ValueError(f"Cannot convert str value ({v}) to int: {e}") from e
+        return v
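Note: the field validators accept the string-encoded values that appear in the savedsearches.conf metadata JSON. A minimal sketch of the coercion, assuming contentctl 4.4.0 is installed; the UUID and timestamp are made up:

from contentctl.objects.detection_metadata import DetectionMetadata

raw = (
    '{"deprecated": "0", '
    '"detection_id": "f1e2d3c4-0000-4000-8000-000000000000", '
    '"detection_version": "3", '
    '"publish_time": 1700000000.0}'
)
meta = DetectionMetadata.model_validate_json(raw)
print(meta.deprecated, meta.detection_version)  # False 3
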
contentctl/objects/detection_stanza.py
ADDED
@@ -0,0 +1,79 @@
+from typing import ClassVar
+import hashlib
+from functools import cached_property
+
+from pydantic import BaseModel, Field, computed_field
+
+from contentctl.objects.detection_metadata import DetectionMetadata
+
+
+class DetectionStanza(BaseModel):
+    """
+    A model representing a stanza for a detection in savedsearches.conf
+    """
+    # The lines that comprise this stanza, in the order they appear in the conf
+    lines: list[str] = Field(...)
+
+    # The full name of the detection (e.g. "ESCU - My Detection - Rule")
+    name: str = Field(...)
+
+    # The key prefix indicating the metadata attribute
+    METADATA_LINE_PREFIX: ClassVar[str] = "action.correlationsearch.metadata = "
+
+    @computed_field
+    @cached_property
+    def metadata(self) -> DetectionMetadata:
+        """
+        The metadata extracted from the stanza. Using the provided lines, parse out the metadata
+
+        :returns: the detection stanza's metadata
+        :rtype: :class:`contentctl.objects.detection_metadata.DetectionMetadata`
+        """
+        # Set a variable to store the metadata line in
+        meta_line: str | None = None
+
+        # Iterate over the lines to look for the metadata line
+        for line in self.lines:
+            if line.startswith(DetectionStanza.METADATA_LINE_PREFIX):
+                # If we find a matching line more than once, we've hit an error
+                if meta_line is not None:
+                    raise Exception(
+                        f"Metadata for detection '{self.name}' found twice in stanza."
+                    )
+                meta_line = line
+
+        # Report if we could not find the metadata line
+        if meta_line is None:
+            raise Exception(f"No metadata for detection '{self.name}' found in stanza.")
+
+        # Parse the metadata JSON into a model
+        return DetectionMetadata.model_validate_json(meta_line[len(DetectionStanza.METADATA_LINE_PREFIX):])
+
+    @computed_field
+    @cached_property
+    def hash(self) -> str:
+        """
+        The SHA256 hash of the lines of the stanza, excluding the metadata line
+
+        :returns: hexdigest
+        :rtype: str
+        """
+        hash = hashlib.sha256()
+        for line in self.lines:
+            if not line.startswith(DetectionStanza.METADATA_LINE_PREFIX):
+                hash.update(line.encode("utf-8"))
+        return hash.hexdigest()
+
+    def version_should_be_bumped(self, previous: "DetectionStanza") -> bool:
+        """
+        A helper method that compares this stanza against the same stanza from a previous build;
+        returns True if the version still needs to be bumped (e.g. the detection was changed but
+        the version was not), False otherwise.
+
+        :param previous: the previous build's DetectionStanza for comparison
+        :type previous: :class:`contentctl.objects.detection_stanza.DetectionStanza`
+
+        :returns: True if the version still needs to be bumped
+        :rtype: bool
+        """
+        return (self.hash != previous.hash) and (self.metadata.detection_version <= previous.metadata.detection_version)
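Note: version_should_be_bumped flags a stanza whose non-metadata lines changed while detection_version stayed the same. A minimal sketch with fabricated stanza lines, assuming contentctl 4.4.0 is installed:

from contentctl.objects.detection_stanza import DetectionStanza

META = (
    'action.correlationsearch.metadata = '
    '{"deprecated": "0", "detection_id": "f1e2d3c4-0000-4000-8000-000000000000", '
    '"detection_version": "2", "publish_time": 1700000000.0}'
)
old = DetectionStanza(name="ESCU - Example Detection - Rule", lines=["search = old spl", META])
new = DetectionStanza(name="ESCU - Example Detection - Rule", lines=["search = new spl", META])

# The search line changed but the version did not, so a bump is still needed.
print(new.version_should_be_bumped(old))  # True
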
contentctl/objects/detection_tags.py
CHANGED
@@ -1,6 +1,6 @@
 from __future__ import annotations
 import uuid
-from typing import TYPE_CHECKING, List, Optional,
+from typing import TYPE_CHECKING, List, Optional, Union
 from pydantic import (
     BaseModel,
     Field,
@@ -16,6 +16,7 @@ from pydantic import (
     model_validator
 )
 from contentctl.objects.story import Story
+from contentctl.objects.throttling import Throttling
 if TYPE_CHECKING:
     from contentctl.input.director import DirectorOutputDto
 
@@ -29,10 +30,9 @@ from contentctl.objects.enums import (
     RiskSeverity,
     KillChainPhase,
     NistCategory,
-    RiskLevel,
     SecurityContentProductName
 )
-from contentctl.objects.atomic import AtomicTest
+from contentctl.objects.atomic import AtomicEnrichment, AtomicTest
 from contentctl.objects.annotated_types import MITRE_ATTACK_ID_TYPE, CVE_TYPE
 
 # TODO (#266): disable the use_enum_values configuration
@@ -49,6 +49,23 @@ class DetectionTags(BaseModel):
     @property
     def risk_score(self) -> int:
         return round((self.confidence * self.impact)/100)
+
+    @computed_field
+    @property
+    def severity(self)->RiskSeverity:
+        if 0 <= self.risk_score <= 20:
+            return RiskSeverity.INFORMATIONAL
+        elif 20 < self.risk_score <= 40:
+            return RiskSeverity.LOW
+        elif 40 < self.risk_score <= 60:
+            return RiskSeverity.MEDIUM
+        elif 60 < self.risk_score <= 80:
+            return RiskSeverity.HIGH
+        elif 80 < self.risk_score <= 100:
+            return RiskSeverity.CRITICAL
+        else:
+            raise Exception(f"Error getting severity - risk_score must be between 0-100, but was actually {self.risk_score}")
+
 
     mitre_attack_id: List[MITRE_ATTACK_ID_TYPE] = []
     nist: list[NistCategory] = []
@@ -58,31 +75,16 @@ class DetectionTags(BaseModel):
     message: str = Field(...)
     product: list[SecurityContentProductName] = Field(..., min_length=1)
     required_fields: list[str] = Field(min_length=1)
-
+    throttling: Optional[Throttling] = None
     security_domain: SecurityDomain = Field(...)
-
-    @computed_field
-    @property
-    def risk_severity(self) -> RiskSeverity:
-        if self.risk_score >= 80:
-            return RiskSeverity('high')
-        elif (self.risk_score >= 50 and self.risk_score <= 79):
-            return RiskSeverity('medium')
-        else:
-            return RiskSeverity('low')
-
     cve: List[CVE_TYPE] = []
     atomic_guid: List[AtomicTest] = []
-
+
 
     # enrichment
     mitre_attack_enrichments: List[MitreAttackEnrichment] = Field([], validate_default=True)
     confidence_id: Optional[PositiveInt] = Field(None, ge=1, le=3)
     impact_id: Optional[PositiveInt] = Field(None, ge=1, le=5)
-    # context_ids: list = None
-    risk_level_id: Optional[NonNegativeInt] = Field(None, le=4)
-    risk_level: Optional[RiskLevel] = None
-    # observable_str: str = None
     evidence_str: Optional[str] = None
 
     @computed_field
@@ -112,7 +114,7 @@ class DetectionTags(BaseModel):
 
     # TODO (#268): Validate manual_test has length > 0 if not None
     manual_test: Optional[str] = None
-
+
     # The following validator is temporarily disabled pending further discussions
     # @validator('message')
     # def validate_message(cls,v,values):
@@ -158,7 +160,7 @@ class DetectionTags(BaseModel):
             "message": self.message,
             "risk_score": self.risk_score,
             "security_domain": self.security_domain,
-            "risk_severity": self.
+            "risk_severity": self.severity,
             "mitre_attack_id": self.mitre_attack_id,
             "mitre_attack_enrichments": self.mitre_attack_enrichments
         }
@@ -240,7 +242,7 @@ class DetectionTags(BaseModel):
         if output_dto is None:
             raise ValueError("Context not provided to detection.detection_tags.atomic_guid validator")
 
-
+        atomic_enrichment: AtomicEnrichment = output_dto.atomic_enrichment
 
         matched_tests: List[AtomicTest] = []
         missing_tests: List[UUID4] = []
@@ -254,7 +256,7 @@ class DetectionTags(BaseModel):
                 badly_formatted_guids.append(str(atomic_guid_str))
                 continue
             try:
-                matched_tests.append(
+                matched_tests.append(atomic_enrichment.getAtomic(atomic_guid))
             except Exception:
                 missing_tests.append(atomic_guid)
 
@@ -265,7 +267,7 @@ class DetectionTags(BaseModel):
                 f"\n\tPlease review the output above for potential exception(s) when parsing the "
                 "Atomic Red Team Repo."
                 "\n\tVerify that these auto_generated_guid exist and try updating/pulling the "
-                f"repo again
+                f"repo again: {[str(guid) for guid in missing_tests]}"
             )
         else:
            missing_tests_string = ""
@@ -278,6 +280,6 @@ class DetectionTags(BaseModel):
             raise ValueError(f"{bad_guids_string}{missing_tests_string}")
 
         elif len(missing_tests) > 0:
-
+            raise ValueError(missing_tests_string)
 
         return matched_tests + [AtomicTest.AtomicTestWhenTestIsMissing(test) for test in missing_tests]
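Note: the new severity property replaces the removed risk_severity and maps risk_score onto five bands (informational/low/medium/high/critical). A worked example of the banding with made-up confidence and impact values:

# risk_score = round(confidence * impact / 100), as defined above
confidence, impact = 80, 90
risk_score = round((confidence * impact) / 100)  # 72
assert 60 < risk_score <= 80  # 61-80 maps to RiskSeverity.HIGH under the new bands
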
contentctl/objects/drilldown.py
ADDED
@@ -0,0 +1,70 @@
+from __future__ import annotations
+from pydantic import BaseModel, Field, model_serializer
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from contentctl.objects.detection import Detection
+from contentctl.objects.enums import AnalyticsType
+DRILLDOWN_SEARCH_PLACEHOLDER = "%original_detection_search%"
+EARLIEST_OFFSET = "$info_min_time$"
+LATEST_OFFSET = "$info_max_time$"
+RISK_SEARCH = "index = risk starthoursago = 168 endhoursago = 0 | stats count values(search_name) values(risk_message) values(analyticstories) values(annotations._all) values(annotations.mitre_attack.mitre_tactic) "
+
+class Drilldown(BaseModel):
+    name: str = Field(..., description="The name of the drilldown search", min_length=5)
+    search: str = Field(..., description="The text of a drilldown search. This must be valid SPL.", min_length=1)
+    earliest_offset:None | str = Field(...,
+                                       description="Earliest offset time for the drilldown search. "
+                                       f"The most common value for this field is '{EARLIEST_OFFSET}', "
+                                       "but it is NOT the default value and must be supplied explicitly.",
+                                       min_length= 1)
+    latest_offset:None | str = Field(...,
+                                     description="Latest offset time for the driolldown search. "
+                                     f"The most common value for this field is '{LATEST_OFFSET}', "
+                                     "but it is NOT the default value and must be supplied explicitly.",
+                                     min_length= 1)
+
+    @classmethod
+    def constructDrilldownsFromDetection(cls, detection: Detection) -> list[Drilldown]:
+        victim_observables = [o for o in detection.tags.observable if o.role[0] == "Victim"]
+        if len(victim_observables) == 0 or detection.type == AnalyticsType.Hunting:
+            # No victims, so no drilldowns
+            return []
+        print(f"Adding default drilldowns for [{detection.name}]")
+        variableNamesString = ' and '.join([f"${o.name}$" for o in victim_observables])
+        nameField = f"View the detection results for {variableNamesString}"
+        appendedSearch = " | search " + ' '.join([f"{o.name} = ${o.name}$" for o in victim_observables])
+        search_field = f"{detection.search}{appendedSearch}"
+        detection_results = cls(name=nameField, earliest_offset=EARLIEST_OFFSET, latest_offset=LATEST_OFFSET, search=search_field)
+
+
+        nameField = f"View risk events for the last 7 days for {variableNamesString}"
+        fieldNamesListString = ', '.join([o.name for o in victim_observables])
+        search_field = f"{RISK_SEARCH}by {fieldNamesListString} {appendedSearch}"
+        risk_events_last_7_days = cls(name=nameField, earliest_offset=None, latest_offset=None, search=search_field)
+
+        return [detection_results,risk_events_last_7_days]
+
+
+    def perform_search_substitutions(self, detection:Detection)->None:
+        """Replaces the field DRILLDOWN_SEARCH_PLACEHOLDER (%original_detection_search%)
+        with the search contained in the detection. We do this so that the YML does not
+        need the search copy/pasted from the search field into the drilldown object.
+
+        Args:
+            detection (Detection): Detection to be used to update the search field of the drilldown
+        """
+        self.search = self.search.replace(DRILLDOWN_SEARCH_PLACEHOLDER, detection.search)
+
+
+    @model_serializer
+    def serialize_model(self) -> dict[str,str]:
+        #Call serializer for parent
+        model:dict[str,str] = {}
+
+        model['name'] = self.name
+        model['search'] = self.search
+        if self.earliest_offset is not None:
+            model['earliest_offset'] = self.earliest_offset
+        if self.latest_offset is not None:
+            model['latest_offset'] = self.latest_offset
+        return model
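Note: the custom serializer drops earliest_offset/latest_offset from the emitted dict when they are None. A minimal sketch, assuming contentctl 4.4.0 is installed; the drilldown name and SPL are made up:

from contentctl.objects.drilldown import Drilldown, EARLIEST_OFFSET, LATEST_OFFSET

d = Drilldown(
    name="View the raw events",
    search="index=main sourcetype=example | head 100",
    earliest_offset=EARLIEST_OFFSET,
    latest_offset=LATEST_OFFSET,
)
# model_dump() routes through serialize_model(), so both offsets appear here;
# a drilldown built with offsets of None would omit them.
print(d.model_dump())
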
contentctl/objects/enums.py
CHANGED
@@ -54,8 +54,9 @@ class SecurityContentType(enum.Enum):
     deployments = 7
     investigations = 8
     unit_tests = 9
-    ssa_detections = 10
     data_sources = 11
+    dashboards = 12
+
 
 # Bringing these changes back in line will take some time after
 # the initial merge is complete
@@ -69,7 +70,6 @@ class SecurityContentType(enum.Enum):
 
 class SecurityContentProduct(enum.Enum):
     SPLUNK_APP = 1
-    SSA = 2
     API = 3
     CUSTOM = 4
 
@@ -197,21 +197,21 @@ class KillChainPhase(str, enum.Enum):
 class DataSource(str,enum.Enum):
     OSQUERY_ES_PROCESS_EVENTS = "OSQuery ES Process Events"
     POWERSHELL_4104 = "Powershell 4104"
-    SYSMON_EVENT_ID_1 = "Sysmon
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    SYSMON_EVENT_ID_1 = "Sysmon EventID 1"
+    SYSMON_EVENT_ID_3 = "Sysmon EventID 3"
+    SYSMON_EVENT_ID_5 = "Sysmon EventID 5"
+    SYSMON_EVENT_ID_6 = "Sysmon EventID 6"
+    SYSMON_EVENT_ID_7 = "Sysmon EventID 7"
+    SYSMON_EVENT_ID_8 = "Sysmon EventID 8"
+    SYSMON_EVENT_ID_9 = "Sysmon EventID 9"
+    SYSMON_EVENT_ID_10 = "Sysmon EventID 10"
+    SYSMON_EVENT_ID_11 = "Sysmon EventID 11"
+    SYSMON_EVENT_ID_13 = "Sysmon EventID 13"
+    SYSMON_EVENT_ID_15 = "Sysmon EventID 15"
+    SYSMON_EVENT_ID_20 = "Sysmon EventID 20"
+    SYSMON_EVENT_ID_21 = "Sysmon EventID 21"
+    SYSMON_EVENT_ID_22 = "Sysmon EventID 22"
+    SYSMON_EVENT_ID_23 = "Sysmon EventID 23"
     WINDOWS_SECURITY_4624 = "Windows Security 4624"
     WINDOWS_SECURITY_4625 = "Windows Security 4625"
     WINDOWS_SECURITY_4648 = "Windows Security 4648"
@@ -407,14 +407,16 @@ class NistCategory(str, enum.Enum):
     RC_IM = "RC.IM"
     RC_CO = "RC.CO"
 
-class RiskLevel(str,enum.Enum):
-    INFO = "Info"
-    LOW = "Low"
-    MEDIUM = "Medium"
-    HIGH = "High"
-    CRITICAL = "Critical"
-
 class RiskSeverity(str,enum.Enum):
+    # Levels taken from the following documentation link
+    # https://docs.splunk.com/Documentation/ES/7.3.2/User/RiskScoring
+    # 20 - info (0-20 for us)
+    # 40 - low (21-40 for us)
+    # 60 - medium (41-60 for us)
+    # 80 - high (61-80 for us)
+    # 100 - critical (81 - 100 for us)
+    INFORMATIONAL = "informational"
     LOW = "low"
     MEDIUM = "medium"
     HIGH = "high"
+    CRITICAL = "critical"