contentctl 5.0.0a2__py3-none-any.whl → 5.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- contentctl/__init__.py +1 -1
- contentctl/actions/build.py +88 -55
- contentctl/actions/deploy_acs.py +29 -24
- contentctl/actions/detection_testing/DetectionTestingManager.py +66 -41
- contentctl/actions/detection_testing/GitService.py +2 -4
- contentctl/actions/detection_testing/generate_detection_coverage_badge.py +48 -30
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +163 -124
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +45 -32
- contentctl/actions/detection_testing/progress_bar.py +3 -0
- contentctl/actions/detection_testing/views/DetectionTestingView.py +15 -18
- contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +1 -5
- contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +2 -2
- contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +1 -4
- contentctl/actions/doc_gen.py +9 -5
- contentctl/actions/initialize.py +45 -33
- contentctl/actions/inspect.py +118 -61
- contentctl/actions/new_content.py +83 -53
- contentctl/actions/release_notes.py +276 -146
- contentctl/actions/reporting.py +23 -19
- contentctl/actions/test.py +31 -25
- contentctl/actions/validate.py +54 -34
- contentctl/api.py +54 -45
- contentctl/contentctl.py +10 -10
- contentctl/enrichments/attack_enrichment.py +112 -72
- contentctl/enrichments/cve_enrichment.py +34 -28
- contentctl/enrichments/splunk_app_enrichment.py +38 -36
- contentctl/helper/link_validator.py +101 -78
- contentctl/helper/splunk_app.py +69 -41
- contentctl/helper/utils.py +58 -39
- contentctl/input/director.py +69 -37
- contentctl/input/new_content_questions.py +26 -34
- contentctl/input/yml_reader.py +22 -17
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +255 -323
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +111 -46
- contentctl/objects/alert_action.py +8 -8
- contentctl/objects/annotated_types.py +1 -1
- contentctl/objects/atomic.py +64 -54
- contentctl/objects/base_test.py +2 -1
- contentctl/objects/base_test_result.py +16 -8
- contentctl/objects/baseline.py +47 -35
- contentctl/objects/baseline_tags.py +29 -22
- contentctl/objects/config.py +1 -1
- contentctl/objects/constants.py +32 -58
- contentctl/objects/correlation_search.py +75 -55
- contentctl/objects/dashboard.py +55 -41
- contentctl/objects/data_source.py +13 -13
- contentctl/objects/deployment.py +44 -37
- contentctl/objects/deployment_email.py +1 -1
- contentctl/objects/deployment_notable.py +2 -1
- contentctl/objects/deployment_phantom.py +5 -5
- contentctl/objects/deployment_rba.py +1 -1
- contentctl/objects/deployment_scheduling.py +1 -1
- contentctl/objects/deployment_slack.py +1 -1
- contentctl/objects/detection.py +5 -2
- contentctl/objects/detection_metadata.py +1 -0
- contentctl/objects/detection_stanza.py +7 -2
- contentctl/objects/detection_tags.py +54 -64
- contentctl/objects/drilldown.py +66 -35
- contentctl/objects/enums.py +61 -43
- contentctl/objects/errors.py +16 -24
- contentctl/objects/integration_test.py +3 -3
- contentctl/objects/integration_test_result.py +1 -0
- contentctl/objects/investigation.py +53 -31
- contentctl/objects/investigation_tags.py +29 -17
- contentctl/objects/lookup.py +234 -113
- contentctl/objects/macro.py +55 -38
- contentctl/objects/manual_test.py +3 -3
- contentctl/objects/manual_test_result.py +1 -0
- contentctl/objects/mitre_attack_enrichment.py +17 -16
- contentctl/objects/notable_action.py +2 -1
- contentctl/objects/notable_event.py +1 -3
- contentctl/objects/playbook.py +37 -35
- contentctl/objects/playbook_tags.py +22 -16
- contentctl/objects/rba.py +68 -11
- contentctl/objects/risk_analysis_action.py +15 -11
- contentctl/objects/risk_event.py +27 -20
- contentctl/objects/risk_object.py +1 -0
- contentctl/objects/savedsearches_conf.py +9 -7
- contentctl/objects/security_content_object.py +5 -2
- contentctl/objects/story.py +54 -49
- contentctl/objects/story_tags.py +56 -44
- contentctl/objects/test_group.py +5 -2
- contentctl/objects/threat_object.py +1 -0
- contentctl/objects/throttling.py +27 -18
- contentctl/objects/unit_test.py +3 -4
- contentctl/objects/unit_test_baseline.py +4 -5
- contentctl/objects/unit_test_result.py +6 -6
- contentctl/output/api_json_output.py +22 -22
- contentctl/output/attack_nav_output.py +21 -21
- contentctl/output/attack_nav_writer.py +29 -37
- contentctl/output/conf_output.py +230 -174
- contentctl/output/data_source_writer.py +38 -25
- contentctl/output/doc_md_output.py +53 -27
- contentctl/output/jinja_writer.py +19 -15
- contentctl/output/json_writer.py +20 -8
- contentctl/output/svg_output.py +56 -38
- contentctl/output/templates/analyticstories_detections.j2 +1 -1
- contentctl/output/templates/analyticstories_stories.j2 +1 -1
- contentctl/output/templates/es_investigations_investigations.j2 +1 -1
- contentctl/output/templates/es_investigations_stories.j2 +1 -1
- contentctl/output/templates/savedsearches_baselines.j2 +2 -2
- contentctl/output/templates/savedsearches_detections.j2 +2 -8
- contentctl/output/templates/savedsearches_investigations.j2 +2 -2
- contentctl/output/templates/transforms.j2 +2 -4
- contentctl/output/yml_writer.py +18 -24
- contentctl/templates/stories/cobalt_strike.yml +1 -0
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.1.dist-info}/METADATA +1 -1
- contentctl-5.0.1.dist-info/RECORD +168 -0
- contentctl/actions/initialize_old.py +0 -245
- contentctl/objects/observable.py +0 -39
- contentctl-5.0.0a2.dist-info/RECORD +0 -170
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.1.dist-info}/LICENSE.md +0 -0
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.1.dist-info}/WHEEL +0 -0
- {contentctl-5.0.0a2.dist-info → contentctl-5.0.1.dist-info}/entry_points.txt +0 -0
contentctl/output/data_source_writer.py CHANGED

@@ -3,37 +3,50 @@ from contentctl.objects.data_source import DataSource
 from typing import List
 import pathlib
 
-class DataSourceWriter:
 
+class DataSourceWriter:
     @staticmethod
-    def writeDataSourceCsv(
+    def writeDataSourceCsv(
+        data_source_objects: List[DataSource], file_path: pathlib.Path
+    ):
+        with open(file_path, mode="w", newline="") as file:
             writer = csv.writer(file)
             # Write the header
-            writer.writerow(
+            writer.writerow(
+                [
+                    "name",
+                    "id",
+                    "author",
+                    "source",
+                    "sourcetype",
+                    "separator",
+                    "supported_TA_name",
+                    "supported_TA_version",
+                    "supported_TA_url",
+                    "description",
+                ]
+            )
             # Write the data
             for data_source in data_source_objects:
-                if
+                if len(data_source.supported_TA) > 0:
                     supported_TA_name = data_source.supported_TA[0].name
                     supported_TA_version = data_source.supported_TA[0].version
-                    supported_TA_url = data_source.supported_TA[0].url or
+                    supported_TA_url = data_source.supported_TA[0].url or ""
                 else:
-                    supported_TA_name =
-                    supported_TA_version =
-                    supported_TA_url =
-                writer.writerow(
+                    supported_TA_name = ""
+                    supported_TA_version = ""
+                    supported_TA_url = ""
+                writer.writerow(
+                    [
+                        data_source.name,
+                        data_source.id,
+                        data_source.author,
+                        data_source.source,
+                        data_source.sourcetype,
+                        data_source.separator,
+                        supported_TA_name,
+                        supported_TA_version,
+                        supported_TA_url,
+                        data_source.description,
+                    ]
+                )
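For context, the reworked writer is normally driven from contentctl's build pipeline; the following is only a minimal standalone sketch of how the new signature could be called. The `DataSource` construction is omitted because it is not shown in this diff, and the output path is illustrative.

```python
import pathlib

from contentctl.objects.data_source import DataSource
from contentctl.output.data_source_writer import DataSourceWriter

# Hypothetical: DataSource objects would normally be built or loaded elsewhere
# in contentctl; their constructor fields are not part of this diff.
data_sources: list[DataSource] = []

# Write the CSV describing every data source and its supporting TA.
DataSourceWriter.writeDataSourceCsv(
    data_source_objects=data_sources,
    file_path=pathlib.Path("dist/data_sources.csv"),
)
```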
contentctl/output/doc_md_output.py CHANGED

@@ -1,23 +1,25 @@
 import os
-import asyncio
 import sys
 
 from pathlib import Path
 
-from contentctl.objects.enums import SecurityContentType
 from contentctl.output.jinja_writer import JinjaWriter
 
 
-class DocMdOutput
+class DocMdOutput:
     index = 0
     files_to_write = 0
 
     def writeObjects(self, objects: list, output_path: str) -> None:
         self.files_to_write = sum([len(obj) for obj in objects])
         self.index = 0
-        progress_percent = ((self.index+1)/self.files_to_write) * 100
-        if
-        print(
+        progress_percent = ((self.index + 1) / self.files_to_write) * 100
+        if sys.stdout.isatty() and sys.stdin.isatty() and sys.stderr.isatty():
+            print(
+                f"\r{'Docgen Progress'.rjust(23)}: [{progress_percent:3.0f}%]...",
+                end="",
+                flush=True,
+            )
 
         attack_tactics = set()
         datamodels = set()

@@ -33,24 +35,41 @@ class DocMdOutput():
 
             if detection.datamodel:
                 datamodels.update(detection.datamodel)
-
-        Path(os.path.join(output_path, 'overview')).mkdir(parents=True, exist_ok=True)
-        Path(os.path.join(output_path, 'detections')).mkdir(parents=True, exist_ok=True)
-        Path(os.path.join(output_path, 'stories')).mkdir(parents=True, exist_ok=True)
-        Path(os.path.join(output_path, 'playbooks')).mkdir(parents=True, exist_ok=True)
 
+        Path(os.path.join(output_path, "overview")).mkdir(parents=True, exist_ok=True)
+        Path(os.path.join(output_path, "detections")).mkdir(parents=True, exist_ok=True)
+        Path(os.path.join(output_path, "stories")).mkdir(parents=True, exist_ok=True)
+        Path(os.path.join(output_path, "playbooks")).mkdir(parents=True, exist_ok=True)
+
+        JinjaWriter.writeObjectsList(
+            "doc_story_page.j2",
+            os.path.join(output_path, "overview/stories.md"),
+            sorted(objects[0], key=lambda x: x.name),
+        )
+        self.writeObjectsMd(
+            objects[0], os.path.join(output_path, "stories"), "doc_stories.j2"
+        )
+
+        JinjaWriter.writeObjectsList(
+            "doc_detection_page.j2",
+            os.path.join(output_path, "overview/detections.md"),
+            sorted(objects[1], key=lambda x: x.name),
+        )
+        self.writeObjectsMd(
+            objects[1], os.path.join(output_path, "detections"), "doc_detections.j2"
+        )
 
-        JinjaWriter.writeObjectsList(
+        JinjaWriter.writeObjectsList(
+            "doc_playbooks_page.j2",
+            os.path.join(output_path, "overview/paybooks.md"),
+            sorted(objects[2], key=lambda x: x.name),
+        )
+        self.writeObjectsMd(
+            objects[2], os.path.join(output_path, "playbooks"), "doc_playbooks.j2"
+        )
 
-        JinjaWriter.writeObjectsList('doc_playbooks_page.j2', os.path.join(output_path, 'overview/paybooks.md'), sorted(objects[2], key=lambda x: x.name))
-        self.writeObjectsMd(objects[2], os.path.join(output_path, 'playbooks'), 'doc_playbooks.j2')
-
         print("Done!")
 
     # def writeNavigationPageObjects(self, objects: list, output_path: str) -> None:
     #     for obj in objects:
     #         JinjaWriter.writeObject('doc_navigation_pages.j2', os.path.join(output_path, '_pages', obj.lower().replace(' ', '_') + '.md'),

@@ -61,10 +80,17 @@ class DocMdOutput():
 
     def writeObjectsMd(self, objects, output_path: str, template_name: str) -> None:
         for obj in objects:
-            progress_percent = ((self.index+1)/self.files_to_write) * 100
-            self.index+=1
-            if
-            print(
+            progress_percent = ((self.index + 1) / self.files_to_write) * 100
+            self.index += 1
+            if sys.stdout.isatty() and sys.stdin.isatty() and sys.stderr.isatty():
+                print(
+                    f"\r{'Docgen Progress'.rjust(23)}: [{progress_percent:3.0f}%]...",
+                    end="",
+                    flush=True,
+                )
 
+            JinjaWriter.writeObject(
+                template_name,
+                os.path.join(output_path, obj.name.lower().replace(" ", "_") + ".md"),
+                obj,
+            )
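The rewritten `writeObjects` still indexes its input positionally: `objects[0]` is stories, `objects[1]` detections, `objects[2]` playbooks. A hedged sketch of how contentctl's doc generation action might drive it (object construction omitted; the output directory is illustrative):

```python
from contentctl.output.doc_md_output import DocMdOutput

# Hypothetical inputs: in contentctl these collections come from the
# validate/director step, not from empty lists.
stories, detections, playbooks = [], [], []

doc_writer = DocMdOutput()
# Emits overview pages plus one Markdown file per object under the target path.
doc_writer.writeObjects([stories, detections, playbooks], "docs")
```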
contentctl/output/jinja_writer.py CHANGED

@@ -4,30 +4,34 @@ from jinja2 import Environment, FileSystemLoader
 
 
 class JinjaWriter:
-
     @staticmethod
-    def writeObjectsList(template_name
+    def writeObjectsList(template_name: str, output_path: str, objects: list) -> None:
         j2_env = Environment(
-            loader=FileSystemLoader(
+            loader=FileSystemLoader(
+                os.path.join(os.path.dirname(__file__), "templates")
+            ),
+            trim_blocks=False,
+        )
 
         template = j2_env.get_template(template_name)
         output = template.render(objects=objects)
-        with open(output_path,
-        output = output.encode(
+        with open(output_path, "w") as f:
+            output = output.encode("ascii", "ignore").decode("ascii")
             f.write(output)
 
     @staticmethod
-    def writeObject(
+    def writeObject(
+        template_name: str, output_path: str, object: dict[str, Any]
+    ) -> None:
         j2_env = Environment(
-            loader=FileSystemLoader(
+            loader=FileSystemLoader(
+                os.path.join(os.path.dirname(__file__), "templates")
+            ),
+            trim_blocks=False,
+        )
 
         template = j2_env.get_template(template_name)
         output = template.render(object=object)
-        with open(output_path,
-        output = output.encode(
-        f.write(output)
+        with open(output_path, "w") as f:
+            output = output.encode("ascii", "ignore").decode("ascii")
+            f.write(output)
contentctl/output/json_writer.py CHANGED

@@ -1,19 +1,31 @@
 import json
 from typing import Any
-class JsonWriter():
 
+
+class JsonWriter:
     @staticmethod
-    def writeJsonObject(
+    def writeJsonObject(
+        file_path: str,
+        object_name: str,
+        objs: list[dict[str, Any]],
+        readable_output: bool = True,
+    ) -> None:
         try:
-            with open(file_path,
+            with open(file_path, "w") as outfile:
                 if readable_output:
                     # At the cost of slightly larger filesize, improve the redability significantly
                     # by sorting and indenting keys/values
-                    sorted_objs = sorted(objs, key=lambda o: o[
-                    json.dump(
+                    sorted_objs = sorted(objs, key=lambda o: o["name"])
+                    json.dump(
+                        {object_name: sorted_objs},
+                        outfile,
+                        ensure_ascii=False,
+                        indent=2,
+                    )
                 else:
-                    json.dump({object_name:objs}, outfile, ensure_ascii=False)
+                    json.dump({object_name: objs}, outfile, ensure_ascii=False)
 
         except Exception as e:
-
+            raise Exception(
+                f"Error serializing object to Json File '{file_path}': {str(e)}"
+            )
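The expanded keyword-style signature reads naturally when called explicitly. A small sketch under assumed inputs (the file name and payload below are illustrative, not taken from this diff):

```python
from contentctl.output.json_writer import JsonWriter

# Illustrative payload; in contentctl the dicts come from serialized content objects.
detections = [
    {"name": "Suspicious Process", "id": "0001"},
    {"name": "Anomalous Login", "id": "0002"},
]

# readable_output=True (the default) sorts entries by "name" and pretty-prints
# with indent=2; passing False writes a compact single-line document.
JsonWriter.writeJsonObject(
    file_path="dist/api/detections.json",
    object_name="detections",
    objs=detections,
    readable_output=True,
)
```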
contentctl/output/svg_output.py CHANGED

@@ -1,4 +1,3 @@
-import os
 import pathlib
 from typing import List, Any
 

@@ -6,50 +5,69 @@ from contentctl.objects.enums import SecurityContentType
 from contentctl.output.jinja_writer import JinjaWriter
 from contentctl.objects.enums import DetectionStatus
 from contentctl.objects.detection import Detection
-class SvgOutput():
 
+
+class SvgOutput:
+    def get_badge_dict(
+        self,
+        name: str,
+        total_detections: List[Detection],
+        these_detections: List[Detection],
+    ) -> dict[str, Any]:
+        obj: dict[str, Any] = {}
+        obj["name"] = name
 
         if name == "Production":
-            obj[
+            obj["color"] = "Green"
         elif name == "Detections":
-            obj[
+            obj["color"] = "Green"
         elif name == "Experimental":
-            obj[
+            obj["color"] = "Yellow"
         elif name == "Deprecated":
-            obj[
+            obj["color"] = "Red"
 
-        obj[
-        if obj[
-            obj[
+        obj["count"] = len(total_detections)
+        if obj["count"] == 0:
+            obj["coverage"] = "NaN"
         else:
-            obj[
-            obj[
+            obj["coverage"] = len(these_detections) / obj["count"]
+            obj["coverage"] = "{:.0%}".format(obj["coverage"])
         return obj
-
-    def writeObjects(self, detections: List[Detection], output_path: pathlib.Path, type: SecurityContentType = None) -> None:
-        total_dict:dict[str,Any] = self.get_badge_dict("Detections", detections, detections)
-        production_dict:dict[str,Any] = self.get_badge_dict("% Production", detections, [detection for detection in detections if detection.status == DetectionStatus.production])
-        #deprecated_dict = self.get_badge_dict("Deprecated", detections, [detection for detection in detections if detection.status == DetectionStatus.deprecated])
-        #experimental_dict = self.get_badge_dict("Experimental", detections, [detection for detection in detections if detection.status == DetectionStatus.experimental])
-
-        #Total number of detections
-        JinjaWriter.writeObject('detection_count.j2', output_path /'detection_count.svg', total_dict)
-        #JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'production_count.svg'), production_dict)
-        #JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'deprecated_count.svg'), deprecated_dict)
-        #JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'experimental_count.svg'), experimental_dict)
-
-        #Percentage of detections that are production
-        JinjaWriter.writeObject('detection_coverage.j2', output_path/'detection_coverage.svg', production_dict)
-        #JinjaWriter.writeObject('detection_coverage.j2', os.path.join(output_path, 'detection_coverage.svg'), deprecated_dict)
-        #JinjaWriter.writeObject('detection_coverage.j2', os.path.join(output_path, 'detection_coverage.svg'), experimental_dict)
 
+    def writeObjects(
+        self,
+        detections: List[Detection],
+        output_path: pathlib.Path,
+        type: SecurityContentType = None,
+    ) -> None:
+        total_dict: dict[str, Any] = self.get_badge_dict(
+            "Detections", detections, detections
+        )
+        production_dict: dict[str, Any] = self.get_badge_dict(
+            "% Production",
+            detections,
+            [
+                detection
+                for detection in detections
+                if detection.status == DetectionStatus.production
+            ],
+        )
+        # deprecated_dict = self.get_badge_dict("Deprecated", detections, [detection for detection in detections if detection.status == DetectionStatus.deprecated])
+        # experimental_dict = self.get_badge_dict("Experimental", detections, [detection for detection in detections if detection.status == DetectionStatus.experimental])
+
+        # Total number of detections
+        JinjaWriter.writeObject(
+            "detection_count.j2", output_path / "detection_count.svg", total_dict
+        )
+        # JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'production_count.svg'), production_dict)
+        # JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'deprecated_count.svg'), deprecated_dict)
+        # JinjaWriter.writeObject('detection_count.j2', os.path.join(output_path, 'experimental_count.svg'), experimental_dict)
+
+        # Percentage of detections that are production
+        JinjaWriter.writeObject(
+            "detection_coverage.j2",
+            output_path / "detection_coverage.svg",
+            production_dict,
+        )
+        # JinjaWriter.writeObject('detection_coverage.j2', os.path.join(output_path, 'detection_coverage.svg'), deprecated_dict)
+        # JinjaWriter.writeObject('detection_coverage.j2', os.path.join(output_path, 'detection_coverage.svg'), experimental_dict)
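Badge generation is normally invoked from contentctl's coverage-badge workflow; a hedged sketch of calling the reworked `writeObjects` directly (the detection list and output directory are placeholders):

```python
import pathlib

from contentctl.objects.detection import Detection
from contentctl.output.svg_output import SvgOutput

# Hypothetical: detections would come from a contentctl validate/build run.
detections: list[Detection] = []

# Renders detection_count.svg and detection_coverage.svg into the target directory.
SvgOutput().writeObjects(detections, pathlib.Path("reporting"))
```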
contentctl/output/templates/analyticstories_detections.j2 CHANGED

@@ -7,7 +7,7 @@
 type = detection
 asset_type = {{ detection.tags.asset_type }}
 confidence = medium
-explanation = {{
+explanation = {{ detection.status_aware_description | escapeNewlines() }}
 {% if detection.how_to_implement is defined %}
 how_to_implement = {{ detection.how_to_implement | escapeNewlines() }}
 {% else %}
contentctl/output/templates/analyticstories_stories.j2 CHANGED

@@ -11,7 +11,7 @@ references = {{ story.getReferencesListForJson() | tojson }}
 maintainers = [{"company": "{{ story.author_company }}", "email": "{{ story.author_email }}", "name": "{{ story.author_name }}"}]
 spec_version = 3
 searches = {{ story.storyAndInvestigationNamesWithApp(app) | tojson }}
-description = {{ story.
+description = {{ story.status_aware_description | escapeNewlines() }}
 {% if story.narrative is defined %}
 narrative = {{ story.narrative | escapeNewlines() }}
 {% endif %}
contentctl/output/templates/es_investigations_investigations.j2 CHANGED

@@ -2,7 +2,7 @@
 {% for response_task in objects %}
 [panel://workbench_panel_{{ response_task.lowercase_name }}___response_task]
 label = {{ response_task.name }}
-description = {{ response_task.
+description = {{ response_task.status_aware_description | escapeNewlines() }}
 disabled = 0
 tokens = {\
 {% for token in response_task.inputs %}
contentctl/output/templates/es_investigations_stories.j2 CHANGED

@@ -2,7 +2,7 @@
 {% for story in objects %}
 [panel_group://workbench_panel_group_{{ story.lowercase_name}}]
 label = {{ story.name }}
-description = {{ story.
+description = {{ story.status_aware_description | escapeNewlines() }}
 disabled = 0
 
 {% if story.workbench_panels is defined %}
contentctl/output/templates/savedsearches_baselines.j2 CHANGED

@@ -8,7 +8,7 @@
 action.escu = 0
 action.escu.enabled = 1
 action.escu.search_type = support
-description = {{ detection.
+description = {{ detection.status_aware_description | escapeNewlines() }}
 action.escu.creation_date = {{ detection.date }}
 action.escu.modification_date = {{ detection.date }}
 {% if detection.tags.analytic_story is defined %}

@@ -29,7 +29,7 @@ action.escu.providing_technologies = {{ detection.providing_technologies | tojso
 {% else %}
 action.escu.providing_technologies = []
 {% endif %}
-action.escu.eli5 = {{ detection.
+action.escu.eli5 = {{ detection.status_aware_description | escapeNewlines() }}
 {% if detection.how_to_implement is defined %}
 action.escu.how_to_implement = {{ detection.how_to_implement | escapeNewlines() }}
 {% else %}
contentctl/output/templates/savedsearches_detections.j2 CHANGED

@@ -5,16 +5,10 @@
 [{{ detection.get_conf_stanza_name(app) }}]
 action.escu = 0
 action.escu.enabled = 1
-{% if detection.status == "deprecated" %}
-description = **WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat Research Team. This means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. {{ detection.description | escapeNewlines() }}
-{% elif detection.status == "experimental" %}
-description = **WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat Research Team. This means that the detection has been manually tested but we do not have the associated attack data to perform automated testing or cannot share this attack dataset due to its sensitive nature. If you have any questions feel free to email us at: research@splunk.com. {{ detection.description | escapeNewlines() }}
-{% else %}
-description = {{ detection.description | escapeNewlines() }}
-{% endif %}
+description = {{ detection.status_aware_description | escapeNewlines() }}
 action.escu.mappings = {{ detection.mappings | tojson }}
 action.escu.data_models = {{ detection.datamodel | tojson }}
-action.escu.eli5 = {{ detection.
+action.escu.eli5 = {{ detection.status_aware_description | escapeNewlines() }}
 {% if detection.how_to_implement %}
 action.escu.how_to_implement = {{ detection.how_to_implement | escapeNewlines() }}
 {% else %}
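Across these templates, the per-status WARNING boilerplate moves out of Jinja conditionals and into a `status_aware_description` attribute on the content objects themselves. The diff does not show that attribute's implementation; the sketch below is only a plausible reconstruction from the text the old template embedded, and the function name, signature, and structure are assumptions.

```python
# Hypothetical reconstruction; the real property lives on contentctl's content
# objects and may differ in wording and structure.
DEPRECATED_WARNING = (
    "**WARNING**, this detection has been marked **DEPRECATED** by the Splunk Threat "
    "Research Team. This means that it will no longer be maintained or supported. "
    "If you have any questions feel free to email us at: research@splunk.com. "
)
EXPERIMENTAL_WARNING = (
    "**WARNING**, this detection is marked **EXPERIMENTAL** by the Splunk Threat "
    "Research Team. This means that the detection has been manually tested but we do "
    "not have the associated attack data to perform automated testing or cannot share "
    "this attack dataset due to its sensitive nature. If you have any questions feel "
    "free to email us at: research@splunk.com. "
)


def status_aware_description(status: str, description: str) -> str:
    # Prefix the plain description with the status-specific warning, if any.
    if status == "deprecated":
        return DEPRECATED_WARNING + description
    if status == "experimental":
        return EXPERIMENTAL_WARNING + description
    return description
```

With the prefixing centralized in Python, every conf template can render a single `description = ...` line regardless of status.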
contentctl/output/templates/savedsearches_investigations.j2 CHANGED

@@ -9,7 +9,7 @@
 action.escu = 0
 action.escu.enabled = 1
 action.escu.search_type = investigative
-description = {{ detection.
+description = {{ detection.status_aware_description | escapeNewlines() }}
 action.escu.creation_date = {{ detection.date }}
 action.escu.modification_date = {{ detection.date }}
 {% if detection.tags.analytic_story is defined %}

@@ -21,7 +21,7 @@ action.escu.earliest_time_offset = 3600
 action.escu.latest_time_offset = 86400
 action.escu.providing_technologies = []
 action.escu.data_models = {{ detection.datamodel | tojson }}
-action.escu.eli5 = {{ detection.
+action.escu.eli5 = {{ detection.status_aware_description | escapeNewlines() }}
 action.escu.how_to_implement = none
 action.escu.known_false_positives = None at this time
 disabled = true
contentctl/output/templates/transforms.j2 CHANGED

@@ -13,11 +13,9 @@ default_match = {{ lookup.default_match | lower }}
 {% if lookup.case_sensitive_match is defined and lookup.case_sensitive_match != None %}
 case_sensitive_match = {{ lookup.case_sensitive_match | lower }}
 {% endif %}
-{% if lookup.description is defined and lookup.description != None %}
 # description = {{ lookup.description | escapeNewlines() }}
-{% endif %}
-
-match_type = {{ lookup.match_type }}
+{% if lookup.match_type | length > 0 %}
+match_type = {{ lookup.match_type_to_conf_format }}
 {% endif %}
 {% if lookup.max_matches is defined and lookup.max_matches != None %}
 max_matches = {{ lookup.max_matches }}
contentctl/output/yml_writer.py CHANGED

@@ -1,4 +1,3 @@
-
 import yaml
 from typing import Any
 from enum import StrEnum, IntEnum

@@ -8,25 +7,22 @@ from enum import StrEnum, IntEnum
 # to write to files:
 # yaml.representer.RepresenterError: ('cannot represent an object',.....
 yaml.SafeDumper.add_multi_representer(
-    StrEnum,
-    yaml.representer.SafeRepresenter.represent_str
+    StrEnum, yaml.representer.SafeRepresenter.represent_str
 )
 
 yaml.SafeDumper.add_multi_representer(
-    IntEnum,
-    yaml.representer.SafeRepresenter.represent_int
+    IntEnum, yaml.representer.SafeRepresenter.represent_int
 )
 
-class YmlWriter:
 
+class YmlWriter:
     @staticmethod
-    def writeYmlFile(file_path
-        with open(file_path, 'w') as outfile:
+    def writeYmlFile(file_path: str, obj: dict[Any, Any]) -> None:
+        with open(file_path, "w") as outfile:
             yaml.safe_dump(obj, outfile, default_flow_style=False, sort_keys=False)
 
     @staticmethod
-    def writeDetection(file_path: str, obj: dict[Any,Any]) -> None:
+    def writeDetection(file_path: str, obj: dict[Any, Any]) -> None:
         output = dict()
         output["name"] = obj["name"]
         output["id"] = obj["id"]

@@ -35,7 +31,7 @@ class YmlWriter:
         output["author"] = obj["author"]
         output["type"] = obj["type"]
         output["status"] = obj["status"]
-        output["data_source"] = obj[
+        output["data_source"] = obj["data_sources"]
         output["description"] = obj["description"]
         output["search"] = obj["search"]
         output["how_to_implement"] = obj["how_to_implement"]

@@ -45,20 +41,18 @@ class YmlWriter:
         output["tests"] = obj["tags"]
 
         YmlWriter.writeYmlFile(file_path=file_path, obj=output)
 
     @staticmethod
-    def writeStory(file_path: str, obj: dict[Any,Any]) -> None:
+    def writeStory(file_path: str, obj: dict[Any, Any]) -> None:
         output = dict()
-        output[
-        output[
-        output[
-        output[
-        output[
-        output[
-        output[
-        output[
-        output[
+        output["name"] = obj["name"]
+        output["id"] = obj["id"]
+        output["version"] = obj["version"]
+        output["date"] = obj["date"]
+        output["author"] = obj["author"]
+        output["description"] = obj["description"]
+        output["narrative"] = obj["narrative"]
+        output["references"] = obj["references"]
+        output["tags"] = obj["tags"]
 
         YmlWriter.writeYmlFile(file_path=file_path, obj=output)
-
-
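A brief sketch of the retyped writer in use. The dictionary below is deliberately minimal and illustrative; the real `writeDetection`/`writeStory` helpers expect the full set of keys seen above (type, status, data_sources, search, tags, and so on), so only the generic `writeYmlFile` is exercised here.

```python
from contentctl.output.yml_writer import YmlWriter

# Illustrative payload only; a real contentctl detection YAML carries many more keys.
detection = {
    "name": "My New Detection",
    "id": "00000000-0000-0000-0000-000000000000",
}

# safe_dump with sort_keys=False preserves the insertion order of the dict.
YmlWriter.writeYmlFile(file_path="detections/my_new_detection.yml", obj=detection)
```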
contentctl/templates/stories/cobalt_strike.yml CHANGED

@@ -3,6 +3,7 @@ id: bcfd17e8-5461-400a-80a2-3b7d1459220c
 version: 1
 date: '2021-02-16'
 author: Michael Haag, Splunk
+status: production
 description: Cobalt Strike is threat emulation software. Red teams and penetration
   testers use Cobalt Strike to demonstrate the risk of a breach and evaluate mature
   security programs. Most recently, Cobalt Strike has become the choice tool by threat
|