contentctl 4.4.7__py3-none-any.whl → 5.0.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- contentctl/actions/build.py +39 -27
- contentctl/actions/detection_testing/DetectionTestingManager.py +0 -1
- contentctl/actions/detection_testing/GitService.py +132 -72
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +32 -26
- contentctl/actions/detection_testing/progress_bar.py +6 -6
- contentctl/actions/detection_testing/views/DetectionTestingView.py +4 -4
- contentctl/actions/new_content.py +98 -81
- contentctl/actions/test.py +4 -5
- contentctl/actions/validate.py +2 -1
- contentctl/contentctl.py +114 -80
- contentctl/helper/utils.py +0 -14
- contentctl/input/director.py +5 -5
- contentctl/input/new_content_questions.py +2 -2
- contentctl/input/yml_reader.py +11 -6
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +228 -120
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +5 -7
- contentctl/objects/alert_action.py +2 -1
- contentctl/objects/atomic.py +1 -0
- contentctl/objects/base_test.py +4 -3
- contentctl/objects/base_test_result.py +3 -3
- contentctl/objects/baseline.py +26 -6
- contentctl/objects/baseline_tags.py +2 -3
- contentctl/objects/config.py +789 -596
- contentctl/objects/constants.py +4 -1
- contentctl/objects/correlation_search.py +89 -95
- contentctl/objects/data_source.py +5 -6
- contentctl/objects/deployment.py +2 -10
- contentctl/objects/deployment_email.py +2 -1
- contentctl/objects/deployment_notable.py +2 -1
- contentctl/objects/deployment_phantom.py +2 -1
- contentctl/objects/deployment_rba.py +2 -1
- contentctl/objects/deployment_scheduling.py +2 -1
- contentctl/objects/deployment_slack.py +2 -1
- contentctl/objects/detection_tags.py +7 -42
- contentctl/objects/drilldown.py +1 -0
- contentctl/objects/enums.py +21 -58
- contentctl/objects/investigation.py +6 -5
- contentctl/objects/investigation_tags.py +2 -3
- contentctl/objects/lookup.py +145 -63
- contentctl/objects/macro.py +2 -3
- contentctl/objects/mitre_attack_enrichment.py +2 -2
- contentctl/objects/observable.py +3 -1
- contentctl/objects/playbook_tags.py +5 -1
- contentctl/objects/rba.py +90 -0
- contentctl/objects/risk_event.py +87 -144
- contentctl/objects/story_tags.py +1 -2
- contentctl/objects/test_attack_data.py +2 -1
- contentctl/objects/unit_test_baseline.py +2 -1
- contentctl/output/api_json_output.py +233 -220
- contentctl/output/conf_output.py +51 -44
- contentctl/output/conf_writer.py +201 -125
- contentctl/output/data_source_writer.py +0 -1
- contentctl/output/json_writer.py +2 -4
- contentctl/output/svg_output.py +1 -1
- contentctl/output/templates/analyticstories_detections.j2 +1 -1
- contentctl/output/templates/collections.j2 +1 -1
- contentctl/output/templates/doc_detections.j2 +0 -5
- contentctl/output/templates/savedsearches_detections.j2 +8 -3
- contentctl/output/templates/transforms.j2 +4 -4
- contentctl/output/yml_writer.py +15 -0
- contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +16 -34
- {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/METADATA +5 -4
- {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/RECORD +66 -69
- {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/WHEEL +1 -1
- contentctl/objects/event_source.py +0 -11
- contentctl/output/detection_writer.py +0 -28
- contentctl/output/new_content_yml_output.py +0 -56
- contentctl/output/yml_output.py +0 -66
- {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/LICENSE.md +0 -0
- {contentctl-4.4.7.dist-info → contentctl-5.0.0a2.dist-info}/entry_points.txt +0 -0
contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py
CHANGED

@@ -442,7 +442,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.GROUP,
         test_group.name,
-        FinalTestingStates.SKIP
+        FinalTestingStates.SKIP,
         start_time=time.time(),
         set_pbar=False,
     )
@@ -483,7 +483,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.GROUP,
         test_group.name,
-        TestingStates.DONE_GROUP
+        TestingStates.DONE_GROUP,
         start_time=setup_results.start_time,
         set_pbar=False,
     )
@@ -504,7 +504,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.GROUP,
         test_group.name,
-        TestingStates.BEGINNING_GROUP
+        TestingStates.BEGINNING_GROUP,
         start_time=setup_start_time
     )
     # https://github.com/WoLpH/python-progressbar/issues/164
@@ -544,7 +544,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.GROUP,
         test_group.name,
-        TestingStates.DELETING
+        TestingStates.DELETING,
         start_time=test_group_start_time,
     )

@@ -632,7 +632,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.SKIP
+        FinalTestingStates.SKIP,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -664,7 +664,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.ERROR
+        FinalTestingStates.ERROR,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -724,7 +724,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
         res = "ERROR"
         link = detection.search
     else:
-        res = test.result.status.
+        res = test.result.status.upper()  # type: ignore
         link = test.result.get_summary_dict()["sid_link"]

     self.format_pbar_string(
@@ -755,7 +755,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.PASS
+        FinalTestingStates.PASS,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -766,7 +766,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.SKIP
+        FinalTestingStates.SKIP,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -777,7 +777,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.FAIL
+        FinalTestingStates.FAIL,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -788,7 +788,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.ERROR
+        FinalTestingStates.ERROR,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -821,7 +821,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     test_start_time = time.time()

     # First, check to see if the test should be skipped (Hunting or Correlation)
-    if detection.type in [AnalyticsType.Hunting
+    if detection.type in [AnalyticsType.Hunting, AnalyticsType.Correlation]:
         test.skip(
             f"TEST SKIPPED: detection is type {detection.type} and cannot be integration "
             "tested at this time"
@@ -843,11 +843,11 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     # Determine the reporting state (we should only encounter SKIP/FAIL/ERROR)
     state: str
     if test.result.status == TestResultStatus.SKIP:
-        state = FinalTestingStates.SKIP
+        state = FinalTestingStates.SKIP
     elif test.result.status == TestResultStatus.FAIL:
-        state = FinalTestingStates.FAIL
+        state = FinalTestingStates.FAIL
     elif test.result.status == TestResultStatus.ERROR:
-        state = FinalTestingStates.ERROR
+        state = FinalTestingStates.ERROR
     else:
         raise ValueError(
             f"Status for (integration) '{detection.name}:{test.name}' was preemptively set"
@@ -891,7 +891,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.INTEGRATION,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.FAIL
+        FinalTestingStates.FAIL,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -935,7 +935,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     if test.result is None:
         res = "ERROR"
     else:
-        res = test.result.status.
+        res = test.result.status.upper()  # type: ignore

     # Get the link to the saved search in this specific instance
     link = f"https://{self.infrastructure.instance_address}:{self.infrastructure.web_ui_port}"
@@ -968,7 +968,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.INTEGRATION,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.PASS
+        FinalTestingStates.PASS,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -979,7 +979,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.INTEGRATION,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.SKIP
+        FinalTestingStates.SKIP,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -990,7 +990,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.INTEGRATION,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.FAIL
+        FinalTestingStates.FAIL,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -1001,7 +1001,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.INTEGRATION,
         f"{detection.name}:{test.name}",
-        FinalTestingStates.ERROR
+        FinalTestingStates.ERROR,
         start_time=test_start_time,
         set_pbar=False,
     )
@@ -1077,7 +1077,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        TestingStates.PROCESSING
+        TestingStates.PROCESSING,
         start_time=start_time
     )

@@ -1086,7 +1086,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.UNIT,
         f"{detection.name}:{test.name}",
-        TestingStates.SEARCHING
+        TestingStates.SEARCHING,
         start_time=start_time,
     )

@@ -1094,6 +1094,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     job = self.get_conn().search(query=search, **kwargs)
     results = JSONResultsReader(job.results(output_mode="json"))

+    # TODO (cmcginley): @ljstella you're removing this ultimately, right?
     # Consolidate a set of the distinct observable field names
     observable_fields_set = set([o.name for o in detection.tags.observable]) # keeping this around for later
     risk_object_fields_set = set([o.name for o in detection.tags.observable if "Victim" in o.role ]) # just the "Risk Objects"
@@ -1121,7 +1122,10 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     missing_risk_objects = risk_object_fields_set - results_fields_set
     if len(missing_risk_objects) > 0:
         # Report a failure in such cases
-        e = Exception(
+        e = Exception(
+            f"The risk object field(s) {missing_risk_objects} are missing in the "
+            "detection results"
+        )
         test.result.set_job_content(
             job.content,
             self.infrastructure,
@@ -1137,6 +1141,8 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     # on a field. In this case, the field will appear but will not contain any values
     current_empty_fields: set[str] = set()

+    # TODO (cmcginley): @ljstella is this something we're keeping for testing as
+    # well?
     for field in observable_fields_set:
         if result.get(field, 'null') == 'null':
             if field in risk_object_fields_set:
@@ -1289,7 +1295,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.GROUP,
         test_group.name,
-        TestingStates.DOWNLOADING
+        TestingStates.DOWNLOADING,
         start_time=test_group_start_time
     )

@@ -1307,7 +1313,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     self.format_pbar_string(
         TestReportingType.GROUP,
         test_group.name,
-        TestingStates.REPLAYING
+        TestingStates.REPLAYING,
         start_time=test_group_start_time
     )

contentctl/actions/detection_testing/progress_bar.py
CHANGED

@@ -1,10 +1,10 @@
 import time
-from enum import
+from enum import StrEnum
 from tqdm import tqdm
 import datetime


-class TestReportingType(
+class TestReportingType(StrEnum):
     """
     5-char identifiers for the type of testing being reported on
     """
@@ -21,7 +21,7 @@ class TestReportingType(str, Enum):
     INTEGRATION = "INTEG"


-class TestingStates(
+class TestingStates(StrEnum):
     """
     Defined testing states
     """
@@ -40,10 +40,10 @@ class TestingStates(str, Enum):


 # the longest length of any state
-LONGEST_STATE = max(len(w
+LONGEST_STATE = max(len(w) for w in TestingStates)


-class FinalTestingStates(
+class FinalTestingStates(StrEnum):
     """
     The possible final states for a test (for pbar reporting)
     """
@@ -82,7 +82,7 @@ def format_pbar_string(
     :returns: a formatted string for use w/ pbar
     """
     # Extract and ljust our various fields
-    field_one = test_reporting_type
+    field_one = test_reporting_type
     field_two = test_name.ljust(MAX_TEST_NAME_LENGTH)
     field_three = state.ljust(LONGEST_STATE)
     field_four = datetime.timedelta(seconds=round(time.time() - start_time))

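A side note on the enum changes in progress_bar.py above (and the related `.upper()` and trailing-comma churn in DetectionTestingInfrastructure.py): moving from `(str, Enum)` mixins to `enum.StrEnum` means a member stringifies to its value directly, so display code no longer needs `.value`. A minimal, self-contained sketch of that difference, using placeholder class names rather than the package's own (requires Python 3.11+):

# Minimal illustration only (not contentctl code); Python 3.11+ for enum.StrEnum.
from enum import Enum, StrEnum

class OldState(str, Enum):      # pre-5.0 style mixin
    SKIP = "SKIP"

class NewState(StrEnum):        # 5.0 style
    SKIP = "SKIP"

print(str(OldState.SKIP))       # "OldState.SKIP" -- display code needed OldState.SKIP.value
print(str(NewState.SKIP))       # "SKIP" -- the member already behaves like its string value
print(NewState.SKIP.upper())    # "SKIP" -- ordinary str methods still work
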
contentctl/actions/detection_testing/views/DetectionTestingView.py
CHANGED

@@ -110,11 +110,11 @@ class DetectionTestingView(BaseModel, abc.ABC):
         total_skipped += 1

     # Aggregate production status metrics
-    if detection.status == DetectionStatus.production
+    if detection.status == DetectionStatus.production:
         total_production += 1
-    elif detection.status == DetectionStatus.experimental
+    elif detection.status == DetectionStatus.experimental:
         total_experimental += 1
-    elif detection.status == DetectionStatus.deprecated
+    elif detection.status == DetectionStatus.deprecated:
         total_deprecated += 1

     # Check if the detection is manual_test
@@ -178,7 +178,7 @@ class DetectionTestingView(BaseModel, abc.ABC):
     # Construct and return the larger results dict
     result_dict = {
         "summary": {
-            "mode": self.config.
+            "mode": self.config.mode.mode_name,
             "enable_integration_testing": self.config.enable_integration_testing,
             "success": overall_success,
             "total_detections": total_detections,

contentctl/actions/new_content.py
CHANGED

@@ -1,77 +1,115 @@
-
-
 from dataclasses import dataclass
 import questionary
 from typing import Any
 from contentctl.input.new_content_questions import NewContentQuestions
-from contentctl.output.new_content_yml_output import NewContentYmlOutput
 from contentctl.objects.config import new, NewContentType
 import uuid
 from datetime import datetime
 import pathlib
 from contentctl.objects.abstract_security_content_objects.security_content_object_abstract import SecurityContentObject_Abstract
 from contentctl.output.yml_writer import YmlWriter
-
+from contentctl.objects.enums import AssetType
+from contentctl.objects.constants import SES_OBSERVABLE_TYPE_MAPPING, SES_OBSERVABLE_ROLE_MAPPING
 class NewContent:
+    UPDATE_PREFIX = "__UPDATE__"
+
+    DEFAULT_DRILLDOWN_DEF = [
+        {
+            "name": f'View the detection results for - "${UPDATE_PREFIX}FIRST_RISK_OBJECT$" and "${UPDATE_PREFIX}SECOND_RISK_OBJECT$"',
+            "search": f'%original_detection_search% | search "${UPDATE_PREFIX}FIRST_RISK_OBJECT = "${UPDATE_PREFIX}FIRST_RISK_OBJECT$" second_observable_type_here = "${UPDATE_PREFIX}SECOND_RISK_OBJECT$"',
+            "earliest_offset": '$info_min_time$',
+            "latest_offset": '$info_max_time$'
+        },
+        {
+            "name": f'View risk events for the last 7 days for - "${UPDATE_PREFIX}FIRST_RISK_OBJECT$" and "${UPDATE_PREFIX}SECOND_RISK_OBJECT$"',
+            "search": f'| from datamodel Risk.All_Risk | search normalized_risk_object IN ("${UPDATE_PREFIX}FIRST_RISK_OBJECT$", "${UPDATE_PREFIX}SECOND_RISK_OBJECT$") starthoursago=168 | stats count min(_time) as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`',
+            "earliest_offset": '$info_min_time$',
+            "latest_offset": '$info_max_time$'
+        }
+    ]
+

-    def buildDetection(self)->dict[str,Any]:
+    def buildDetection(self) -> tuple[dict[str, Any], str]:
         questions = NewContentQuestions.get_questions_detection()
-        answers: dict[str,str] = questionary.prompt(
-            questions,
-            kbi_msg="User did not answer all of the prompt questions. Exiting..."
+        answers: dict[str, str] = questionary.prompt(
+            questions,
+            kbi_msg="User did not answer all of the prompt questions. Exiting...",
+        )
         if not answers:
             raise ValueError("User didn't answer one or more questions!")
-        answers.update(answers)
-        answers['name'] = answers['detection_name']
-        del answers['detection_name']
-        answers['id'] = str(uuid.uuid4())
-        answers['version'] = 1
-        answers['date'] = datetime.today().strftime('%Y-%m-%d')
-        answers['author'] = answers['detection_author']
-        del answers['detection_author']
-        answers['data_source'] = answers['data_source']
-        answers['type'] = answers['detection_type']
-        del answers['detection_type']
-        answers['status'] = "production" #start everything as production since that's what we INTEND the content to become
-        answers['description'] = 'UPDATE_DESCRIPTION'
-        file_name = answers['name'].replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower()
-        answers['search'] = answers['detection_search'] + ' | `' + file_name + '_filter`'
-        del answers['detection_search']
-        answers['how_to_implement'] = 'UPDATE_HOW_TO_IMPLEMENT'
-        answers['known_false_positives'] = 'UPDATE_KNOWN_FALSE_POSITIVES'
-        answers['references'] = ['REFERENCE']
-        answers['tags'] = dict()
-        answers['tags']['analytic_story'] = ['UPDATE_STORY_NAME']
-        answers['tags']['asset_type'] = 'UPDATE asset_type'
-        answers['tags']['confidence'] = 'UPDATE value between 1-100'
-        answers['tags']['impact'] = 'UPDATE value between 1-100'
-        answers['tags']['message'] = 'UPDATE message'
-        answers['tags']['mitre_attack_id'] = [x.strip() for x in answers['mitre_attack_ids'].split(',')]
-        answers['tags']['observable'] = [{'name': 'UPDATE', 'type': 'UPDATE', 'role': ['UPDATE']}]
-        answers['tags']['product'] = ['Splunk Enterprise','Splunk Enterprise Security','Splunk Cloud']
-        answers['tags']['required_fields'] = ['UPDATE']
-        answers['tags']['risk_score'] = 'UPDATE (impact * confidence)/100'
-        answers['tags']['security_domain'] = answers['security_domain']
-        del answers["security_domain"]
-        answers['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE']
-
-        #generate the tests section
-        answers['tests'] = [
-            {
-                'name': "True Positive Test",
-                'attack_data': [
-                    {
-                        'data': "https://github.com/splunk/contentctl/wiki",
-                        "sourcetype": "UPDATE SOURCETYPE",
-                        "source": "UPDATE SOURCE"
-                    }
-                ]
-            }
-        ]
-        del answers["mitre_attack_ids"]
-        return answers

-
+        data_source_field = (
+            answers["data_source"] if len(answers["data_source"]) > 0 else [f"{NewContent.UPDATE_PREFIX} zero or more data_sources"]
+        )
+        file_name = (
+            answers["detection_name"]
+            .replace(" ", "_")
+            .replace("-", "_")
+            .replace(".", "_")
+            .replace("/", "_")
+            .lower()
+        )
+
+        #Minimum lenght for a mitre tactic is 5 characters: T1000
+        if len(answers["mitre_attack_ids"]) >= 5:
+            mitre_attack_ids = [x.strip() for x in answers["mitre_attack_ids"].split(",")]
+        else:
+            #string was too short, so just put a placeholder
+            mitre_attack_ids = [f"{NewContent.UPDATE_PREFIX} zero or more mitre_attack_ids"]
+
+        output_file_answers: dict[str, Any] = {
+            "name": answers["detection_name"],
+            "id": str(uuid.uuid4()),
+            "version": 1,
+            "date": datetime.today().strftime("%Y-%m-%d"),
+            "author": answers["detection_author"],
+            "status": "production", # start everything as production since that's what we INTEND the content to become
+            "type": answers["detection_type"],
+            "description": f"{NewContent.UPDATE_PREFIX} by providing a description of your search",
+            "data_source": data_source_field,
+            "search": f"{answers['detection_search']} | `{file_name}_filter`",
+            "how_to_implement": f"{NewContent.UPDATE_PREFIX} how to implement your search",
+            "known_false_positives": f"{NewContent.UPDATE_PREFIX} known false positives for your search",
+            "references": [f"{NewContent.UPDATE_PREFIX} zero or more http references to provide more information about your search"],
+            "drilldown_searches": NewContent.DEFAULT_DRILLDOWN_DEF,
+            "tags": {
+                "analytic_story": [f"{NewContent.UPDATE_PREFIX} by providing zero or more analytic stories"],
+                "asset_type": f"{NewContent.UPDATE_PREFIX} by providing and asset type from {list(AssetType._value2member_map_)}",
+                "confidence": f"{NewContent.UPDATE_PREFIX} by providing a value between 1-100",
+                "impact": f"{NewContent.UPDATE_PREFIX} by providing a value between 1-100",
+                "message": f"{NewContent.UPDATE_PREFIX} by providing a risk message. Fields in your search results can be referenced using $fieldName$",
+                "mitre_attack_id": mitre_attack_ids,
+                "observable": [
+                    {"name": f"{NewContent.UPDATE_PREFIX} the field name of the observable. This is a field that exists in your search results.", "type": f"{NewContent.UPDATE_PREFIX} the type of your observable from the list {list(SES_OBSERVABLE_TYPE_MAPPING.keys())}.", "role": [f"{NewContent.UPDATE_PREFIX} the role from the list {list(SES_OBSERVABLE_ROLE_MAPPING.keys())}"]}
+                ],
+                "product": [
+                    "Splunk Enterprise",
+                    "Splunk Enterprise Security",
+                    "Splunk Cloud",
+                ],
+                "security_domain": answers["security_domain"],
+                "cve": [f"{NewContent.UPDATE_PREFIX} with CVE(s) if applicable"],
+            },
+            "tests": [
+                {
+                    "name": "True Positive Test",
+                    "attack_data": [
+                        {
+                            "data": f"{NewContent.UPDATE_PREFIX} the data file to replay. Go to https://github.com/splunk/contentctl/wiki for information about the format of this field",
+                            "sourcetype": f"{NewContent.UPDATE_PREFIX} the sourcetype of your data file.",
+                            "source": f"{NewContent.UPDATE_PREFIX} the source of your datafile",
+                        }
+                    ],
+                }
+            ],
+        }
+
+        if answers["detection_type"] not in ["TTP", "Anomaly", "Correlation"]:
+            del output_file_answers["drilldown_searches"]
+
+        return output_file_answers, answers['detection_kind']
+
+    def buildStory(self) -> dict[str, Any]:
         questions = NewContentQuestions.get_questions_story()
         answers = questionary.prompt(
             questions,
@@ -96,12 +134,11 @@ class NewContent:
         del answers['usecase']
         answers['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE']
         return answers
-

     def execute(self, input_dto: new) -> None:
         if input_dto.type == NewContentType.detection:
-            content_dict = self.buildDetection()
-            subdirectory = pathlib.Path('detections') /
+            content_dict, detection_kind = self.buildDetection()
+            subdirectory = pathlib.Path('detections') / detection_kind
         elif input_dto.type == NewContentType.story:
             content_dict = self.buildStory()
             subdirectory = pathlib.Path('stories')
@@ -111,23 +148,3 @@ class NewContent:
         full_output_path = input_dto.path / subdirectory / SecurityContentObject_Abstract.contentNameToFileName(content_dict.get('name'))
         YmlWriter.writeYmlFile(str(full_output_path), content_dict)

-
-
-    def writeObjectNewContent(self, object: dict, subdirectory_name: str, type: NewContentType) -> None:
-        if type == NewContentType.detection:
-            file_path = os.path.join(self.output_path, 'detections', subdirectory_name, self.convertNameToFileName(object['name'], object['tags']['product']))
-            output_folder = pathlib.Path(self.output_path)/'detections'/subdirectory_name
-            #make sure the output folder exists for this detection
-            output_folder.mkdir(exist_ok=True)
-
-            YmlWriter.writeDetection(file_path, object)
-            print("Successfully created detection " + file_path)
-
-        elif type == NewContentType.story:
-            file_path = os.path.join(self.output_path, 'stories', self.convertNameToFileName(object['name'], object['tags']['product']))
-            YmlWriter.writeStory(file_path, object)
-            print("Successfully created story " + file_path)
-
-        else:
-            raise(Exception(f"Object Must be Story or Detection, but is not: {object}"))
-

contentctl/actions/test.py
CHANGED

@@ -1,7 +1,7 @@
 from dataclasses import dataclass
 from typing import List

-from contentctl.objects.config import test_common
+from contentctl.objects.config import test_common, Selected, Changes
 from contentctl.objects.enums import DetectionTestingMode, DetectionStatus, AnalyticsType
 from contentctl.objects.detection import Detection

@@ -78,10 +78,9 @@ class Test:
             input_dto=manager_input_dto, output_dto=output_dto
         )

-        mode = input_dto.config.getModeName()
         if len(input_dto.detections) == 0:
             print(
-                f"With Detection Testing Mode '{mode}', there were [0] detections found to test."
+                f"With Detection Testing Mode '{input_dto.config.mode.mode_name}', there were [0] detections found to test."
                 "\nAs such, we will quit immediately."
             )
             # Directly call stop so that the summary.yml will be generated. Of course it will not
@@ -89,8 +88,8 @@ class Test:
             # detections were tested.
             file.stop()
         else:
-            print(f"MODE: [{mode}] - Test [{len(input_dto.detections)}] detections")
-            if mode
+            print(f"MODE: [{input_dto.config.mode.mode_name}] - Test [{len(input_dto.detections)}] detections")
+            if isinstance(input_dto.config.mode, Selected) or isinstance(input_dto.config.mode, Changes):
                 files_string = '\n- '.join(
                     [str(pathlib.Path(detection.file_path).relative_to(input_dto.config.path)) for detection in input_dto.detections]
                 )

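The mode handling above (and the matching `mode.mode_name` change in DetectionTestingView.py) swaps string comparisons for isinstance checks against typed mode objects on the config. A rough, self-contained sketch of the pattern with simplified stand-in classes, not the real models from contentctl.objects.config:

from dataclasses import dataclass, field

@dataclass
class Selected:                  # stand-in for the real Selected mode config
    files: list[str] = field(default_factory=list)
    mode_name: str = "selected"

@dataclass
class Changes:                   # stand-in for the real Changes mode config
    target_branch: str = "main"
    mode_name: str = "changes"

def summarize_mode(mode: object) -> str:
    # Dispatch on the mode object's type instead of a raw mode-name string.
    if isinstance(mode, (Selected, Changes)):
        return f"MODE: [{mode.mode_name}] - testing an explicitly scoped set of detections"
    return f"MODE: [{getattr(mode, 'mode_name', 'all')}] - testing all detections"

print(summarize_mode(Selected(files=["detections/endpoint/anomalous_usage_of_7zip.yml"])))
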
contentctl/actions/validate.py
CHANGED

@@ -6,6 +6,7 @@ from contentctl.objects.config import validate
 from contentctl.enrichments.attack_enrichment import AttackEnrichment
 from contentctl.enrichments.cve_enrichment import CveEnrichment
 from contentctl.objects.atomic import AtomicEnrichment
+from contentctl.objects.lookup import FileBackedLookup
 from contentctl.helper.utils import Utils
 from contentctl.objects.data_source import DataSource
 from contentctl.helper.splunk_app import SplunkApp
@@ -64,7 +65,7 @@ class Validate:
         lookupsDirectory = repo_path/"lookups"

         # Get all of the files referneced by Lookups
-        usedLookupFiles:list[pathlib.Path] = [lookup.filename for lookup in director_output_dto.lookups if lookup
+        usedLookupFiles:list[pathlib.Path] = [lookup.filename for lookup in director_output_dto.lookups if isinstance(lookup, FileBackedLookup)] + [lookup.file_path for lookup in director_output_dto.lookups if lookup.file_path is not None]

         # Get all of the mlmodel and csv files in the lookups directory
         csvAndMlmodelFiles = Utils.get_security_content_files_from_directory(lookupsDirectory, allowedFileExtensions=[".yml",".csv",".mlmodel"], fileExtensionsToReturn=[".csv",".mlmodel"])
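
The usedLookupFiles change above narrows the first comprehension to lookups that actually carry a bundled file. A schematic, self-contained version of that filtering with stand-in classes (the real FileBackedLookup lives in contentctl.objects.lookup):

from dataclasses import dataclass
from pathlib import Path

@dataclass
class Lookup:                     # stand-in base class
    name: str
    file_path: Path | None = None

@dataclass
class FileBackedLookup(Lookup):   # stand-in for the CSV/MLModel-backed variant
    filename: Path = Path("lookups/example.csv")

def used_lookup_files(lookups: list[Lookup]) -> list[Path]:
    # Only file-backed lookups contribute a packaged filename; any lookup may
    # also point at an explicit file_path on disk.
    files = [lookup.filename for lookup in lookups if isinstance(lookup, FileBackedLookup)]
    files += [lookup.file_path for lookup in lookups if lookup.file_path is not None]
    return files

print(used_lookup_files([FileBackedLookup(name="example_lookup")]))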
|