contentctl 4.4.7__py3-none-any.whl → 5.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. contentctl/__init__.py +1 -1
  2. contentctl/actions/build.py +102 -57
  3. contentctl/actions/deploy_acs.py +29 -24
  4. contentctl/actions/detection_testing/DetectionTestingManager.py +66 -42
  5. contentctl/actions/detection_testing/GitService.py +134 -76
  6. contentctl/actions/detection_testing/generate_detection_coverage_badge.py +48 -30
  7. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +192 -147
  8. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +45 -32
  9. contentctl/actions/detection_testing/progress_bar.py +9 -6
  10. contentctl/actions/detection_testing/views/DetectionTestingView.py +16 -19
  11. contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +1 -5
  12. contentctl/actions/detection_testing/views/DetectionTestingViewFile.py +2 -2
  13. contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +1 -4
  14. contentctl/actions/doc_gen.py +9 -5
  15. contentctl/actions/initialize.py +45 -33
  16. contentctl/actions/inspect.py +118 -61
  17. contentctl/actions/new_content.py +155 -108
  18. contentctl/actions/release_notes.py +276 -146
  19. contentctl/actions/reporting.py +23 -19
  20. contentctl/actions/test.py +33 -28
  21. contentctl/actions/validate.py +55 -34
  22. contentctl/api.py +54 -45
  23. contentctl/contentctl.py +124 -90
  24. contentctl/enrichments/attack_enrichment.py +112 -72
  25. contentctl/enrichments/cve_enrichment.py +34 -28
  26. contentctl/enrichments/splunk_app_enrichment.py +38 -36
  27. contentctl/helper/link_validator.py +101 -78
  28. contentctl/helper/splunk_app.py +69 -41
  29. contentctl/helper/utils.py +58 -53
  30. contentctl/input/director.py +68 -36
  31. contentctl/input/new_content_questions.py +27 -35
  32. contentctl/input/yml_reader.py +28 -18
  33. contentctl/objects/abstract_security_content_objects/detection_abstract.py +303 -259
  34. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +115 -52
  35. contentctl/objects/alert_action.py +10 -9
  36. contentctl/objects/annotated_types.py +1 -1
  37. contentctl/objects/atomic.py +65 -54
  38. contentctl/objects/base_test.py +5 -3
  39. contentctl/objects/base_test_result.py +19 -11
  40. contentctl/objects/baseline.py +62 -30
  41. contentctl/objects/baseline_tags.py +30 -24
  42. contentctl/objects/config.py +790 -597
  43. contentctl/objects/constants.py +33 -56
  44. contentctl/objects/correlation_search.py +150 -136
  45. contentctl/objects/dashboard.py +55 -41
  46. contentctl/objects/data_source.py +16 -17
  47. contentctl/objects/deployment.py +43 -44
  48. contentctl/objects/deployment_email.py +3 -2
  49. contentctl/objects/deployment_notable.py +4 -2
  50. contentctl/objects/deployment_phantom.py +7 -6
  51. contentctl/objects/deployment_rba.py +3 -2
  52. contentctl/objects/deployment_scheduling.py +3 -2
  53. contentctl/objects/deployment_slack.py +3 -2
  54. contentctl/objects/detection.py +5 -2
  55. contentctl/objects/detection_metadata.py +1 -0
  56. contentctl/objects/detection_stanza.py +7 -2
  57. contentctl/objects/detection_tags.py +58 -103
  58. contentctl/objects/drilldown.py +66 -34
  59. contentctl/objects/enums.py +81 -100
  60. contentctl/objects/errors.py +16 -24
  61. contentctl/objects/integration_test.py +3 -3
  62. contentctl/objects/integration_test_result.py +1 -0
  63. contentctl/objects/investigation.py +59 -36
  64. contentctl/objects/investigation_tags.py +30 -19
  65. contentctl/objects/lookup.py +304 -101
  66. contentctl/objects/macro.py +55 -39
  67. contentctl/objects/manual_test.py +3 -3
  68. contentctl/objects/manual_test_result.py +1 -0
  69. contentctl/objects/mitre_attack_enrichment.py +17 -16
  70. contentctl/objects/notable_action.py +2 -1
  71. contentctl/objects/notable_event.py +1 -3
  72. contentctl/objects/playbook.py +37 -35
  73. contentctl/objects/playbook_tags.py +23 -13
  74. contentctl/objects/rba.py +96 -0
  75. contentctl/objects/risk_analysis_action.py +15 -11
  76. contentctl/objects/risk_event.py +110 -160
  77. contentctl/objects/risk_object.py +1 -0
  78. contentctl/objects/savedsearches_conf.py +9 -7
  79. contentctl/objects/security_content_object.py +5 -2
  80. contentctl/objects/story.py +54 -49
  81. contentctl/objects/story_tags.py +56 -45
  82. contentctl/objects/test_attack_data.py +2 -1
  83. contentctl/objects/test_group.py +5 -2
  84. contentctl/objects/threat_object.py +1 -0
  85. contentctl/objects/throttling.py +27 -18
  86. contentctl/objects/unit_test.py +3 -4
  87. contentctl/objects/unit_test_baseline.py +5 -5
  88. contentctl/objects/unit_test_result.py +6 -6
  89. contentctl/output/api_json_output.py +233 -220
  90. contentctl/output/attack_nav_output.py +21 -21
  91. contentctl/output/attack_nav_writer.py +29 -37
  92. contentctl/output/conf_output.py +235 -172
  93. contentctl/output/conf_writer.py +201 -125
  94. contentctl/output/data_source_writer.py +38 -26
  95. contentctl/output/doc_md_output.py +53 -27
  96. contentctl/output/jinja_writer.py +19 -15
  97. contentctl/output/json_writer.py +21 -11
  98. contentctl/output/svg_output.py +56 -38
  99. contentctl/output/templates/analyticstories_detections.j2 +2 -2
  100. contentctl/output/templates/analyticstories_stories.j2 +1 -1
  101. contentctl/output/templates/collections.j2 +1 -1
  102. contentctl/output/templates/doc_detections.j2 +0 -5
  103. contentctl/output/templates/es_investigations_investigations.j2 +1 -1
  104. contentctl/output/templates/es_investigations_stories.j2 +1 -1
  105. contentctl/output/templates/savedsearches_baselines.j2 +2 -2
  106. contentctl/output/templates/savedsearches_detections.j2 +10 -11
  107. contentctl/output/templates/savedsearches_investigations.j2 +2 -2
  108. contentctl/output/templates/transforms.j2 +6 -8
  109. contentctl/output/yml_writer.py +29 -20
  110. contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +16 -34
  111. contentctl/templates/stories/cobalt_strike.yml +1 -0
  112. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/METADATA +5 -4
  113. contentctl-5.0.0.dist-info/RECORD +168 -0
  114. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/WHEEL +1 -1
  115. contentctl/actions/initialize_old.py +0 -245
  116. contentctl/objects/event_source.py +0 -11
  117. contentctl/objects/observable.py +0 -37
  118. contentctl/output/detection_writer.py +0 -28
  119. contentctl/output/new_content_yml_output.py +0 -56
  120. contentctl/output/yml_output.py +0 -66
  121. contentctl-4.4.7.dist-info/RECORD +0 -173
  122. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/LICENSE.md +0 -0
  123. {contentctl-4.4.7.dist-info → contentctl-5.0.0.dist-info}/entry_points.txt +0 -0
@@ -1,65 +1,65 @@
1
1
  from __future__ import annotations
2
- from typing import TYPE_CHECKING, Union, Optional, List, Any, Annotated
3
- import re
2
+
4
3
  import pathlib
5
- from enum import Enum
4
+ import re
5
+ from enum import StrEnum
6
+ from typing import TYPE_CHECKING, Annotated, Any, List, Optional, Union
6
7
 
7
8
  from pydantic import (
8
- field_validator,
9
- model_validator,
10
- ValidationInfo,
11
9
  Field,
10
+ FilePath,
11
+ ValidationInfo,
12
12
  computed_field,
13
+ field_validator,
13
14
  model_serializer,
14
- ConfigDict,
15
- FilePath
15
+ model_validator,
16
16
  )
17
17
 
18
+ from contentctl.objects.lookup import FileBackedLookup, KVStoreLookup, Lookup
18
19
  from contentctl.objects.macro import Macro
19
- from contentctl.objects.lookup import Lookup
20
+
20
21
  if TYPE_CHECKING:
21
22
  from contentctl.input.director import DirectorOutputDto
22
23
  from contentctl.objects.baseline import Baseline
23
24
  from contentctl.objects.config import CustomApp
24
-
25
- from contentctl.objects.security_content_object import SecurityContentObject
26
- from contentctl.objects.enums import AnalyticsType
27
- from contentctl.objects.enums import DataModel
28
- from contentctl.objects.enums import DetectionStatus
29
- from contentctl.objects.enums import NistCategory
30
25
 
31
- from contentctl.objects.detection_tags import DetectionTags
32
- from contentctl.objects.deployment import Deployment
33
- from contentctl.objects.unit_test import UnitTest
34
- from contentctl.objects.manual_test import ManualTest
35
- from contentctl.objects.test_group import TestGroup
36
- from contentctl.objects.integration_test import IntegrationTest
37
- from contentctl.objects.data_source import DataSource
38
- from contentctl.objects.base_test_result import TestResultStatus
39
- from contentctl.objects.drilldown import Drilldown, DRILLDOWN_SEARCH_PLACEHOLDER
40
- from contentctl.objects.enums import ProvidingTechnology
41
- from contentctl.enrichments.cve_enrichment import CveEnrichmentObj
42
26
  import datetime
27
+
28
+ from contentctl.enrichments.cve_enrichment import CveEnrichmentObj
29
+ from contentctl.objects.base_test_result import TestResultStatus
43
30
  from contentctl.objects.constants import (
31
+ CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE,
32
+ CONTENTCTL_MAX_SEARCH_NAME_LENGTH,
44
33
  ES_MAX_STANZA_LENGTH,
45
34
  ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE,
46
- CONTENTCTL_MAX_SEARCH_NAME_LENGTH,
47
- CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE
48
35
  )
36
+ from contentctl.objects.data_source import DataSource
37
+ from contentctl.objects.deployment import Deployment
38
+ from contentctl.objects.detection_tags import DetectionTags
39
+ from contentctl.objects.drilldown import DRILLDOWN_SEARCH_PLACEHOLDER, Drilldown
40
+ from contentctl.objects.enums import (
41
+ AnalyticsType,
42
+ DataModel,
43
+ DetectionStatus,
44
+ NistCategory,
45
+ ProvidingTechnology,
46
+ )
47
+ from contentctl.objects.integration_test import IntegrationTest
48
+ from contentctl.objects.manual_test import ManualTest
49
+ from contentctl.objects.rba import RBAObject
50
+ from contentctl.objects.security_content_object import SecurityContentObject
51
+ from contentctl.objects.test_group import TestGroup
52
+ from contentctl.objects.unit_test import UnitTest
49
53
 
50
54
  MISSING_SOURCES: set[str] = set()
51
55
 
52
56
  # Those AnalyticsTypes that we do not test via contentctl
53
- SKIPPED_ANALYTICS_TYPES: set[str] = {
54
- AnalyticsType.Correlation.value
55
- }
57
+ SKIPPED_ANALYTICS_TYPES: set[str] = {AnalyticsType.Correlation}
56
58
 
57
59
 
58
- # TODO (#266): disable the use_enum_values configuration
59
60
  class Detection_Abstract(SecurityContentObject):
60
- model_config = ConfigDict(use_enum_values=True)
61
- name:str = Field(...,max_length=CONTENTCTL_MAX_SEARCH_NAME_LENGTH)
62
- #contentType: SecurityContentType = SecurityContentType.detections
61
+ name: str = Field(..., max_length=CONTENTCTL_MAX_SEARCH_NAME_LENGTH)
62
+ # contentType: SecurityContentType = SecurityContentType.detections
63
63
  type: AnalyticsType = Field(...)
64
64
  status: DetectionStatus = Field(...)
65
65
  data_source: list[str] = []
@@ -67,14 +67,15 @@ class Detection_Abstract(SecurityContentObject):
67
67
  search: str = Field(...)
68
68
  how_to_implement: str = Field(..., min_length=4)
69
69
  known_false_positives: str = Field(..., min_length=4)
70
+ rba: Optional[RBAObject] = Field(default=None)
70
71
  explanation: None | str = Field(
71
72
  default=None,
72
- exclude=True, #Don't serialize this value when dumping the object
73
+ exclude=True, # Don't serialize this value when dumping the object
73
74
  description="Provide an explanation to be included "
74
75
  "in the 'Explanation' field of the Detection in "
75
76
  "the Use Case Library. If this field is not "
76
77
  "defined in the YML, it will default to the "
77
- "value of the 'description' field when "
78
+ "value of the 'description' field when "
78
79
  "serialized in analyticstories_detections.j2",
79
80
  )
80
81
 
@@ -85,36 +86,49 @@ class Detection_Abstract(SecurityContentObject):
85
86
  # default mode, 'smart'
86
87
  # https://docs.pydantic.dev/latest/concepts/unions/#left-to-right-mode
87
88
  # https://github.com/pydantic/pydantic/issues/9101#issuecomment-2019032541
88
- tests: List[Annotated[Union[UnitTest, IntegrationTest, ManualTest], Field(union_mode='left_to_right')]] = []
89
+ tests: List[
90
+ Annotated[
91
+ Union[UnitTest, IntegrationTest, ManualTest],
92
+ Field(union_mode="left_to_right"),
93
+ ]
94
+ ] = []
89
95
  # A list of groups of tests, relying on the same data
90
96
  test_groups: list[TestGroup] = []
91
97
 
92
98
  data_source_objects: list[DataSource] = []
93
- drilldown_searches: list[Drilldown] = Field(default=[], description="A list of Drilldowns that should be included with this search")
99
+ drilldown_searches: list[Drilldown] = Field(
100
+ default=[],
101
+ description="A list of Drilldowns that should be included with this search",
102
+ )
94
103
 
95
- def get_conf_stanza_name(self, app:CustomApp)->str:
96
- stanza_name = CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE.format(app_label=app.label, detection_name=self.name)
104
+ def get_conf_stanza_name(self, app: CustomApp) -> str:
105
+ stanza_name = CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE.format(
106
+ app_label=app.label, detection_name=self.name
107
+ )
97
108
  self.check_conf_stanza_max_length(stanza_name)
98
109
  return stanza_name
99
-
100
110
 
101
- def get_action_dot_correlationsearch_dot_label(self, app:CustomApp, max_stanza_length:int=ES_MAX_STANZA_LENGTH)->str:
111
+ def get_action_dot_correlationsearch_dot_label(
112
+ self, app: CustomApp, max_stanza_length: int = ES_MAX_STANZA_LENGTH
113
+ ) -> str:
102
114
  stanza_name = self.get_conf_stanza_name(app)
103
- stanza_name_after_saving_in_es = ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE.format(
104
- security_domain_value = self.tags.security_domain.value,
105
- search_name = stanza_name
115
+ stanza_name_after_saving_in_es = (
116
+ ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE.format(
117
+ security_domain_value=self.tags.security_domain, search_name=stanza_name
106
118
  )
107
-
108
-
119
+ )
120
+
109
121
  if len(stanza_name_after_saving_in_es) > max_stanza_length:
110
- raise ValueError(f"label may only be {max_stanza_length} characters to allow updating in-product, "
111
- f"but stanza was actually {len(stanza_name_after_saving_in_es)} characters: '{stanza_name_after_saving_in_es}' ")
112
-
113
- return stanza_name
122
+ raise ValueError(
123
+ f"label may only be {max_stanza_length} characters to allow updating in-product, "
124
+ f"but stanza was actually {len(stanza_name_after_saving_in_es)} characters: '{stanza_name_after_saving_in_es}' "
125
+ )
126
+
127
+ return stanza_name
114
128
 
115
129
  @field_validator("search", mode="before")
116
130
  @classmethod
117
- def validate_presence_of_filter_macro(cls, value:str, info:ValidationInfo)->str:
131
+ def validate_presence_of_filter_macro(cls, value: str, info: ValidationInfo) -> str:
118
132
  """
119
133
  Validates that, if required to be present, the filter macro is present with the proper name.
120
134
  The filter macro MUST be derived from the name of the detection
@@ -128,7 +142,7 @@ class Detection_Abstract(SecurityContentObject):
128
142
  Returns:
129
143
  str: The search, as an SPL formatted string.
130
144
  """
131
-
145
+
132
146
  # Otherwise, the search is SPL.
133
147
 
134
148
  # In the future, we will may add support that makes the inclusion of the
@@ -168,7 +182,7 @@ class Detection_Abstract(SecurityContentObject):
168
182
  the model from the list of unit tests. Also, preemptively skips all manual tests, as well as
169
183
  tests for experimental/deprecated detections and Correlation type detections.
170
184
  """
171
-
185
+
172
186
  # Since ManualTest and UnitTest are not differentiable without looking at the manual_test
173
187
  # tag, Pydantic builds all tests as UnitTest objects. If we see the manual_test flag, we
174
188
  # convert these to ManualTest
@@ -181,10 +195,7 @@ class Detection_Abstract(SecurityContentObject):
181
195
  f"but encountered a {type(test)}."
182
196
  )
183
197
  # Create the manual test and skip it upon creation (cannot test via contentctl)
184
- manual_test = ManualTest(
185
- name=test.name,
186
- attack_data=test.attack_data
187
- )
198
+ manual_test = ManualTest(name=test.name, attack_data=test.attack_data)
188
199
  tmp.append(manual_test)
189
200
  self.tests = tmp
190
201
 
@@ -210,8 +221,10 @@ class Detection_Abstract(SecurityContentObject):
210
221
  # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
211
222
 
212
223
  # Skip tests for non-production detections
213
- if self.status != DetectionStatus.production.value: # type: ignore
214
- self.skip_all_tests(f"TEST SKIPPED: Detection is non-production ({self.status})")
224
+ if self.status != DetectionStatus.production:
225
+ self.skip_all_tests(
226
+ f"TEST SKIPPED: Detection is non-production ({self.status})"
227
+ )
215
228
 
216
229
  # Skip tests for detecton types like Correlation which are not supported via contentctl
217
230
  if self.type in SKIPPED_ANALYTICS_TYPES:
@@ -238,7 +251,10 @@ class Detection_Abstract(SecurityContentObject):
238
251
  # If the result/status of any test has not yet been set, return None
239
252
  if test.result is None or test.result.status is None:
240
253
  return None
241
- elif test.result.status == TestResultStatus.ERROR or test.result.status == TestResultStatus.FAIL:
254
+ elif (
255
+ test.result.status == TestResultStatus.ERROR
256
+ or test.result.status == TestResultStatus.FAIL
257
+ ):
242
258
  # If any test failed or errored, return fail (we don't return the error state at
243
259
  # the aggregate detection level)
244
260
  return TestResultStatus.FAIL
@@ -263,29 +279,24 @@ class Detection_Abstract(SecurityContentObject):
263
279
  @computed_field
264
280
  @property
265
281
  def datamodel(self) -> List[DataModel]:
266
- return [dm for dm in DataModel if dm.value in self.search]
267
-
268
-
269
-
282
+ return [dm for dm in DataModel if dm in self.search]
270
283
 
271
284
  @computed_field
272
285
  @property
273
286
  def source(self) -> str:
274
287
  return self.file_path.absolute().parent.name
275
-
276
288
 
277
289
  deployment: Deployment = Field({})
278
290
 
279
291
  @computed_field
280
292
  @property
281
293
  def annotations(self) -> dict[str, Union[List[str], int, str]]:
282
-
283
294
  annotations_dict: dict[str, str | list[str] | int] = {}
284
- annotations_dict["analytic_story"] = [story.name for story in self.tags.analytic_story]
285
- annotations_dict["confidence"] = self.tags.confidence
295
+ annotations_dict["analytic_story"] = [
296
+ story.name for story in self.tags.analytic_story
297
+ ]
286
298
  if len(self.tags.cve or []) > 0:
287
299
  annotations_dict["cve"] = self.tags.cve
288
- annotations_dict["impact"] = self.tags.impact
289
300
  annotations_dict["type"] = self.type
290
301
  annotations_dict["type_list"] = [self.type]
291
302
  # annotations_dict["version"] = self.version
@@ -308,13 +319,15 @@ class Detection_Abstract(SecurityContentObject):
308
319
  def mappings(self) -> dict[str, List[str]]:
309
320
  mappings: dict[str, Any] = {}
310
321
  if len(self.tags.cis20) > 0:
311
- mappings["cis20"] = [tag.value for tag in self.tags.cis20]
322
+ mappings["cis20"] = [tag for tag in self.tags.cis20]
312
323
  if len(self.tags.kill_chain_phases) > 0:
313
- mappings['kill_chain_phases'] = [phase.value for phase in self.tags.kill_chain_phases]
324
+ mappings["kill_chain_phases"] = [
325
+ phase for phase in self.tags.kill_chain_phases
326
+ ]
314
327
  if len(self.tags.mitre_attack_id) > 0:
315
- mappings['mitre_attack'] = self.tags.mitre_attack_id
328
+ mappings["mitre_attack"] = self.tags.mitre_attack_id
316
329
  if len(self.tags.nist) > 0:
317
- mappings['nist'] = [category.value for category in self.tags.nist]
330
+ mappings["nist"] = [category for category in self.tags.nist]
318
331
 
319
332
  # No need to sort the dict! It has been constructed in-order.
320
333
  # However, if this logic is changed, then consider reordering or
@@ -329,8 +342,10 @@ class Detection_Abstract(SecurityContentObject):
329
342
 
330
343
  def cve_enrichment_func(self, __context: Any):
331
344
  if len(self.cve_enrichment) > 0:
332
- raise ValueError(f"Error, field 'cve_enrichment' should be empty and "
333
- f"dynamically populated at runtime. Instead, this field contained: {self.cve_enrichment}")
345
+ raise ValueError(
346
+ f"Error, field 'cve_enrichment' should be empty and "
347
+ f"dynamically populated at runtime. Instead, this field contained: {self.cve_enrichment}"
348
+ )
334
349
 
335
350
  output_dto: Union[DirectorOutputDto, None] = __context.get("output_dto", None)
336
351
  if output_dto is None:
@@ -340,7 +355,11 @@ class Detection_Abstract(SecurityContentObject):
340
355
 
341
356
  for cve_id in self.tags.cve:
342
357
  try:
343
- enriched_cves.append(output_dto.cve_enrichment.enrich_cve(cve_id, raise_exception_on_failure=False))
358
+ enriched_cves.append(
359
+ output_dto.cve_enrichment.enrich_cve(
360
+ cve_id, raise_exception_on_failure=False
361
+ )
362
+ )
344
363
  except Exception as e:
345
364
  raise ValueError(f"{e}")
346
365
  self.cve_enrichment = enriched_cves
@@ -352,7 +371,7 @@ class Detection_Abstract(SecurityContentObject):
352
371
  @property
353
372
  def nes_fields(self) -> Optional[str]:
354
373
  if self.deployment.alert_action.notable is not None:
355
- return ','.join(self.deployment.alert_action.notable.nes_fields)
374
+ return ",".join(self.deployment.alert_action.notable.nes_fields)
356
375
  else:
357
376
  return None
358
377
 
@@ -361,70 +380,28 @@ class Detection_Abstract(SecurityContentObject):
361
380
  def providing_technologies(self) -> List[ProvidingTechnology]:
362
381
  return ProvidingTechnology.getProvidingTechFromSearch(self.search)
363
382
 
364
- # TODO (#247): Refactor the risk property of detection_abstract
365
383
  @computed_field
366
384
  @property
367
385
  def risk(self) -> list[dict[str, Any]]:
368
386
  risk_objects: list[dict[str, str | int]] = []
369
- # TODO (#246): "User Name" type should map to a "user" risk object and not "other"
370
- risk_object_user_types = {'user', 'username', 'email address'}
371
- risk_object_system_types = {'device', 'endpoint', 'hostname', 'ip address'}
372
- process_threat_object_types = {'process name', 'process'}
373
- file_threat_object_types = {'file name', 'file', 'file hash'}
374
- url_threat_object_types = {'url string', 'url'}
375
- ip_threat_object_types = {'ip address'}
376
-
377
- for entity in self.tags.observable:
378
- risk_object: dict[str, str | int] = dict()
379
- if 'Victim' in entity.role and entity.type.lower() in risk_object_user_types:
380
- risk_object['risk_object_type'] = 'user'
381
- risk_object['risk_object_field'] = entity.name
382
- risk_object['risk_score'] = self.tags.risk_score
383
- risk_objects.append(risk_object)
384
-
385
- elif 'Victim' in entity.role and entity.type.lower() in risk_object_system_types:
386
- risk_object['risk_object_type'] = 'system'
387
- risk_object['risk_object_field'] = entity.name
388
- risk_object['risk_score'] = self.tags.risk_score
389
- risk_objects.append(risk_object)
390
-
391
- elif 'Attacker' in entity.role and entity.type.lower() in process_threat_object_types:
392
- risk_object['threat_object_field'] = entity.name
393
- risk_object['threat_object_type'] = "process"
394
- risk_objects.append(risk_object)
395
-
396
- elif 'Attacker' in entity.role and entity.type.lower() in file_threat_object_types:
397
- risk_object['threat_object_field'] = entity.name
398
- risk_object['threat_object_type'] = "file_name"
399
- risk_objects.append(risk_object)
400
-
401
- elif 'Attacker' in entity.role and entity.type.lower() in ip_threat_object_types:
402
- risk_object['threat_object_field'] = entity.name
403
- risk_object['threat_object_type'] = "ip_address"
404
- risk_objects.append(risk_object)
405
-
406
- elif 'Attacker' in entity.role and entity.type.lower() in url_threat_object_types:
407
- risk_object['threat_object_field'] = entity.name
408
- risk_object['threat_object_type'] = "url"
409
- risk_objects.append(risk_object)
410
-
411
- elif 'Attacker' in entity.role:
412
- risk_object['threat_object_field'] = entity.name
413
- risk_object['threat_object_type'] = entity.type.lower()
414
- risk_objects.append(risk_object)
415
-
416
- else:
417
- risk_object['risk_object_type'] = 'other'
418
- risk_object['risk_object_field'] = entity.name
419
- risk_object['risk_score'] = self.tags.risk_score
420
- risk_objects.append(risk_object)
421
- continue
422
387
 
388
+ for entity in self.rba.risk_objects:
389
+ risk_object: dict[str, str | int] = dict()
390
+ risk_object["risk_object_type"] = entity.type
391
+ risk_object["risk_object_field"] = entity.field
392
+ risk_object["risk_score"] = entity.score
393
+ risk_objects.append(risk_object)
394
+
395
+ for entity in self.rba.threat_objects:
396
+ threat_object: dict[str, str] = dict()
397
+ threat_object["threat_object_field"] = entity.field
398
+ threat_object["threat_object_type"] = entity.type
399
+ risk_objects.append(threat_object)
423
400
  return risk_objects
424
401
 
425
402
  @computed_field
426
403
  @property
427
- def metadata(self) -> dict[str, str|float]:
404
+ def metadata(self) -> dict[str, str | float]:
428
405
  # NOTE: we ignore the type error around self.status because we are using Pydantic's
429
406
  # use_enum_values configuration
430
407
  # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
@@ -434,10 +411,19 @@ class Detection_Abstract(SecurityContentObject):
434
411
  # dict below) should not have any impact, but renaming or removing any of these fields will
435
412
  # break the `inspect` action.
436
413
  return {
437
- 'detection_id': str(self.id),
438
- 'deprecated': '1' if self.status == DetectionStatus.deprecated.value else '0', # type: ignore
439
- 'detection_version': str(self.version),
440
- 'publish_time': datetime.datetime(self.date.year,self.date.month,self.date.day,0,0,0,0,tzinfo=datetime.timezone.utc).timestamp()
414
+ "detection_id": str(self.id),
415
+ "deprecated": "1" if self.status == DetectionStatus.deprecated else "0", # type: ignore
416
+ "detection_version": str(self.version),
417
+ "publish_time": datetime.datetime(
418
+ self.date.year,
419
+ self.date.month,
420
+ self.date.day,
421
+ 0,
422
+ 0,
423
+ 0,
424
+ 0,
425
+ tzinfo=datetime.timezone.utc,
426
+ ).timestamp(),
441
427
  }
442
428
 
443
429
  @model_serializer
@@ -456,6 +442,11 @@ class Detection_Abstract(SecurityContentObject):
456
442
  "source": self.source,
457
443
  "nes_fields": self.nes_fields,
458
444
  }
445
+ if self.rba is not None:
446
+ model["risk_severity"] = self.rba.severity
447
+ model["tags"]["risk_score"] = self.rba.risk_score
448
+ else:
449
+ model["tags"]["risk_score"] = 0
459
450
 
460
451
  # Only a subset of macro fields are required:
461
452
  all_macros: list[dict[str, str | list[str]]] = []
@@ -463,43 +454,44 @@ class Detection_Abstract(SecurityContentObject):
463
454
  macro_dump: dict[str, str | list[str]] = {
464
455
  "name": macro.name,
465
456
  "definition": macro.definition,
466
- "description": macro.description
457
+ "description": macro.description,
467
458
  }
468
459
  if len(macro.arguments) > 0:
469
- macro_dump['arguments'] = macro.arguments
460
+ macro_dump["arguments"] = macro.arguments
470
461
 
471
462
  all_macros.append(macro_dump)
472
- model['macros'] = all_macros # type: ignore
463
+ model["macros"] = all_macros # type: ignore
473
464
 
474
465
  all_lookups: list[dict[str, str | int | None]] = []
475
466
  for lookup in self.lookups:
476
- if lookup.collection is not None:
467
+ if isinstance(lookup, KVStoreLookup):
477
468
  all_lookups.append(
478
469
  {
479
470
  "name": lookup.name,
480
471
  "description": lookup.description,
481
472
  "collection": lookup.collection,
482
473
  "case_sensitive_match": None,
483
- "fields_list": lookup.fields_list
474
+ "fields_list": lookup.fields_to_fields_list_conf_format,
484
475
  }
485
476
  )
486
- elif lookup.filename is not None:
477
+ elif isinstance(lookup, FileBackedLookup):
487
478
  all_lookups.append(
488
479
  {
489
480
  "name": lookup.name,
490
481
  "description": lookup.description,
491
482
  "filename": lookup.filename.name,
492
483
  "default_match": "true" if lookup.default_match else "false",
493
- "case_sensitive_match": "true" if lookup.case_sensitive_match else "false",
494
- "match_type": lookup.match_type,
484
+ "case_sensitive_match": "true"
485
+ if lookup.case_sensitive_match
486
+ else "false",
487
+ "match_type": lookup.match_type_to_conf_format,
495
488
  "min_matches": lookup.min_matches,
496
- "fields_list": lookup.fields_list
497
489
  }
498
490
  )
499
- model['lookups'] = all_lookups # type: ignore
491
+ model["lookups"] = all_lookups # type: ignore
500
492
 
501
493
  # Combine fields from this model with fields from parent
502
- super_fields.update(model) # type: ignore
494
+ super_fields.update(model) # type: ignore
503
495
 
504
496
  # return the model
505
497
  return super_fields
@@ -532,7 +524,7 @@ class Detection_Abstract(SecurityContentObject):
532
524
  updated_data_source_names: set[str] = set()
533
525
 
534
526
  for ds in self.data_source:
535
- split_data_sources = {d.strip() for d in ds.split('AND')}
527
+ split_data_sources = {d.strip() for d in ds.split("AND")}
536
528
  updated_data_source_names.update(split_data_sources)
537
529
 
538
530
  sources = sorted(list(updated_data_source_names))
@@ -541,7 +533,9 @@ class Detection_Abstract(SecurityContentObject):
541
533
  missing_sources: list[str] = []
542
534
  for source in sources:
543
535
  try:
544
- matched_data_sources += DataSource.mapNamesToSecurityContentObjects([source], director)
536
+ matched_data_sources += DataSource.mapNamesToSecurityContentObjects(
537
+ [source], director
538
+ )
545
539
  except Exception:
546
540
  # We gobble this up and add it to a global set so that we
547
541
  # can print it ONCE at the end of the build of datasources.
@@ -558,7 +552,7 @@ class Detection_Abstract(SecurityContentObject):
558
552
  self.data_source_objects = matched_data_sources
559
553
 
560
554
  for story in self.tags.analytic_story:
561
- story.detections.append(self)
555
+ story.detections.append(self)
562
556
 
563
557
  self.cve_enrichment_func(__context)
564
558
 
@@ -569,32 +563,39 @@ class Detection_Abstract(SecurityContentObject):
569
563
  # 1 of the drilldowns contains the string Drilldown.SEARCH_PLACEHOLDER.
570
564
  # This is presently a requirement when 1 or more drilldowns are added to a detection.
571
565
  # Note that this is only required for production searches that are not hunting
572
-
573
- if self.type == AnalyticsType.Hunting.value or self.status != DetectionStatus.production.value:
574
- #No additional check need to happen on the potential drilldowns.
566
+
567
+ if (
568
+ self.type == AnalyticsType.Hunting
569
+ or self.status != DetectionStatus.production
570
+ ):
571
+ # No additional check need to happen on the potential drilldowns.
575
572
  pass
576
573
  else:
577
574
  found_placeholder = False
578
575
  if len(self.drilldown_searches) < 2:
579
- raise ValueError(f"This detection is required to have 2 drilldown_searches, but only has [{len(self.drilldown_searches)}]")
576
+ raise ValueError(
577
+ f"This detection is required to have 2 drilldown_searches, but only has [{len(self.drilldown_searches)}]"
578
+ )
580
579
  for drilldown in self.drilldown_searches:
581
580
  if DRILLDOWN_SEARCH_PLACEHOLDER in drilldown.search:
582
581
  found_placeholder = True
583
582
  if not found_placeholder:
584
- raise ValueError("Detection has one or more drilldown_searches, but none of them "
585
- f"contained '{DRILLDOWN_SEARCH_PLACEHOLDER}. This is a requirement "
586
- "if drilldown_searches are defined.'")
587
-
583
+ raise ValueError(
584
+ "Detection has one or more drilldown_searches, but none of them "
585
+ f"contained '{DRILLDOWN_SEARCH_PLACEHOLDER}. This is a requirement "
586
+ "if drilldown_searches are defined.'"
587
+ )
588
+
588
589
  # Update the search fields with the original search, if required
589
590
  for drilldown in self.drilldown_searches:
590
591
  drilldown.perform_search_substitutions(self)
591
592
 
592
- #For experimental purposes, add the default drilldowns
593
- #self.drilldown_searches.extend(Drilldown.constructDrilldownsFromDetection(self))
593
+ # For experimental purposes, add the default drilldowns
594
+ # self.drilldown_searches.extend(Drilldown.constructDrilldownsFromDetection(self))
594
595
 
595
596
  @property
596
- def drilldowns_in_JSON(self) -> list[dict[str,str]]:
597
- """This function is required for proper JSON
597
+ def drilldowns_in_JSON(self) -> list[dict[str, str]]:
598
+ """This function is required for proper JSON
598
599
  serializiation of drilldowns to occur in savedsearches.conf.
599
600
  It returns the list[Drilldown] as a list[dict].
600
601
  Without this function, the jinja template is unable
@@ -602,24 +603,26 @@ class Detection_Abstract(SecurityContentObject):
602
603
 
603
604
  Returns:
604
605
  list[dict[str,str]]: List of Drilldowns dumped to dict format
605
- """
606
+ """
606
607
  return [drilldown.model_dump() for drilldown in self.drilldown_searches]
607
608
 
608
- @field_validator('lookups', mode="before")
609
+ @field_validator("lookups", mode="before")
609
610
  @classmethod
610
- def getDetectionLookups(cls, v:list[str], info:ValidationInfo) -> list[Lookup]:
611
- director:DirectorOutputDto = info.context.get("output_dto",None)
612
-
613
- search:Union[str,None] = info.data.get("search",None)
611
+ def getDetectionLookups(cls, v: list[str], info: ValidationInfo) -> list[Lookup]:
612
+ director: DirectorOutputDto = info.context.get("output_dto", None)
613
+
614
+ search: Union[str, None] = info.data.get("search", None)
614
615
  if search is None:
615
616
  raise ValueError("Search was None - is this file missing the search field?")
616
-
617
+
617
618
  lookups = Lookup.get_lookups(search, director)
618
619
  return lookups
619
620
 
620
- @field_validator('baselines', mode="before")
621
+ @field_validator("baselines", mode="before")
621
622
  @classmethod
622
- def mapDetectionNamesToBaselineObjects(cls, v: list[str], info: ValidationInfo) -> List[Baseline]:
623
+ def mapDetectionNamesToBaselineObjects(
624
+ cls, v: list[str], info: ValidationInfo
625
+ ) -> List[Baseline]:
623
626
  if len(v) > 0:
624
627
  raise ValueError(
625
628
  "Error, baselines are constructed automatically at runtime. Please do not include this field."
@@ -627,7 +630,9 @@ class Detection_Abstract(SecurityContentObject):
627
630
 
628
631
  name: Union[str, None] = info.data.get("name", None)
629
632
  if name is None:
630
- raise ValueError("Error, cannot get Baselines because the Detection does not have a 'name' defined.")
633
+ raise ValueError(
634
+ "Error, cannot get Baselines because the Detection does not have a 'name' defined."
635
+ )
631
636
 
632
637
  if info.context is None:
633
638
  raise ValueError("ValidationInfo.context unexpectedly null")
@@ -638,14 +643,16 @@ class Detection_Abstract(SecurityContentObject):
638
643
  # This matching is a bit strange, because baseline.tags.detections starts as a list of strings, but
639
644
  # is eventually updated to a list of Detections as we construct all of the detection objects.
640
645
  detection_names = [
641
- detection_name for detection_name in baseline.tags.detections if isinstance(detection_name, str)
646
+ detection_name
647
+ for detection_name in baseline.tags.detections
648
+ if isinstance(detection_name, str)
642
649
  ]
643
650
  if name in detection_names:
644
651
  baselines.append(baseline)
645
652
 
646
653
  return baselines
647
654
 
648
- @field_validator('macros', mode="before")
655
+ @field_validator("macros", mode="before")
649
656
  @classmethod
650
657
  def getDetectionMacros(cls, v: list[str], info: ValidationInfo) -> list[Macro]:
651
658
  if info.context is None:
@@ -661,21 +668,25 @@ class Detection_Abstract(SecurityContentObject):
661
668
  message = f"Expected 'search_name' to be a string, instead it was [{type(search_name)}]"
662
669
  assert isinstance(search_name, str), message
663
670
 
664
- filter_macro_name = search_name.replace(' ', '_')\
665
- .replace('-', '_')\
666
- .replace('.', '_')\
667
- .replace('/', '_')\
668
- .lower()\
669
- + '_filter'
671
+ filter_macro_name = (
672
+ search_name.replace(" ", "_")
673
+ .replace("-", "_")
674
+ .replace(".", "_")
675
+ .replace("/", "_")
676
+ .lower()
677
+ + "_filter"
678
+ )
670
679
  try:
671
- filter_macro = Macro.mapNamesToSecurityContentObjects([filter_macro_name], director)[0]
680
+ filter_macro = Macro.mapNamesToSecurityContentObjects(
681
+ [filter_macro_name], director
682
+ )[0]
672
683
  except Exception:
673
684
  # Filter macro did not exist, so create one at runtime
674
685
  filter_macro = Macro.model_validate(
675
686
  {
676
687
  "name": filter_macro_name,
677
- "definition": 'search *',
678
- "description": 'Update this macro to limit the output results to filter out false positives.'
688
+ "definition": "search *",
689
+ "description": "Update this macro to limit the output results to filter out false positives.",
679
690
  }
680
691
  )
681
692
  director.addContentToDictMappings(filter_macro)
@@ -698,12 +709,12 @@ class Detection_Abstract(SecurityContentObject):
698
709
 
699
710
  @field_validator("enabled_by_default", mode="before")
700
711
  def only_enabled_if_production_status(cls, v: Any, info: ValidationInfo) -> bool:
701
- '''
712
+ """
702
713
  A detection can ONLY be enabled by default if it is a PRODUCTION detection.
703
714
  If not (for example, it is EXPERIMENTAL or DEPRECATED) then we will throw an exception.
704
715
  Similarly, a detection MUST be schedulable, meaning that it must be Anomaly, Correleation, or TTP.
705
716
  We will not allow Hunting searches to be enabled by default.
706
- '''
717
+ """
707
718
  if v is False:
708
719
  return v
709
720
 
@@ -713,94 +724,117 @@ class Detection_Abstract(SecurityContentObject):
713
724
  if status != DetectionStatus.production:
714
725
  errors.append(
715
726
  f"status is '{status.name}'. Detections that are enabled by default MUST be "
716
- f"'{DetectionStatus.production.value}'"
717
- )
727
+ f"'{DetectionStatus.production}'"
728
+ )
718
729
 
719
- if searchType not in [AnalyticsType.Anomaly, AnalyticsType.Correlation, AnalyticsType.TTP]:
730
+ if searchType not in [
731
+ AnalyticsType.Anomaly,
732
+ AnalyticsType.Correlation,
733
+ AnalyticsType.TTP,
734
+ ]:
720
735
  errors.append(
721
- f"type is '{searchType.value}'. Detections that are enabled by default MUST be one"
736
+ f"type is '{searchType}'. Detections that are enabled by default MUST be one"
722
737
  " of the following types: "
723
- f"{[AnalyticsType.Anomaly.value, AnalyticsType.Correlation.value, AnalyticsType.TTP.value]}")
738
+ f"{[AnalyticsType.Anomaly, AnalyticsType.Correlation, AnalyticsType.TTP]}"
739
+ )
724
740
  if len(errors) > 0:
725
741
  error_message = "\n - ".join(errors)
726
- raise ValueError(f"Detection is 'enabled_by_default: true' however \n - {error_message}")
742
+ raise ValueError(
743
+ f"Detection is 'enabled_by_default: true' however \n - {error_message}"
744
+ )
727
745
 
728
746
  return v
729
747
 
730
748
  @model_validator(mode="after")
731
749
  def addTags_nist(self):
732
- if self.type == AnalyticsType.TTP.value:
750
+ if self.type == AnalyticsType.TTP:
733
751
  self.tags.nist = [NistCategory.DE_CM]
734
752
  else:
735
753
  self.tags.nist = [NistCategory.DE_AE]
736
754
  return self
737
-
738
755
 
739
756
  @model_validator(mode="after")
740
757
  def ensureThrottlingFieldsExist(self):
741
- '''
758
+ """
742
759
  For throttling to work properly, the fields to throttle on MUST
743
760
  exist in the search itself. If not, then we cannot apply the throttling
744
- '''
761
+ """
745
762
  if self.tags.throttling is None:
746
763
  # No throttling configured for this detection
747
764
  return self
748
765
 
749
- missing_fields:list[str] = [field for field in self.tags.throttling.fields if field not in self.search]
766
+ missing_fields: list[str] = [
767
+ field for field in self.tags.throttling.fields if field not in self.search
768
+ ]
750
769
  if len(missing_fields) > 0:
751
- raise ValueError(f"The following throttle fields were missing from the search: {missing_fields}")
770
+ raise ValueError(
771
+ f"The following throttle fields were missing from the search: {missing_fields}"
772
+ )
752
773
 
753
774
  else:
754
775
  # All throttling fields present in search
755
776
  return self
756
-
757
-
758
777
 
759
778
  @model_validator(mode="after")
760
- def ensureProperObservablesExist(self):
779
+ def ensureProperRBAConfig(self):
761
780
  """
762
- If a detections is PRODUCTION and either TTP or ANOMALY, then it MUST have an Observable with the VICTIM role.
781
+ If a detection has an RBA deployment and is PRODUCTION, then it must have an RBA config, with at least one risk object
763
782
 
764
783
  Returns:
765
- self: Returns itself if the valdiation passes
784
+ self: Returns itself if the validation passes
766
785
  """
767
- # NOTE: we ignore the type error around self.status because we are using Pydantic's
768
- # use_enum_values configuration
769
- # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
770
- if self.status not in [DetectionStatus.production.value]: # type: ignore
771
- # Only perform this validation on production detections
772
- return self
773
786
 
774
- if self.type not in [AnalyticsType.TTP.value, AnalyticsType.Anomaly.value]:
775
- # Only perform this validation on TTP and Anomaly detections
776
- return self
787
+ if (
788
+ self.deployment.alert_action.rba is None
789
+ or self.deployment.alert_action.rba.enabled is False
790
+ ):
791
+ # confirm we don't have an RBA config
792
+ if self.rba is None:
793
+ return self
794
+ else:
795
+ raise ValueError(
796
+ "Detection does not have a matching RBA deployment config, the RBA portion should be omitted."
797
+ )
798
+ else:
799
+ if self.rba is None:
800
+ raise ValueError(
801
+ "Detection is expected to have an RBA object based on its deployment config"
802
+ )
803
+ else:
804
+ if len(self.rba.risk_objects) > 0: # type: ignore
805
+ return self
806
+ else:
807
+ raise ValueError(
808
+ "Detection expects an RBA config with at least one risk object."
809
+ )
777
810
 
778
- # Detection is required to have a victim
779
- roles: list[str] = []
780
- for observable in self.tags.observable:
781
- roles.extend(observable.role)
811
+ @model_validator(mode="after")
812
+ def search_rba_fields_exist_validate(self):
813
+ # Return immediately if RBA isn't required
814
+ if (
815
+ self.deployment.alert_action.rba.enabled is False
816
+ or self.deployment.alert_action.rba is None
817
+ ) and self.rba is None: # type: ignore
818
+ return self
782
819
 
783
- if roles.count("Victim") == 0:
820
+ # Raise error if RBA isn't present
821
+ if self.rba is None:
784
822
  raise ValueError(
785
- "Error, there must be AT LEAST 1 Observable with the role 'Victim' declared in "
786
- "Detection.tags.observables. However, none were found."
823
+ "RBA is required for this detection based on its deployment config"
787
824
  )
825
+ risk_fields = [ob.field.lower() for ob in self.rba.risk_objects]
826
+ threat_fields = [ob.field.lower() for ob in self.rba.threat_objects]
827
+ rba_fields = risk_fields + threat_fields
788
828
 
789
- # Exactly one victim was found
790
- return self
791
-
792
- @model_validator(mode="after")
793
- def search_observables_exist_validate(self):
794
- observable_fields = [ob.name.lower() for ob in self.tags.observable]
795
-
796
- # All $field$ fields from the message must appear in the search
797
829
  field_match_regex = r"\$([^\s.]*)\$"
798
830
 
799
831
  missing_fields: set[str]
800
- if self.tags.message:
801
- matches = re.findall(field_match_regex, self.tags.message.lower())
832
+ if self.rba.message:
833
+ matches = re.findall(field_match_regex, self.rba.message.lower())
802
834
  message_fields = [match.replace("$", "").lower() for match in matches]
803
- missing_fields = set([field for field in observable_fields if field not in self.search.lower()])
835
+ missing_fields = set(
836
+ [field for field in rba_fields if field not in self.search.lower()]
837
+ )
804
838
  else:
805
839
  message_fields = []
806
840
  missing_fields = set()
@@ -808,32 +842,28 @@ class Detection_Abstract(SecurityContentObject):
808
842
  error_messages: list[str] = []
809
843
  if len(missing_fields) > 0:
810
844
  error_messages.append(
811
- "The following fields are declared as observables, but do not exist in the "
845
+ "The following fields are declared in the rba config, but do not exist in the "
812
846
  f"search: {missing_fields}"
813
847
  )
814
-
815
- missing_fields = set([field for field in message_fields if field not in self.search.lower()])
848
+ missing_fields = set(
849
+ [field for field in message_fields if field not in self.search.lower()]
850
+ )
816
851
  if len(missing_fields) > 0:
817
852
  error_messages.append(
818
853
  "The following fields are used as fields in the message, but do not exist in "
819
854
  f"the search: {missing_fields}"
820
855
  )
821
856
 
822
- # NOTE: we ignore the type error around self.status because we are using Pydantic's
823
- # use_enum_values configuration
824
- # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
825
- if len(error_messages) > 0 and self.status == DetectionStatus.production.value: # type: ignore
857
+ if len(error_messages) > 0 and self.status == DetectionStatus.production:
826
858
  msg = (
827
- "Use of fields in observables/messages that do not appear in search:\n\t- "
859
+ "Use of fields in rba/messages that do not appear in search:\n\t- "
828
860
  "\n\t- ".join(error_messages)
829
861
  )
830
862
  raise ValueError(msg)
831
-
832
- # Found everything
833
863
  return self
834
864
 
835
865
  @field_validator("tests", mode="before")
836
- def ensure_yml_test_is_unittest(cls, v:list[dict]):
866
+ def ensure_yml_test_is_unittest(cls, v: list[dict]):
837
867
  """The typing for the tests field allows it to be one of
838
868
  a number of different types of tests. However, ONLY
839
869
  UnitTest should be allowed to be defined in the YML
@@ -849,17 +879,17 @@ class Detection_Abstract(SecurityContentObject):
849
879
  it into a different type of test
850
880
 
851
881
  Args:
852
- v (list[dict]): list of dicts read from the yml.
882
+ v (list[dict]): list of dicts read from the yml.
853
883
  Each one SHOULD be a valid UnitTest. If we cannot
854
884
  construct a valid unitTest from it, a ValueError should be raised
855
885
 
856
886
  Returns:
857
- _type_: The input of the function, assuming no
887
+ _type_: The input of the function, assuming no
858
888
  ValueError is raised.
859
- """
860
- valueErrors:list[ValueError] = []
889
+ """
890
+ valueErrors: list[ValueError] = []
861
891
  for unitTest in v:
862
- #This raises a ValueError on a failed UnitTest.
892
+ # This raises a ValueError on a failed UnitTest.
863
893
  try:
864
894
  UnitTest.model_validate(unitTest)
865
895
  except ValueError as e:
@@ -869,16 +899,13 @@ class Detection_Abstract(SecurityContentObject):
869
899
  # All of these can be constructred as UnitTests with no
870
900
  # Exceptions, so let the normal flow continue
871
901
  return v
872
-
873
902
 
874
903
  @field_validator("tests")
875
904
  def tests_validate(
876
- cls,
877
- v: list[UnitTest | IntegrationTest | ManualTest],
878
- info: ValidationInfo
905
+ cls, v: list[UnitTest | IntegrationTest | ManualTest], info: ValidationInfo
879
906
  ) -> list[UnitTest | IntegrationTest | ManualTest]:
880
907
  # Only production analytics require tests
881
- if info.data.get("status", "") != DetectionStatus.production.value:
908
+ if info.data.get("status", "") != DetectionStatus.production:
882
909
  return v
883
910
 
884
911
  # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined
@@ -895,7 +922,8 @@ class Detection_Abstract(SecurityContentObject):
895
922
  # Ensure that there is at least 1 test
896
923
  if len(v) == 0:
897
924
  raise ValueError(
898
- "At least one test is REQUIRED for production detection: " + info.data.get("name", "NO NAME FOUND")
925
+ "At least one test is REQUIRED for production detection: "
926
+ + info.data.get("name", "NO NAME FOUND")
899
927
  )
900
928
 
901
929
  # No issues - at least one test provided for production type requiring testing
@@ -967,13 +995,29 @@ class Detection_Abstract(SecurityContentObject):
967
995
  def get_summary(
968
996
  self,
969
997
  detection_fields: list[str] = [
970
- "name", "type", "status", "test_status", "source", "data_source", "search", "file_path"
998
+ "name",
999
+ "type",
1000
+ "status",
1001
+ "test_status",
1002
+ "source",
1003
+ "data_source",
1004
+ "search",
1005
+ "file_path",
971
1006
  ],
972
1007
  detection_field_aliases: dict[str, str] = {
973
- "status": "production_status", "test_status": "status", "source": "source_category"
1008
+ "status": "production_status",
1009
+ "test_status": "status",
1010
+ "source": "source_category",
974
1011
  },
975
1012
  tags_fields: list[str] = ["manual_test"],
976
- test_result_fields: list[str] = ["success", "message", "exception", "status", "duration", "wait_duration"],
1013
+ test_result_fields: list[str] = [
1014
+ "success",
1015
+ "message",
1016
+ "exception",
1017
+ "status",
1018
+ "duration",
1019
+ "wait_duration",
1020
+ ],
977
1021
  test_job_fields: list[str] = ["resultCount", "runDuration"],
978
1022
  ) -> dict[str, Any]:
979
1023
  """
@@ -991,7 +1035,7 @@ class Detection_Abstract(SecurityContentObject):
991
1035
  value = getattr(self, field)
992
1036
 
993
1037
  # Enums and Path objects cannot be serialized directly, so we convert it to a string
994
- if isinstance(value, Enum) or isinstance(value, pathlib.Path):
1038
+ if isinstance(value, StrEnum) or isinstance(value, pathlib.Path):
995
1039
  value = str(value)
996
1040
 
997
1041
  # Alias any fields as needed
@@ -1013,7 +1057,7 @@ class Detection_Abstract(SecurityContentObject):
1013
1057
  # Initialize the dict as a mapping of strings to str/bool
1014
1058
  result: dict[str, Union[str, bool]] = {
1015
1059
  "name": test.name,
1016
- "test_type": test.test_type.value
1060
+ "test_type": test.test_type,
1017
1061
  }
1018
1062
 
1019
1063
  # If result is not None, get a summary of the test result w/ the requested fields
@@ -1030,7 +1074,7 @@ class Detection_Abstract(SecurityContentObject):
1030
1074
  result["message"] = "NO RESULT - Test not run"
1031
1075
 
1032
1076
  # Add the result to our list
1033
- summary_dict["tests"].append(result) # type: ignore
1077
+ summary_dict["tests"].append(result) # type: ignore
1034
1078
 
1035
1079
  # Return the summary
1036
1080