contentctl-4.2.1-py3-none-any.whl → contentctl-4.2.4-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (36)
  1. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +41 -47
  2. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +1 -1
  3. contentctl/actions/detection_testing/views/DetectionTestingView.py +1 -4
  4. contentctl/actions/validate.py +40 -1
  5. contentctl/enrichments/attack_enrichment.py +6 -8
  6. contentctl/enrichments/cve_enrichment.py +3 -3
  7. contentctl/helper/splunk_app.py +263 -0
  8. contentctl/input/director.py +1 -1
  9. contentctl/input/ssa_detection_builder.py +8 -6
  10. contentctl/objects/abstract_security_content_objects/detection_abstract.py +362 -336
  11. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +117 -103
  12. contentctl/objects/atomic.py +7 -10
  13. contentctl/objects/base_test.py +1 -1
  14. contentctl/objects/base_test_result.py +7 -5
  15. contentctl/objects/baseline_tags.py +2 -30
  16. contentctl/objects/config.py +5 -4
  17. contentctl/objects/correlation_search.py +316 -96
  18. contentctl/objects/data_source.py +7 -2
  19. contentctl/objects/detection_tags.py +128 -102
  20. contentctl/objects/errors.py +18 -0
  21. contentctl/objects/lookup.py +3 -1
  22. contentctl/objects/mitre_attack_enrichment.py +3 -3
  23. contentctl/objects/notable_event.py +20 -0
  24. contentctl/objects/observable.py +20 -26
  25. contentctl/objects/risk_analysis_action.py +2 -2
  26. contentctl/objects/risk_event.py +315 -0
  27. contentctl/objects/ssa_detection_tags.py +1 -1
  28. contentctl/objects/story_tags.py +2 -2
  29. contentctl/objects/unit_test.py +1 -9
  30. contentctl/output/data_source_writer.py +4 -4
  31. contentctl/output/templates/savedsearches_detections.j2 +0 -8
  32. {contentctl-4.2.1.dist-info → contentctl-4.2.4.dist-info}/METADATA +5 -8
  33. {contentctl-4.2.1.dist-info → contentctl-4.2.4.dist-info}/RECORD +36 -32
  34. {contentctl-4.2.1.dist-info → contentctl-4.2.4.dist-info}/LICENSE.md +0 -0
  35. {contentctl-4.2.1.dist-info → contentctl-4.2.4.dist-info}/WHEEL +0 -0
  36. {contentctl-4.2.1.dist-info → contentctl-4.2.4.dist-info}/entry_points.txt +0 -0
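
If you want to reproduce a single file's diff locally, a minimal sketch along these lines works. It assumes the two wheels have already been downloaded (e.g. with `pip download contentctl==4.2.1 --no-deps` and likewise for 4.2.4), uses standard wheel file naming, and picks file 10 from the list above — detection_abstract.py, the largest change in this release:

```python
# Minimal sketch: diff one module between the two wheels named above.
# Assumes both .whl files are in the working directory.
import difflib
import zipfile

def read_member(wheel_path: str, member: str) -> list[str]:
    # Wheels are plain zip archives, so module source can be read directly.
    with zipfile.ZipFile(wheel_path) as wheel:
        return wheel.read(member).decode("utf-8").splitlines(keepends=True)

member = "contentctl/objects/abstract_security_content_objects/detection_abstract.py"
old = read_member("contentctl-4.2.1-py3-none-any.whl", member)
new = read_member("contentctl-4.2.4-py3-none-any.whl", member)
print("".join(difflib.unified_diff(old, new, f"4.2.1/{member}", f"4.2.4/{member}")))
```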
@@ -1,16 +1,23 @@
  from __future__ import annotations
- from typing import TYPE_CHECKING,Union, Optional, List, Any, Annotated
- import os.path
+ from typing import TYPE_CHECKING, Union, Optional, List, Any, Annotated
  import re
  import pathlib
- from pydantic import BaseModel, field_validator, model_validator, ValidationInfo, Field, computed_field, model_serializer,ConfigDict, FilePath
-
+ from pydantic import (
+     field_validator,
+     model_validator,
+     ValidationInfo,
+     Field,
+     computed_field,
+     model_serializer,
+     ConfigDict,
+     FilePath
+ )
  from contentctl.objects.macro import Macro
  from contentctl.objects.lookup import Lookup
  if TYPE_CHECKING:
      from contentctl.input.director import DirectorOutputDto
      from contentctl.objects.baseline import Baseline
-
+
  from contentctl.objects.security_content_object import SecurityContentObject
  from contentctl.objects.enums import AnalyticsType
  from contentctl.objects.enums import DataModel
@@ -22,24 +29,24 @@ from contentctl.objects.deployment import Deployment
  from contentctl.objects.unit_test import UnitTest
  from contentctl.objects.test_group import TestGroup
  from contentctl.objects.integration_test import IntegrationTest
- from contentctl.objects.event_source import EventSource
  from contentctl.objects.data_source import DataSource

- #from contentctl.objects.playbook import Playbook
+ # from contentctl.objects.playbook import Playbook
  from contentctl.objects.enums import ProvidingTechnology
  from contentctl.enrichments.cve_enrichment import CveEnrichmentObj

- MISSING_SOURCES:set[str] = set()
+ MISSING_SOURCES: set[str] = set()
+

  class Detection_Abstract(SecurityContentObject):
      model_config = ConfigDict(use_enum_values=True)
-
-     #contentType: SecurityContentType = SecurityContentType.detections
+
+     # contentType: SecurityContentType = SecurityContentType.detections
      type: AnalyticsType = Field(...)
      status: DetectionStatus = Field(...)
      data_source: list[str] = []
      tags: DetectionTags = Field(...)
-     search: Union[str, dict[str,Any]] = Field(...)
+     search: Union[str, dict[str, Any]] = Field(...)
      how_to_implement: str = Field(..., min_length=4)
      known_false_positives: str = Field(..., min_length=4)

@@ -52,65 +59,76 @@ class Detection_Abstract(SecurityContentObject):
      # https://github.com/pydantic/pydantic/issues/9101#issuecomment-2019032541
      tests: List[Annotated[Union[UnitTest, IntegrationTest], Field(union_mode='left_to_right')]] = []
      # A list of groups of tests, relying on the same data
-     test_groups: Union[list[TestGroup], None] = Field(None,validate_default=True)
+     test_groups: Union[list[TestGroup], None] = Field(None, validate_default=True)

      data_source_objects: list[DataSource] = []

-
      @field_validator("search", mode="before")
      @classmethod
-     def validate_presence_of_filter_macro(cls, value:Union[str, dict[str,Any]], info:ValidationInfo)->Union[str, dict[str,Any]]:
+     def validate_presence_of_filter_macro(
+         cls,
+         value: Union[str, dict[str, Any]],
+         info: ValidationInfo
+     ) -> Union[str, dict[str, Any]]:
          """
          Validates that, if required to be present, the filter macro is present with the proper name.
          The filter macro MUST be derived from the name of the detection


          Args:
-             value (Union[str, dict[str,Any]]): The search. It can either be a string (and should be SPL)
-                 or a dict, in which case it is Sigma-formatted.
-             info (ValidationInfo): The validation info can contain a number of different objects. Today it only contains the director.
+             value (Union[str, dict[str,Any]]): The search. It can either be a string (and should be
+                 SPL or a dict, in which case it is Sigma-formatted.
+             info (ValidationInfo): The validation info can contain a number of different objects.
+                 Today it only contains the director.

          Returns:
              Union[str, dict[str,Any]]: The search, either in sigma or SPL format.
-         """
-
-         if isinstance(value,dict):
-             #If the search is a dict, then it is in Sigma format so return it
+         """
+
+         if isinstance(value, dict):
+             # If the search is a dict, then it is in Sigma format so return it
              return value
-
+
          # Otherwise, the search is SPL.
-
-
-         # In the future, we will may add support that makes the inclusion of the
-         # filter macro optional or automatically generates it for searches that
+
+         # In the future, we will may add support that makes the inclusion of the
+         # filter macro optional or automatically generates it for searches that
          # do not have it. For now, continue to require that all searches have a filter macro.
          FORCE_FILTER_MACRO = True
          if not FORCE_FILTER_MACRO:
              return value
-
+
          # Get the required macro name, which is derived from the search name.
          # Note that a separate validation ensures that the file name matches the content name
-         name:Union[str,None] = info.data.get("name",None)
+         name: Union[str, None] = info.data.get("name", None)
          if name is None:
-             #The search was sigma formatted (or failed other validation and was None), so we will not validate macros in it
-             raise ValueError("Cannot validate filter macro, field 'name' (which is required to validate the macro) was missing from the detection YML.")
-
-         #Get the file name without the extension. Note this is not a full path!
+             # The search was sigma formatted (or failed other validation and was None), so we will
+             # not validate macros in it
+             raise ValueError(
+                 "Cannot validate filter macro, field 'name' (which is required to validate the "
+                 "macro) was missing from the detection YML."
+             )
+
+         # Get the file name without the extension. Note this is not a full path!
          file_name = pathlib.Path(cls.contentNameToFileName(name)).stem
          file_name_with_filter = f"`{file_name}_filter`"
-
-         if file_name_with_filter not in value:
-             raise ValueError(f"Detection does not contain the EXACT filter macro {file_name_with_filter}. "
-                              "This filter macro MUST be present in the search. It usually placed at the end "
-                              "of the search and is useful for environment-specific filtering of False Positive or noisy results.")
-
-         return value

+         if file_name_with_filter not in value:
+             raise ValueError(
+                 f"Detection does not contain the EXACT filter macro {file_name_with_filter}. "
+                 "This filter macro MUST be present in the search. It usually placed at the end "
+                 "of the search and is useful for environment-specific filtering of False Positive or noisy results."
+             )

+         return value

      @field_validator("test_groups")
      @classmethod
-     def validate_test_groups(cls, value:Union[None, List[TestGroup]], info:ValidationInfo) -> Union[List[TestGroup], None]:
+     def validate_test_groups(
+         cls,
+         value: Union[None, List[TestGroup]],
+         info: ValidationInfo
+     ) -> Union[List[TestGroup], None]:
          """
          Validates the `test_groups` field and constructs the model from the list of unit tests
          if no explicit construct was provided
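
As context for the filter-macro validator in the hunk above: the required token is the detection's file name stem plus `_filter`, wrapped in backticks. A small hypothetical illustration — the detection name is invented, and the underscore/lowercase normalization is an assumption mirroring the `replace(...).lower()` chain in `getDetectionMacros` further down this diff:

```python
# Hypothetical illustration of the filter-macro naming rule enforced above.
import pathlib

name = "Suspicious Curl Network Connection"  # hypothetical detection name
file_name = pathlib.Path(name.replace(" ", "_").replace("-", "_").lower() + ".yml").stem
file_name_with_filter = f"`{file_name}_filter`"
# The SPL search must contain this token verbatim:
assert file_name_with_filter == "`suspicious_curl_network_connection_filter`"
```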
@@ -123,65 +141,62 @@ class Detection_Abstract(SecurityContentObject):

          # iterate over the unit tests and create a TestGroup (and as a result, an IntegrationTest) for each
          test_groups: list[TestGroup] = []
-         for unit_test in info.data.get("tests"):
-             test_group = TestGroup.derive_from_unit_test(unit_test, info.data.get("name"))
+         tests: list[UnitTest | IntegrationTest] = info.data.get("tests") # type: ignore
+         unit_test: UnitTest
+         for unit_test in tests: # type: ignore
+             test_group = TestGroup.derive_from_unit_test(unit_test, info.data.get("name")) # type: ignore
              test_groups.append(test_group)

          # now add each integration test to the list of tests
          for test_group in test_groups:
-             info.data.get("tests").append(test_group.integration_test)
+             tests.append(test_group.integration_test)
          return test_groups

-
      @computed_field
      @property
-     def datamodel(self)->List[DataModel]:
+     def datamodel(self) -> List[DataModel]:
          if isinstance(self.search, str):
              return [dm for dm in DataModel if dm.value in self.search]
          else:
              return []
-

      @computed_field
      @property
-     def source(self)->str:
-         if self.file_path is not None:
-             return self.file_path.absolute().parent.name
-         else:
-             raise ValueError(f"Cannot get 'source' for detection {self.name} - 'file_path' was None.")
+     def source(self) -> str:
+         return self.file_path.absolute().parent.name

      deployment: Deployment = Field({})
-
+
      @computed_field
      @property
-     def annotations(self)->dict[str,Union[List[str],int,str]]:
+     def annotations(self) -> dict[str, Union[List[str], int, str]]:

-         annotations_dict:dict[str, Union[List[str], int]] = {}
-         annotations_dict["analytic_story"]=[story.name for story in self.tags.analytic_story]
+         annotations_dict: dict[str, str | list[str] | int] = {}
+         annotations_dict["analytic_story"] = [story.name for story in self.tags.analytic_story]
          annotations_dict["confidence"] = self.tags.confidence
          if len(self.tags.cve or []) > 0:
-             annotations_dict["cve"] = self.tags.cve
+             annotations_dict["cve"] = self.tags.cve
          annotations_dict["impact"] = self.tags.impact
          annotations_dict["type"] = self.type
-         #annotations_dict["version"] = self.version
+         # annotations_dict["version"] = self.version

          annotations_dict["data_source"] = self.data_source

-         #The annotations object is a superset of the mappings object.
+         # The annotations object is a superset of the mappings object.
          # So start with the mapping object.
          annotations_dict.update(self.mappings)
-
-         #Make sure that the results are sorted for readability/easier diffs
+
+         # Make sure that the results are sorted for readability/easier diffs
          return dict(sorted(annotations_dict.items(), key=lambda item: item[0]))
-
-     #playbooks: list[Playbook] = []
-
-     baselines: list[Baseline] = Field([],validate_default=True)
-
+
+     # playbooks: list[Playbook] = []
+
+     baselines: list[Baseline] = Field([], validate_default=True)
+
      @computed_field
      @property
-     def mappings(self)->dict[str, List[str]]:
-         mappings:dict[str,Any] = {}
+     def mappings(self) -> dict[str, List[str]]:
+         mappings: dict[str, Any] = {}
          if len(self.tags.cis20) > 0:
              mappings["cis20"] = [tag.value for tag in self.tags.cis20]
          if len(self.tags.kill_chain_phases) > 0:
@@ -189,32 +204,29 @@ class Detection_Abstract(SecurityContentObject):
          if len(self.tags.mitre_attack_id) > 0:
              mappings['mitre_attack'] = self.tags.mitre_attack_id
          if len(self.tags.nist) > 0:
-             mappings['nist'] = [category.value for category in self.tags.nist]
-
-
+             mappings['nist'] = [category.value for category in self.tags.nist]
+

          # No need to sort the dict! It has been constructed in-order.
          # However, if this logic is changed, then consider reordering or
          # adding the sort back!
-         #return dict(sorted(mappings.items(), key=lambda item: item[0]))
+         # return dict(sorted(mappings.items(), key=lambda item: item[0]))
          return mappings

-     macros: list[Macro] = Field([],validate_default=True)
-     lookups: list[Lookup] = Field([],validate_default=True)
+     macros: list[Macro] = Field([], validate_default=True)
+     lookups: list[Lookup] = Field([], validate_default=True)

      cve_enrichment: list[CveEnrichmentObj] = Field([], validate_default=True)
-
-     @model_validator(mode="after")
-     def cve_enrichment_func(self, info:ValidationInfo):
+
+     def cve_enrichment_func(self, __context: Any):
          if len(self.cve_enrichment) > 0:
              raise ValueError(f"Error, field 'cve_enrichment' should be empty and "
                               f"dynamically populated at runtime. Instead, this field contained: {self.cve_enrichment}")

-         output_dto:Union[DirectorOutputDto,None]= info.context.get("output_dto",None)
+         output_dto: Union[DirectorOutputDto, None] = __context.get("output_dto", None)
          if output_dto is None:
              raise ValueError("Context not provided to detection model post validator")
-
-
-         enriched_cves:list[CveEnrichmentObj] = []
+
+         enriched_cves: list[CveEnrichmentObj] = []

          for cve_id in self.tags.cve:
              try:
@@ -223,42 +235,39 @@ class Detection_Abstract(SecurityContentObject):
                  raise ValueError(f"{e}")
          self.cve_enrichment = enriched_cves
          return self
-

      splunk_app_enrichment: Optional[List[dict]] = None
-
+
      @computed_field
      @property
-     def nes_fields(self)->Optional[str]:
+     def nes_fields(self) -> Optional[str]:
          if self.deployment.alert_action.notable is not None:
              return ','.join(self.deployment.alert_action.notable.nes_fields)
          else:
              return None
-
+
      @computed_field
      @property
-     def providing_technologies(self)->List[ProvidingTechnology]:
+     def providing_technologies(self) -> List[ProvidingTechnology]:
          if isinstance(self.search, str):
              return ProvidingTechnology.getProvidingTechFromSearch(self.search)
          else:
-             #Dict-formatted searches (sigma) will not have providing technologies
+             # Dict-formatted searches (sigma) will not have providing technologies
              return []
-
+
      @computed_field
      @property
-     def risk(self)->list[dict[str,Any]]:
-         risk_objects = []
+     def risk(self) -> list[dict[str, Any]]:
+         risk_objects: list[dict[str, str | int]] = []
          risk_object_user_types = {'user', 'username', 'email address'}
          risk_object_system_types = {'device', 'endpoint', 'hostname', 'ip address'}
-         process_threat_object_types = {'process name','process'}
-         file_threat_object_types = {'file name','file', 'file hash'}
-         url_threat_object_types = {'url string','url'}
+         process_threat_object_types = {'process name', 'process'}
+         file_threat_object_types = {'file name', 'file', 'file hash'}
+         url_threat_object_types = {'url string', 'url'}
          ip_threat_object_types = {'ip address'}

-
          for entity in self.tags.observable:
-
-             risk_object = dict()
+             risk_object: dict[str, str | int] = dict()
              if 'Victim' in entity.role and entity.type.lower() in risk_object_user_types:
                  risk_object['risk_object_type'] = 'user'
                  risk_object['risk_object_field'] = entity.name
@@ -274,22 +283,22 @@ class Detection_Abstract(SecurityContentObject):
              elif 'Attacker' in entity.role and entity.type.lower() in process_threat_object_types:
                  risk_object['threat_object_field'] = entity.name
                  risk_object['threat_object_type'] = "process"
-                 risk_objects.append(risk_object)
+                 risk_objects.append(risk_object)

              elif 'Attacker' in entity.role and entity.type.lower() in file_threat_object_types:
                  risk_object['threat_object_field'] = entity.name
                  risk_object['threat_object_type'] = "file_name"
-                 risk_objects.append(risk_object)
+                 risk_objects.append(risk_object)

              elif 'Attacker' in entity.role and entity.type.lower() in ip_threat_object_types:
                  risk_object['threat_object_field'] = entity.name
                  risk_object['threat_object_type'] = "ip_address"
-                 risk_objects.append(risk_object)
+                 risk_objects.append(risk_object)

              elif 'Attacker' in entity.role and entity.type.lower() in url_threat_object_types:
                  risk_object['threat_object_field'] = entity.name
                  risk_object['threat_object_type'] = "url"
-                 risk_objects.append(risk_object)
+                 risk_objects.append(risk_object)

              else:
                  risk_object['risk_object_type'] = 'other'
@@ -298,38 +307,41 @@ class Detection_Abstract(SecurityContentObject):
                  risk_objects.append(risk_object)
                  continue

-
          return risk_objects

-
-
      @computed_field
      @property
-     def metadata(self)->dict[str,str]:
-         return {'detection_id':str(self.id),
-                 'deprecated':'1' if self.status==DetectionStatus.deprecated.value else '0',
-                 'detection_version':str(self.version)}
+     def metadata(self) -> dict[str, str]:
+         # NOTE: we ignore the type error around self.status because we are using Pydantic's
+         # use_enum_values configuration
+         # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
+         return {
+             'detection_id': str(self.id),
+             'deprecated': '1' if self.status == DetectionStatus.deprecated.value else '0', # type: ignore
+             'detection_version': str(self.version)
+         }

      @model_serializer
      def serialize_model(self):
-         #Call serializer for parent
+         # Call serializer for parent
          super_fields = super().serialize_model()
-
-         #All fields custom to this model
-         model= {
+
+         # All fields custom to this model
+         model = {
              "tags": self.tags.model_dump(),
              "type": self.type,
              "search": self.search,
-             "how_to_implement":self.how_to_implement,
-             "known_false_positives":self.known_false_positives,
+             "how_to_implement": self.how_to_implement,
+             "known_false_positives": self.known_false_positives,
              "datamodel": self.datamodel,
              "source": self.source,
              "nes_fields": self.nes_fields,
          }
-         #Only a subset of macro fields are required:
-         all_macros = []
+
+         # Only a subset of macro fields are required:
+         all_macros: list[dict[str, str | list[str]]] = []
          for macro in self.macros:
-             macro_dump:dict = {
+             macro_dump: dict[str, str | list[str]] = {
                  "name": macro.name,
                  "definition": macro.definition,
                  "description": macro.description
@@ -338,228 +350,226 @@ class Detection_Abstract(SecurityContentObject):
                  macro_dump['arguments'] = macro.arguments

              all_macros.append(macro_dump)
-         model['macros'] = all_macros
+         model['macros'] = all_macros # type: ignore

-
-         all_lookups = []
+         all_lookups: list[dict[str, str | int | None]] = []
          for lookup in self.lookups:
              if lookup.collection is not None:
-                 all_lookups.append({
-                     "name":lookup.name,
-                     "description":lookup.description,
-                     "collection":lookup.collection,
-                     "case_sensitive_match": None,
-                     "fields_list":lookup.fields_list})
+                 all_lookups.append(
+                     {
+                         "name": lookup.name,
+                         "description": lookup.description,
+                         "collection": lookup.collection,
+                         "case_sensitive_match": None,
+                         "fields_list": lookup.fields_list
+                     }
+                 )
              elif lookup.filename is not None:
-                 all_lookups.append({
-                     "name":lookup.name,
-                     "description":lookup.description,
-                     "filename": lookup.filename.name,
-                     "default_match":"true" if lookup.default_match else "false",
-                     "case_sensitive_match": "true" if lookup.case_sensitive_match else "false",
-                     "match_type":lookup.match_type,
-                     "min_matches":lookup.min_matches,
-                     "fields_list":lookup.fields_list})
-         model['lookups'] = all_lookups
-
-
-         #Combine fields from this model with fields from parent
-         super_fields.update(model)
-
-         #return the model
+                 all_lookups.append(
+                     {
+                         "name": lookup.name,
+                         "description": lookup.description,
+                         "filename": lookup.filename.name,
+                         "default_match": "true" if lookup.default_match else "false",
+                         "case_sensitive_match": "true" if lookup.case_sensitive_match else "false",
+                         "match_type": lookup.match_type,
+                         "min_matches": lookup.min_matches,
+                         "fields_list": lookup.fields_list
+                     }
+                 )
+         model['lookups'] = all_lookups # type: ignore
+
+         # Combine fields from this model with fields from parent
+         super_fields.update(model) # type: ignore
+
+         # return the model
          return super_fields

+     def model_post_init(self, __context: Any) -> None:
+         super().model_post_init(__context)
+         director: Optional[DirectorOutputDto] = __context.get("output_dto", None)

-     def model_post_init(self, ctx:dict[str,Any]):
-         # director: Optional[DirectorOutputDto] = ctx.get("output_dto",None)
-         # if not isinstance(director,DirectorOutputDto):
-         #     raise ValueError("DirectorOutputDto was not passed in context of Detection model_post_init")
-         director: Optional[DirectorOutputDto] = ctx.get("output_dto",None)
-
-         #Ensure that all baselines link to this detection
+         # Ensure that all baselines link to this detection
          for baseline in self.baselines:
-             new_detections = []
+             new_detections: list[Detection_Abstract | str] = []
              replaced = False
              for d in baseline.tags.detections:
-                 if isinstance(d,str) and self.name==d:
-                     new_detections.append(self)
-                     replaced = True
-                 else:
-                     new_detections.append(d)
+                 if isinstance(d, str) and self.name == d:
+                     new_detections.append(self)
+                     replaced = True
+                 else:
+                     new_detections.append(d)
              if replaced is False:
-                 raise ValueError(f"Error, failed to replace detection reference in Baseline '{baseline.name}' to detection '{self.name}'")
+                 raise ValueError(
+                     f"Error, failed to replace detection reference in Baseline '{baseline.name}' "
+                     f"to detection '{self.name}'"
+                 )
              baseline.tags.detections = new_detections

          # Data source may be defined 1 on each line, OR they may be defined as
          # SOUCE_1 AND ANOTHERSOURCE AND A_THIRD_SOURCE
          # if more than 1 data source is required for a detection (for example, because it includes a join)
          # Parse and update the list to resolve individual names and remove potential duplicates
-         updated_data_source_names:set[str] = set()
-
+         updated_data_source_names: set[str] = set()
+
          for ds in self.data_source:
              split_data_sources = {d.strip() for d in ds.split('AND')}
              updated_data_source_names.update(split_data_sources)
-
+
          sources = sorted(list(updated_data_source_names))
-
-         matched_data_sources:list[DataSource] = []
-         missing_sources:list[str] = []
+
+         matched_data_sources: list[DataSource] = []
+         missing_sources: list[str] = []
          for source in sources:
              try:
                  matched_data_sources += DataSource.mapNamesToSecurityContentObjects([source], director)
-             except Exception as data_source_mapping_exception:
+             except Exception:
                  # We gobble this up and add it to a global set so that we
                  # can print it ONCE at the end of the build of datasources.
                  # This will be removed later as per the note below
                  MISSING_SOURCES.add(source)
-
+
          if len(missing_sources) > 0:
              # This will be changed to ValueError when we have a complete list of data sources
-             print(f"WARNING: The following exception occurred when mapping the data_source field to DataSource objects:{missing_sources}")
-
+             print(
+                 "WARNING: The following exception occurred when mapping the data_source field to "
+                 f"DataSource objects:{missing_sources}"
+             )
+
          self.data_source_objects = matched_data_sources

          for story in self.tags.analytic_story:
-             story.detections.append(self)
-         return self
+             story.detections.append(self)
+
+         self.cve_enrichment_func(__context)

-
-     @field_validator('lookups',mode="before")
+     @field_validator('lookups', mode="before")
      @classmethod
-     def getDetectionLookups(cls, v:list[str], info:ValidationInfo)->list[Lookup]:
-         director:DirectorOutputDto = info.context.get("output_dto",None)
-
-         search:Union[str,dict] = info.data.get("search",None)
-         if not isinstance(search,str):
-             #The search was sigma formatted (or failed other validation and was None), so we will not validate macros in it
+     def getDetectionLookups(cls, v: list[str], info: ValidationInfo) -> list[Lookup]:
+         if info.context is None:
+             raise ValueError("ValidationInfo.context unexpectedly null")
+
+         director: DirectorOutputDto = info.context.get("output_dto", None)
+
+         search: Union[str, dict[str, Any], None] = info.data.get("search", None)
+         if not isinstance(search, str):
+             # The search was sigma formatted (or failed other validation and was None), so we will
+             # not validate macros in it
              return []
-
-         lookups= Lookup.get_lookups(search, director)
+
+         lookups = Lookup.get_lookups(search, director)
          return lookups

-     @field_validator('baselines',mode="before")
+     @field_validator('baselines', mode="before")
      @classmethod
-     def mapDetectionNamesToBaselineObjects(cls, v:list[str], info:ValidationInfo)->List[Baseline]:
+     def mapDetectionNamesToBaselineObjects(cls, v: list[str], info: ValidationInfo) -> List[Baseline]:
          if len(v) > 0:
-             raise ValueError("Error, baselines are constructed automatically at runtime. Please do not include this field.")
+             raise ValueError(
+                 "Error, baselines are constructed automatically at runtime. Please do not include this field."
+             )

-
-         name:Union[str,None] = info.data.get("name",None)
+         name: Union[str, None] = info.data.get("name", None)
          if name is None:
              raise ValueError("Error, cannot get Baselines because the Detection does not have a 'name' defined.")
-
-         director:DirectorOutputDto = info.context.get("output_dto",None)
-         baselines:List[Baseline] = []
+
+         if info.context is None:
+             raise ValueError("ValidationInfo.context unexpectedly null")
+
+         director: DirectorOutputDto = info.context.get("output_dto", None)
+         baselines: List[Baseline] = []
          for baseline in director.baselines:
-             # This matching is a bit strange, because baseline.tags.detections starts as a list of strings, but
-             # is eventually updated to a list of Detections as we construct all of the detection objects.
-             if name in [detection_name for detection_name in baseline.tags.detections if isinstance(detection_name,str)]:
+             # This matching is a bit strange, because baseline.tags.detections starts as a list of strings, but
+             # is eventually updated to a list of Detections as we construct all of the detection objects.
+             detection_names = [
+                 detection_name for detection_name in baseline.tags.detections if isinstance(detection_name, str)
+             ]
+             if name in detection_names:
                  baselines.append(baseline)

          return baselines

-     @field_validator('macros',mode="before")
+     @field_validator('macros', mode="before")
      @classmethod
-     def getDetectionMacros(cls, v:list[str], info:ValidationInfo)->list[Macro]:
-         director:DirectorOutputDto = info.context.get("output_dto",None)
-
-         search:Union[str,dict] = info.data.get("search",None)
-         if not isinstance(search,str):
-             #The search was sigma formatted (or failed other validation and was None), so we will not validate macros in it
+     def getDetectionMacros(cls, v: list[str], info: ValidationInfo) -> list[Macro]:
+         if info.context is None:
+             raise ValueError("ValidationInfo.context unexpectedly null")
+
+         director: DirectorOutputDto = info.context.get("output_dto", None)
+
+         search: str | dict[str, Any] | None = info.data.get("search", None)
+         if not isinstance(search, str):
+             # The search was sigma formatted (or failed other validation and was None), so we will
+             # not validate macros in it
              return []
-
-         search_name:Union[str,Any] = info.data.get("name",None)
-         assert isinstance(search_name,str), f"Expected 'search_name' to be a string, instead it was [{type(search_name)}]"
-
-
-
-         filter_macro_name = search_name.replace(' ', '_').replace('-', '_').replace('.', '_').replace('/', '_').lower() + '_filter'
-         try:
+
+         search_name: Union[str, Any] = info.data.get("name", None)
+         message = f"Expected 'search_name' to be a string, instead it was [{type(search_name)}]"
+         assert isinstance(search_name, str), message
+
+         filter_macro_name = search_name.replace(' ', '_')\
+             .replace('-', '_')\
+             .replace('.', '_')\
+             .replace('/', '_')\
+             .lower()\
+             + '_filter'
+         try:
              filter_macro = Macro.mapNamesToSecurityContentObjects([filter_macro_name], director)[0]
-         except:
+         except Exception:
              # Filter macro did not exist, so create one at runtime
-             filter_macro = Macro.model_validate({"name":filter_macro_name,
-                                                  "definition":'search *',
-                                                  "description":'Update this macro to limit the output results to filter out false positives.'})
+             filter_macro = Macro.model_validate(
+                 {
+                     "name": filter_macro_name,
+                     "definition": 'search *',
+                     "description": 'Update this macro to limit the output results to filter out false positives.'
+                 }
+             )
              director.addContentToDictMappings(filter_macro)
-
+
          macros_from_search = Macro.get_macros(search, director)
-
-         return macros_from_search

-     def get_content_dependencies(self)->list[SecurityContentObject]:
-         #Do this separately to satisfy type checker
+         return macros_from_search
+
+     def get_content_dependencies(self) -> list[SecurityContentObject]:
+         # Do this separately to satisfy type checker
          objects: list[SecurityContentObject] = []
-         objects += self.macros
-         objects += self.lookups
+         objects += self.macros
+         objects += self.lookups
          return objects
-
-
+
      @field_validator("deployment", mode="before")
-     def getDeployment(cls, v:Any, info:ValidationInfo)->Deployment:
-         return Deployment.getDeployment(v,info)
-         return SecurityContentObject.getDeploymentFromType(info.data.get("type",None), info)
-         # director: Optional[DirectorOutputDto] = info.context.get("output_dto",None)
-         # if not director:
-         #     raise ValueError("Cannot set deployment - DirectorOutputDto not passed to Detection Constructor in context")
-
-
-         # typeField = info.data.get("type",None)
-
-         # deps = [deployment for deployment in director.deployments if deployment.type == typeField]
-         # if len(deps) == 1:
-         #     return deps[0]
-         # elif len(deps) == 0:
-         #     raise ValueError(f"Failed to find Deployment for type '{typeField}' "\
-         #                      f"from possible {[deployment.type for deployment in director.deployments]}")
-         # else:
-         #     raise ValueError(f"Found more than 1 ({len(deps)}) Deployment for type '{typeField}' "\
-         #                      f"from possible {[deployment.type for deployment in director.deployments]}")
-
-
-     @staticmethod
-     def get_detections_from_filenames(detection_filenames:set[str], all_detections:list[Detection_Abstract])->list[Detection_Abstract]:
-         detection_filenames = set(str(pathlib.Path(filename).absolute()) for filename in detection_filenames)
-         detection_dict = SecurityContentObject.create_filename_to_content_dict(all_detections)
+     def getDeployment(cls, v: Any, info: ValidationInfo) -> Deployment:
+         return Deployment.getDeployment(v, info)

-         try:
-             return [detection_dict[detection_filename] for detection_filename in detection_filenames]
-         except Exception as e:
-             raise Exception(f"Failed to find detection object for modified detection: {str(e)}")
-
-
-     # @validator("type")
-     # def type_valid(cls, v, values):
-     #     if v.lower() not in [el.name.lower() for el in AnalyticsType]:
-     #         raise ValueError("not valid analytics type: " + values["name"])
-     #     return v
-
-
-     @field_validator("enabled_by_default",mode="before")
-     def only_enabled_if_production_status(cls,v:Any,info:ValidationInfo)->bool:
+     @field_validator("enabled_by_default", mode="before")
+     def only_enabled_if_production_status(cls, v: Any, info: ValidationInfo) -> bool:
          '''
          A detection can ONLY be enabled by default if it is a PRODUCTION detection.
          If not (for example, it is EXPERIMENTAL or DEPRECATED) then we will throw an exception.
          Similarly, a detection MUST be schedulable, meaning that it must be Anomaly, Correleation, or TTP.
          We will not allow Hunting searches to be enabled by default.
          '''
-         if v == False:
+         if v is False:
              return v
-
+
          status = DetectionStatus(info.data.get("status"))
          searchType = AnalyticsType(info.data.get("type"))
-         errors = []
+         errors: list[str] = []
          if status != DetectionStatus.production:
-             errors.append(f"status is '{status.name}'. Detections that are enabled by default MUST be '{DetectionStatus.production.value}'")
-
-         if searchType not in [AnalyticsType.Anomaly, AnalyticsType.Correlation, AnalyticsType.TTP]:
-             errors.append(f"type is '{searchType.value}'. Detections that are enabled by default MUST be one of the following types: {[AnalyticsType.Anomaly.value, AnalyticsType.Correlation.value, AnalyticsType.TTP.value]}")
+             errors.append(
+                 f"status is '{status.name}'. Detections that are enabled by default MUST be "
+                 f"'{DetectionStatus.production.value}'"
+             )
+
+         if searchType not in [AnalyticsType.Anomaly, AnalyticsType.Correlation, AnalyticsType.TTP]:
+             errors.append(
+                 f"type is '{searchType.value}'. Detections that are enabled by default MUST be one"
+                 " of the following types: "
+                 f"{[AnalyticsType.Anomaly.value, AnalyticsType.Correlation.value, AnalyticsType.TTP.value]}")
          if len(errors) > 0:
              error_message = "\n - ".join(errors)
              raise ValueError(f"Detection is 'enabled_by_default: true' however \n - {error_message}")
-
+
          return v
-

      @model_validator(mode="after")
      def addTags_nist(self):
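
For readers unfamiliar with the `model_post_init` hook adopted in the hunk above: in Pydantic v2 it runs after validation, and the new code relies on the validation context being forwarded into it. A minimal sketch of that flow — the `output_dto` key mirrors the diff, the model and values are illustrative, and this assumes a Pydantic version that forwards the context to the hook, as the code above clearly requires:

```python
# Minimal sketch of passing a validation context into model_post_init,
# as model_post_init above does with the "output_dto" key.
from typing import Any
from pydantic import BaseModel

class MiniDetection(BaseModel):
    name: str

    def model_post_init(self, __context: Any) -> None:
        # __context is whatever was passed as context= during validation
        director = (__context or {}).get("output_dto")
        print(f"post-init for {self.name!r}; output_dto={director!r}")

MiniDetection.model_validate({"name": "demo"}, context={"output_dto": "stub-director"})
```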
@@ -568,133 +578,147 @@ class Detection_Abstract(SecurityContentObject):
          else:
              self.tags.nist = [NistCategory.DE_AE]
          return self
-
+
      @model_validator(mode="after")
      def ensureProperObservablesExist(self):
          """
          If a detections is PRODUCTION and either TTP or ANOMALY, then it MUST have an Observable with the VICTIM role.

          Returns:
-             self: Returns itself if the valdiation passes
+             self: Returns itself if the valdiation passes
          """
-         if self.status not in [DetectionStatus.production.value]:
+         # NOTE: we ignore the type error around self.status because we are using Pydantic's
+         # use_enum_values configuration
+         # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
+         if self.status not in [DetectionStatus.production.value]: # type: ignore
              # Only perform this validation on production detections
              return self

          if self.type not in [AnalyticsType.TTP.value, AnalyticsType.Anomaly.value]:
              # Only perform this validation on TTP and Anomaly detections
-             return self
-
-         #Detection is required to have a victim
-         roles = []
+             return self
+
+         # Detection is required to have a victim
+         roles: list[str] = []
          for observable in self.tags.observable:
              roles.extend(observable.role)
-
+
          if roles.count("Victim") == 0:
-             raise ValueError(f"Error, there must be AT LEAST 1 Observable with the role 'Victim' declared in Detection.tags.observables. However, none were found.")
-
+             raise ValueError(
+                 "Error, there must be AT LEAST 1 Observable with the role 'Victim' declared in "
+                 "Detection.tags.observables. However, none were found."
+             )
+
          # Exactly one victim was found
          return self
-

      @model_validator(mode="after")
      def search_observables_exist_validate(self):
-
          if isinstance(self.search, str):
-
+
              observable_fields = [ob.name.lower() for ob in self.tags.observable]
-
-             #All $field$ fields from the message must appear in the search
+
+             # All $field$ fields from the message must appear in the search
              field_match_regex = r"\$([^\s.]*)\$"
-
-
+
+             missing_fields: set[str]
              if self.tags.message:
-                 message_fields = [match.replace("$", "").lower() for match in re.findall(field_match_regex, self.tags.message.lower())]
+                 matches = re.findall(field_match_regex, self.tags.message.lower())
+                 message_fields = [match.replace("$", "").lower() for match in matches]
                  missing_fields = set([field for field in observable_fields if field not in self.search.lower()])
              else:
                  message_fields = []
                  missing_fields = set()
-

-             error_messages = []
+             error_messages: list[str] = []
              if len(missing_fields) > 0:
-                 error_messages.append(f"The following fields are declared as observables, but do not exist in the search: {missing_fields}")
+                 error_messages.append(
+                     "The following fields are declared as observables, but do not exist in the "
+                     f"search: {missing_fields}"
+                 )

-
              missing_fields = set([field for field in message_fields if field not in self.search.lower()])
              if len(missing_fields) > 0:
-                 error_messages.append(f"The following fields are used as fields in the message, but do not exist in the search: {missing_fields}")
-
-             if len(error_messages) > 0 and self.status == DetectionStatus.production.value:
-                 msg = "Use of fields in observables/messages that do not appear in search:\n\t- "+ "\n\t- ".join(error_messages)
-                 raise(ValueError(msg))
-
+                 error_messages.append(
+                     "The following fields are used as fields in the message, but do not exist in "
+                     f"the search: {missing_fields}"
+                 )
+
+             # NOTE: we ignore the type error around self.status because we are using Pydantic's
+             # use_enum_values configuration
+             # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
+             if len(error_messages) > 0 and self.status == DetectionStatus.production.value: # type: ignore
+                 msg = (
+                     "Use of fields in observables/messages that do not appear in search:\n\t- "
+                     "\n\t- ".join(error_messages)
+                 )
+                 raise ValueError(msg)
+
          # Found everything
          return self
-

      @model_validator(mode='after')
      def ensurePresenceOfRequiredTests(self):
-         # TODO (cmcginley): Fix detection_abstract.tests_validate so that it surfaces validation errors
-         # (e.g. a lack of tests) to the final results, instead of just showing a failed detection w/
-         # no tests (maybe have a message propagated at the detection level? do a separate coverage
-         # check as part of validation?):
-
-
-         #Only production analytics require tests
-         if self.status != DetectionStatus.production.value:
+         # NOTE: we ignore the type error around self.status because we are using Pydantic's
+         # use_enum_values configuration
+         # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
+
+         # Only production analytics require tests
+         if self.status != DetectionStatus.production.value: # type: ignore
              return self
-
+
          # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
          # Accordingly, we do not need to do additional checks if the type is Correlation
          if self.type in set([AnalyticsType.Correlation.value]):
              return self
-
+
          if self.tags.manual_test is not None:
              for test in self.tests:
-                 test.skip(f"TEST SKIPPED: Detection marked as 'manual_test' with explanation: '{self.tags.manual_test}'")
+                 test.skip(
+                     f"TEST SKIPPED: Detection marked as 'manual_test' with explanation: '{self.tags.manual_test}'"
+                 )

          if len(self.tests) == 0:
              raise ValueError(f"At least one test is REQUIRED for production detection: {self.name}")
-

          return self

      @field_validator("tests")
-     def tests_validate(cls, v, info:ValidationInfo):
-         # TODO (cmcginley): Fix detection_abstract.tests_validate so that it surfaces validation errors
-         # (e.g. a lack of tests) to the final results, instead of just showing a failed detection w/
-         # no tests (maybe have a message propagated at the detection level? do a separate coverage
-         # check as part of validation?):
-
-
-         #Only production analytics require tests
-         if info.data.get("status","") != DetectionStatus.production.value:
+     def tests_validate(
+         cls,
+         v: list[UnitTest | IntegrationTest],
+         info: ValidationInfo
+     ) -> list[UnitTest | IntegrationTest]:
+         # Only production analytics require tests
+         if info.data.get("status", "") != DetectionStatus.production.value:
              return v
-
+
          # All types EXCEPT Correlation MUST have test(s). Any other type, including newly defined types, requires them.
          # Accordingly, we do not need to do additional checks if the type is Correlation
-         if info.data.get("type","") in set([AnalyticsType.Correlation.value]):
+         if info.data.get("type", "") in set([AnalyticsType.Correlation.value]):
              return v
-
-
-         # Ensure that there is at least 1 test
+
+         # Ensure that there is at least 1 test
          if len(v) == 0:
-             if info.data.get("tags",None) and info.data.get("tags").manual_test is not None:
+             if info.data.get("tags", None) and info.data.get("tags").manual_test is not None: # type: ignore
                  # Detections that are manual_test MAY have detections, but it is not required. If they
                  # do not have one, then create one which will be a placeholder.
                  # Note that this fake UnitTest (and by extension, Integration Test) will NOT be generated
                  # if there ARE test(s) defined for a Detection.
-                 placeholder_test = UnitTest(name="PLACEHOLDER FOR DETECTION TAGGED MANUAL_TEST WITH NO TESTS SPECIFIED IN YML FILE", attack_data=[])
+                 placeholder_test = UnitTest( # type: ignore
+                     name="PLACEHOLDER FOR DETECTION TAGGED MANUAL_TEST WITH NO TESTS SPECIFIED IN YML FILE",
+                     attack_data=[]
+                 )
                  return [placeholder_test]
-
-             else:
-                 raise ValueError("At least one test is REQUIRED for production detection: " + info.data.get("name", "NO NAME FOUND"))

+             else:
+                 raise ValueError(
+                     "At least one test is REQUIRED for production detection: " + info.data.get("name", "NO NAME FOUND")
+                 )

-         #No issues - at least one test provided for production type requiring testing
+         # No issues - at least one test provided for production type requiring testing
          return v
-
+
      def all_tests_successful(self) -> bool:
          """
          Checks that all tests in the detection succeeded. If no tests are defined, consider that a
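
To make the `$field$` convention checked by `search_observables_exist_validate` in the hunk above concrete, here is an illustrative run of the same regex against an invented message:

```python
# Illustrative run of the $field$ extraction used in
# search_observables_exist_validate above; the message text is hypothetical.
import re

field_match_regex = r"\$([^\s.]*)\$"
message = "Suspicious process $process_name$ run by $user$ on $dest$"
matches = re.findall(field_match_regex, message.lower())
message_fields = [match.replace("$", "").lower() for match in matches]
assert message_fields == ["process_name", "user", "dest"]
```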
@@ -732,7 +756,7 @@ class Detection_Abstract(SecurityContentObject):
          detection_fields: list[str] = ["name", "search"],
          test_result_fields: list[str] = ["success", "message", "exception", "status", "duration", "wait_duration"],
          test_job_fields: list[str] = ["resultCount", "runDuration"],
-     ) -> dict:
+     ) -> dict[str, Any]:
          """
          Aggregates a dictionary summarizing the detection model, including all test results
          :param detection_fields: the fields of the top level detection to gather
@@ -741,7 +765,7 @@ class Detection_Abstract(SecurityContentObject):
          :returns: a dict summary
          """
          # Init the summary dict
-         summary_dict = {}
+         summary_dict: dict[str, Any] = {}

          # Grab the top level detection fields
          for field in detection_fields:
@@ -773,16 +797,18 @@ class Detection_Abstract(SecurityContentObject):
                  result["message"] = "NO RESULT - Test not run"

              # Add the result to our list
-             summary_dict["tests"].append(result)
+             summary_dict["tests"].append(result) # type: ignore

          # Return the summary

          return summary_dict

-
-     def getMetadata(self)->dict[str,str]:
-         return {'detection_id':str(self.id),
-                 'deprecated':'1' if self.status==DetectionStatus.deprecated.value else '0',
-                 'detection_version':str(self.version)}
-
-
+     def getMetadata(self) -> dict[str, str]:
+         # NOTE: we ignore the type error around self.status because we are using Pydantic's
+         # use_enum_values configuration
+         # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.populate_by_name
+         return {
+             'detection_id': str(self.id),
+             'deprecated': '1' if self.status == DetectionStatus.deprecated.value else '0', # type: ignore
+             'detection_version': str(self.version)
+         }
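
Finally, several `# type: ignore` comments throughout this diff point at the same Pydantic behavior: with `use_enum_values=True`, enum-typed fields are stored as their raw values after validation, which is why `self.status` is compared to `DetectionStatus.deprecated.value` rather than to the enum member. A minimal sketch of that behavior (the enum here is a stand-in, not the real contentctl definition):

```python
# Minimal sketch of the use_enum_values behavior referenced by the
# NOTE comments above; DetectionStatus here is a stand-in definition.
from enum import Enum
from pydantic import BaseModel, ConfigDict

class DetectionStatus(str, Enum):
    production = "production"
    deprecated = "deprecated"

class MiniDetection(BaseModel):
    model_config = ConfigDict(use_enum_values=True)
    status: DetectionStatus

det = MiniDetection(status=DetectionStatus.deprecated)
# The field now holds the plain string value, not the enum member,
# so comparisons target .value -- exactly as getMetadata() does above.
assert det.status == DetectionStatus.deprecated.value
assert not isinstance(det.status, DetectionStatus)
```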