contentctl 4.3.5__py3-none-any.whl → 4.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. contentctl/actions/build.py +1 -0
  2. contentctl/actions/detection_testing/GitService.py +10 -10
  3. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +68 -38
  4. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +5 -1
  5. contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +10 -8
  6. contentctl/actions/inspect.py +6 -4
  7. contentctl/actions/new_content.py +10 -2
  8. contentctl/actions/release_notes.py +5 -3
  9. contentctl/actions/validate.py +2 -1
  10. contentctl/enrichments/cve_enrichment.py +6 -7
  11. contentctl/input/director.py +14 -12
  12. contentctl/input/new_content_questions.py +9 -42
  13. contentctl/objects/abstract_security_content_objects/detection_abstract.py +147 -7
  14. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +17 -9
  15. contentctl/objects/base_test_result.py +7 -7
  16. contentctl/objects/baseline.py +12 -18
  17. contentctl/objects/baseline_tags.py +2 -5
  18. contentctl/objects/config.py +15 -9
  19. contentctl/objects/constants.py +30 -0
  20. contentctl/objects/correlation_search.py +79 -114
  21. contentctl/objects/dashboard.py +100 -0
  22. contentctl/objects/deployment.py +20 -5
  23. contentctl/objects/detection_tags.py +22 -20
  24. contentctl/objects/drilldown.py +70 -0
  25. contentctl/objects/enums.py +26 -22
  26. contentctl/objects/investigation.py +23 -15
  27. contentctl/objects/investigation_tags.py +4 -3
  28. contentctl/objects/lookup.py +8 -1
  29. contentctl/objects/macro.py +16 -7
  30. contentctl/objects/notable_event.py +6 -5
  31. contentctl/objects/risk_analysis_action.py +4 -4
  32. contentctl/objects/risk_event.py +8 -7
  33. contentctl/objects/story.py +4 -16
  34. contentctl/objects/throttling.py +46 -0
  35. contentctl/output/conf_output.py +4 -0
  36. contentctl/output/conf_writer.py +20 -3
  37. contentctl/output/templates/analyticstories_detections.j2 +2 -2
  38. contentctl/output/templates/analyticstories_investigations.j2 +5 -5
  39. contentctl/output/templates/analyticstories_stories.j2 +1 -1
  40. contentctl/output/templates/savedsearches_baselines.j2 +2 -3
  41. contentctl/output/templates/savedsearches_detections.j2 +12 -7
  42. contentctl/output/templates/savedsearches_investigations.j2 +3 -4
  43. contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +10 -1
  44. {contentctl-4.3.5.dist-info → contentctl-4.4.1.dist-info}/METADATA +3 -2
  45. {contentctl-4.3.5.dist-info → contentctl-4.4.1.dist-info}/RECORD +48 -46
  46. {contentctl-4.3.5.dist-info → contentctl-4.4.1.dist-info}/WHEEL +1 -1
  47. contentctl/output/templates/finding_report.j2 +0 -30
  48. {contentctl-4.3.5.dist-info → contentctl-4.4.1.dist-info}/LICENSE.md +0 -0
  49. {contentctl-4.3.5.dist-info → contentctl-4.4.1.dist-info}/entry_points.txt +0 -0
contentctl/objects/correlation_search.py
@@ -1,10 +1,11 @@
 import logging
 import time
 import json
-from typing import Union, Optional, Any
+from typing import Any
 from enum import Enum
+from functools import cached_property
 
-from pydantic import BaseModel, validator, Field, PrivateAttr
+from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
 from splunklib.results import JSONResultsReader, Message  # type: ignore
 from splunklib.binding import HTTPError, ResponseReader  # type: ignore
 import splunklib.client as splunklib  # type: ignore
@@ -15,7 +16,7 @@ from contentctl.objects.notable_action import NotableAction
 from contentctl.objects.base_test_result import TestResultStatus
 from contentctl.objects.integration_test_result import IntegrationTestResult
 from contentctl.actions.detection_testing.progress_bar import (
-    format_pbar_string,
+    format_pbar_string,  # type: ignore
    TestReportingType,
    TestingStates
 )
@@ -178,13 +179,14 @@ class PbarData(BaseModel):
     :param fq_test_name: the fully qualifed (fq) test name ("<detection_name>:<test_name>") used for logging
     :param start_time: the start time used for logging
     """
-    pbar: tqdm
+    pbar: tqdm  # type: ignore
     fq_test_name: str
     start_time: float
 
-    class Config:
-        # needed to support the tqdm type
-        arbitrary_types_allowed = True
+    # needed to support the tqdm type
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
 
 
 class CorrelationSearch(BaseModel):
@@ -197,143 +199,110 @@ class CorrelationSearch(BaseModel):
     :param pbar_data: the encapsulated info needed for logging w/ pbar
     :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
     """
-    ## The following three fields are explicitly needed at instantiation  # noqa: E266
-
     # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
-    detection: Detection
+    detection: Detection = Field(...)
 
     # a Service instance representing a connection to a Splunk instance
-    service: splunklib.Service
+    service: splunklib.Service = Field(...)
 
     # the encapsulated info needed for logging w/ pbar
-    pbar_data: PbarData
-
-    ## The following field is optional for instantiation  # noqa: E266
+    pbar_data: PbarData = Field(...)
 
     # The index attack data is sent to; can be None if we are relying on the caller to do our
     # cleanup of this index
-    test_index: Optional[str] = Field(default=None, min_length=1)
-
-    ## All remaining fields can be derived from other fields or have intentional defaults that  # noqa: E266
-    ## should not be changed (validators should prevent instantiating some of these fields directly  # noqa: E266
-    ## to prevent undefined behavior)  # noqa: E266
+    test_index: str | None = Field(default=None, min_length=1)
 
     # The logger to use (logs all go to a null pipe unless ENABLE_LOGGING is set to True, so as not
     # to conflict w/ tqdm)
-    logger: logging.Logger = Field(default_factory=get_logger)
-
-    # The search name (e.g. "ESCU - Windows Modify Registry EnableLinkedConnections - Rule")
-    name: Optional[str] = None
-
-    # The path to the saved search on the Splunk instance
-    splunk_path: Optional[str] = None
-
-    # A model of the saved search as provided by splunklib
-    saved_search: Optional[splunklib.SavedSearch] = None
+    logger: logging.Logger = Field(default_factory=get_logger, init=False)
 
     # The set of indexes to clear on cleanup
-    indexes_to_purge: set[str] = set()
+    indexes_to_purge: set[str] = Field(default=set(), init=False)
 
     # The risk analysis adaptive response action (if defined)
-    risk_analysis_action: Union[RiskAnalysisAction, None] = None
+    _risk_analysis_action: RiskAnalysisAction | None = PrivateAttr(default=None)
 
     # The notable adaptive response action (if defined)
-    notable_action: Union[NotableAction, None] = None
+    _notable_action: NotableAction | None = PrivateAttr(default=None)
 
     # The list of risk events found
-    _risk_events: Optional[list[RiskEvent]] = PrivateAttr(default=None)
+    _risk_events: list[RiskEvent] | None = PrivateAttr(default=None)
 
     # The list of notable events found
-    _notable_events: Optional[list[NotableEvent]] = PrivateAttr(default=None)
+    _notable_events: list[NotableEvent] | None = PrivateAttr(default=None)
 
-    class Config:
-        # needed to allow fields w/ types like SavedSearch
-        arbitrary_types_allowed = True
-        # We want to have more ridgid typing
-        extra = 'forbid'
+    # Need arbitrary types to allow fields w/ types like SavedSearch; we also want to forbid
+    # unexpected fields
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra='forbid'
+    )
 
-    @validator("name", always=True)
-    @classmethod
-    def _convert_detection_to_search_name(cls, v, values) -> str:
-        """
-        Validate name and derive if None
-        """
-        if "detection" not in values:
-            raise ValueError("detection missing; name is dependent on detection")
+    def model_post_init(self, __context: Any) -> None:
+        super().model_post_init(__context)
 
-        expected_name = f"ESCU - {values['detection'].name} - Rule"
-        if v is not None and v != expected_name:
-            raise ValueError(
-                "name must be derived from detection; leave as None and it will be derived automatically"
-            )
-        return expected_name
+        # Parse the initial values for the risk/notable actions
+        self._parse_risk_and_notable_actions()
 
-    @validator("splunk_path", always=True)
-    @classmethod
-    def _derive_splunk_path(cls, v, values) -> str:
+    @computed_field
+    @cached_property
+    def name(self) -> str:
         """
-        Validate splunk_path and derive if None
+        The search name (e.g. "ESCU - Windows Modify Registry EnableLinkedConnections - Rule")
+
+        :returns: the search name
+        :rtype: str
         """
-        if "name" not in values:
-            raise ValueError("name missing; splunk_path is dependent on name")
+        return f"ESCU - {self.detection.name} - Rule"
 
-        expected_path = f"saved/searches/{values['name']}"
-        if v is not None and v != expected_path:
-            raise ValueError(
-                "splunk_path must be derived from name; leave as None and it will be derived automatically"
-            )
-        return f"saved/searches/{values['name']}"
+    @computed_field
+    @cached_property
+    def splunk_path(self) -> str:
+        """
+        The path to the saved search on the Splunk instance
 
-    @validator("saved_search", always=True)
-    @classmethod
-    def _instantiate_saved_search(cls, v, values) -> str:
+        :returns: the search path
+        :rtype: str
         """
-        Ensure saved_search was initialized as None and derive
+        return f"/saved/searches/{self.name}"
+
+    @computed_field
+    @cached_property
+    def saved_search(self) -> splunklib.SavedSearch:
         """
-        if "splunk_path" not in values or "service" not in values:
-            raise ValueError("splunk_path or service missing; saved_search is dependent on both")
+        A model of the saved search as provided by splunklib
 
-        if v is not None:
-            raise ValueError(
-                "saved_search must be derived from the service and splunk_path; leave as None and it will be derived "
-                "automatically"
-            )
+        :returns: the SavedSearch object
+        :rtype: :class:`splunklib.client.SavedSearch`
+        """
         return splunklib.SavedSearch(
-            values['service'],
-            values['splunk_path'],
+            self.service,
+            self.splunk_path,
        )
 
-    @validator("risk_analysis_action", always=True)
-    @classmethod
-    def _init_risk_analysis_action(cls, v, values) -> Optional[RiskAnalysisAction]:
-        """
-        Initialize risk_analysis_action
+    # TODO (cmcginley): need to make this refreshable
+    @computed_field
+    @property
+    def risk_analysis_action(self) -> RiskAnalysisAction | None:
         """
-        if "saved_search" not in values:
-            raise ValueError("saved_search missing; risk_analysis_action is dependent on saved_search")
+        The risk analysis adaptive response action (if defined)
 
-        if v is not None:
-            raise ValueError(
-                "risk_analysis_action must be derived from the saved_search; leave as None and it will be derived "
-                "automatically"
-            )
-        return CorrelationSearch._get_risk_analysis_action(values['saved_search'].content)
-
-    @validator("notable_action", always=True)
-    @classmethod
-    def _init_notable_action(cls, v, values) -> Optional[NotableAction]:
+        :returns: the RiskAnalysisAction object, if it exists
+        :rtype: :class:`contentctl.objects.risk_analysis_action.RiskAnalysisAction` | None
         """
-        Initialize notable_action
+        return self._risk_analysis_action
+
+    # TODO (cmcginley): need to make this refreshable
+    @computed_field
+    @property
+    def notable_action(self) -> NotableAction | None:
         """
-        if "saved_search" not in values:
-            raise ValueError("saved_search missing; notable_action is dependent on saved_search")
+        The notable adaptive response action (if defined)
 
-        if v is not None:
-            raise ValueError(
-                "notable_action must be derived from the saved_search; leave as None and it will be derived "
-                "automatically"
-            )
-        return CorrelationSearch._get_notable_action(values['saved_search'].content)
+        :returns: the NotableAction object, if it exists
+        :rtype: :class:`contentctl.objects.notable_action.NotableAction` | None
+        """
+        return self._notable_action
 
     @property
     def earliest_time(self) -> str:
@@ -393,7 +362,7 @@ class CorrelationSearch(BaseModel):
         return self.notable_action is not None
 
     @staticmethod
-    def _get_risk_analysis_action(content: dict[str, Any]) -> Optional[RiskAnalysisAction]:
+    def _get_risk_analysis_action(content: dict[str, Any]) -> RiskAnalysisAction | None:
         """
         Given the saved search content, parse the risk analysis action
         :param content: a dict of strings to values
@@ -407,7 +376,7 @@
         return None
 
     @staticmethod
-    def _get_notable_action(content: dict[str, Any]) -> Optional[NotableAction]:
+    def _get_notable_action(content: dict[str, Any]) -> NotableAction | None:
         """
         Given the saved search content, parse the notable action
         :param content: a dict of strings to values
@@ -431,10 +400,6 @@
                 relevant.append(observable)
         return relevant
 
-    # TODO (PEX-484): ideally, we could handle this and the following init w/ a call to
-    # model_post_init, so that all the logic is encapsulated w/in _parse_risk_and_notable_actions
-    # but that is a pydantic v2 feature (see the init validators for risk/notable actions):
-    # https://docs.pydantic.dev/latest/api/base_model/#pydantic.main.BaseModel.model_post_init
     def _parse_risk_and_notable_actions(self) -> None:
         """Parses the risk/notable metadata we care about from self.saved_search.content
 
@@ -445,12 +410,12 @@
             unpacked to be anything other than a singleton
         """
         # grab risk details if present
-        self.risk_analysis_action = CorrelationSearch._get_risk_analysis_action(
+        self._risk_analysis_action = CorrelationSearch._get_risk_analysis_action(
             self.saved_search.content  # type: ignore
         )
 
         # grab notable details if present
-        self.notable_action = CorrelationSearch._get_notable_action(self.saved_search.content)  # type: ignore
+        self._notable_action = CorrelationSearch._get_notable_action(self.saved_search.content)  # type: ignore
 
     def refresh(self) -> None:
         """Refreshes the metadata in the SavedSearch entity, and re-parses the fields we care about
@@ -738,7 +703,7 @@
         # TODO (#250): Re-enable and refactor code that validates the specific risk counts
         # Validate risk events in aggregate; we should have an equal amount of risk events for each
         # relevant observable, and the total count should match the total number of events
-        # individual_count: Optional[int] = None
+        # individual_count: int | None = None
         # total_count = 0
         # for observable_str in observable_counts:
         # self.logger.debug(
@@ -802,7 +767,7 @@
         )
 
         # initialize result as None
-        result: Optional[IntegrationTestResult] = None
+        result: IntegrationTestResult | None = None
 
         # keep track of time slept and number of attempts for exponential backoff (base 2)
         elapsed_sleep_time = 0
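
Editor's note: the correlation_search.py changes above amount to a pydantic v1-to-v2 migration: the always=True field validators become @computed_field properties backed by functools.cached_property, the nested class Config becomes model_config = ConfigDict(...), derived state moves into PrivateAttr fields, and init-time parsing moves into model_post_init. The standalone sketch below illustrates that pattern; MiniCorrelationSearch and the stand-in Detection class are illustrative only, not contentctl's actual classes.

from functools import cached_property
from typing import Any

from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, computed_field


class Detection(BaseModel):
    # Stand-in for contentctl's Detection; only the name is needed here.
    name: str


class MiniCorrelationSearch(BaseModel):
    # Forbid unexpected fields, mirroring extra='forbid' in the diff above.
    model_config = ConfigDict(extra="forbid")

    # Required at instantiation.
    detection: Detection = Field(...)

    # Derived state lives in a private attribute rather than a public Optional field.
    _parsed: dict[str, Any] | None = PrivateAttr(default=None)

    def model_post_init(self, __context: Any) -> None:
        # Runs after validation; replaces the chain of always=True field validators.
        super().model_post_init(__context)
        self._parsed = {"source": self.name}

    @computed_field  # included in model_dump() output
    @cached_property  # computed once, on first access
    def name(self) -> str:
        return f"ESCU - {self.detection.name} - Rule"

    @computed_field
    @cached_property
    def splunk_path(self) -> str:
        return f"/saved/searches/{self.name}"


if __name__ == "__main__":
    search = MiniCorrelationSearch(detection=Detection(name="Anomalous usage of 7zip"))
    print(search.splunk_path)   # /saved/searches/ESCU - Anomalous usage of 7zip - Rule
    print(search.model_dump())  # computed fields appear alongside declared fields

Caching fits name and splunk_path because they never change for a given detection; risk_analysis_action and notable_action stay plain properties over private attributes, which appears to be what lets refresh() re-parse them later.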
contentctl/objects/dashboard.py (new file)
@@ -0,0 +1,100 @@
+from typing import Any
+from pydantic import Field, Json, model_validator
+
+import pathlib
+from jinja2 import Environment
+import json
+from contentctl.objects.security_content_object import SecurityContentObject
+from contentctl.objects.config import build
+from enum import StrEnum
+
+DEFAULT_DASHBAORD_JINJA2_TEMPLATE = '''<dashboard version="2" theme="{{ dashboard.theme }}">
+    <label>{{ dashboard.label(config) }}</label>
+    <description></description>
+    <definition><![CDATA[
+{{ dashboard.pretty_print_json_obj() }}
+    ]]></definition>
+    <meta type="hiddenElements"><![CDATA[
+{
+    "hideEdit": false,
+    "hideOpenInSearch": false,
+    "hideExport": false
+}
+    ]]></meta>
+</dashboard>'''
+
+class DashboardTheme(StrEnum):
+    light = "light"
+    dark = "dark"
+
+class Dashboard(SecurityContentObject):
+    j2_template: str = Field(default=DEFAULT_DASHBAORD_JINJA2_TEMPLATE, description="Jinja2 Template used to construct the dashboard")
+    description: str = Field(...,description="A description of the dashboard. This does not have to match "
+                             "the description of the dashboard in the JSON file.", max_length=10000)
+    theme: DashboardTheme = Field(default=DashboardTheme.light, description="The theme of the dashboard. Choose between 'light' and 'dark'.")
+    json_obj: Json[dict[str,Any]] = Field(..., description="Valid JSON object that describes the dashboard")
+
+
+
+    def label(self, config:build)->str:
+        return f"{config.app.label} - {self.name}"
+
+    @model_validator(mode="before")
+    @classmethod
+    def validate_fields_from_json(cls, data:Any)->Any:
+        yml_file_name:str|None = data.get("file_path", None)
+        if yml_file_name is None:
+            raise ValueError("File name not passed to dashboard constructor")
+        yml_file_path = pathlib.Path(yml_file_name)
+        json_file_path = yml_file_path.with_suffix(".json")
+
+        if not json_file_path.is_file():
+            raise ValueError(f"Required file {json_file_path} does not exist.")
+
+        with open(json_file_path,'r') as jsonFilePointer:
+            try:
+                json_obj:dict[str,Any] = json.load(jsonFilePointer)
+            except Exception as e:
+                raise ValueError(f"Unable to load data from {json_file_path}: {str(e)}")
+
+        name_from_file = data.get("name",None)
+        name_from_json = json_obj.get("title",None)
+
+        errors:list[str] = []
+        if name_from_json is None:
+            errors.append(f"'title' field is missing from {json_file_path}")
+        elif name_from_json != name_from_file:
+            errors.append(f"The 'title' field in the JSON file [{json_file_path}] does not match the 'name' field in the YML object [{yml_file_path}]. These two MUST match:\n "
+                          f"title in JSON : {name_from_json}\n "
+                          f"title in YML : {name_from_file}\n ")
+
+        description_from_json = json_obj.get("description",None)
+        if description_from_json is None:
+            errors.append("'description' field is missing from field 'json_object'")
+
+        if len(errors) > 0 :
+            err_string = "\n - ".join(errors)
+            raise ValueError(f"Error(s) validating dashboard:\n - {err_string}")
+
+        data['name'] = name_from_file
+        data['json_obj'] = json.dumps(json_obj)
+        return data
+
+
+    def pretty_print_json_obj(self):
+        return json.dumps(self.json_obj, indent=4)
+
+    def getOutputFilepathRelativeToAppRoot(self, config:build)->pathlib.Path:
+        filename = f"{self.file_path.stem}.xml".lower()
+        return pathlib.Path("default/data/ui/views")/filename
+
+
+    def writeDashboardFile(self, j2_env:Environment, config:build):
+        template = j2_env.from_string(self.j2_template)
+        dashboard_text = template.render(config=config, dashboard=self)
+
+        with open(config.getPackageDirectoryPath()/self.getOutputFilepathRelativeToAppRoot(config), 'a') as f:
+            output_xml = dashboard_text.encode('utf-8', 'ignore').decode('utf-8')
+            f.write(output_xml)
+
+
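
Editor's note: the new Dashboard object pairs each dashboard YML with a sibling JSON definition and uses a mode="before" model validator to load and cross-check that JSON before field validation runs. Below is a minimal, self-contained sketch of that load-and-cross-check pattern; MiniDashboard is a simplified stand-in for the real class, which also derives from SecurityContentObject, validates the JSON description, and renders XML through the Jinja2 template shown above.

import json
import pathlib
import tempfile
from typing import Any

from pydantic import BaseModel, Json, model_validator


class MiniDashboard(BaseModel):
    # Simplified stand-in; the real Dashboard inherits name/file_path from SecurityContentObject.
    name: str
    file_path: pathlib.Path
    json_obj: Json[dict[str, Any]]

    @model_validator(mode="before")
    @classmethod
    def load_sibling_json(cls, data: Any) -> Any:
        # Before field validation, load <dashboard>.json next to <dashboard>.yml
        # and require its 'title' to match the YML 'name', as the diff does.
        yml_path = pathlib.Path(data["file_path"])
        json_path = yml_path.with_suffix(".json")
        if not json_path.is_file():
            raise ValueError(f"Required file {json_path} does not exist.")
        obj = json.loads(json_path.read_text())
        if obj.get("title") != data.get("name"):
            raise ValueError("JSON 'title' must match YML 'name'")
        # Json[...] fields expect a JSON string, so re-serialize the dict.
        data["json_obj"] = json.dumps(obj)
        return data


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp:
        json_path = pathlib.Path(tmp) / "my_dashboard.json"
        json_path.write_text(json.dumps({"title": "My Dashboard", "description": "demo"}))
        # The YML file itself is never opened in this sketch; only its path matters.
        dash = MiniDashboard(name="My Dashboard", file_path=pathlib.Path(tmp) / "my_dashboard.yml")
        print(dash.json_obj["description"])  # demo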
contentctl/objects/deployment.py
@@ -1,7 +1,8 @@
 from __future__ import annotations
-from pydantic import Field, computed_field, model_validator,ValidationInfo, model_serializer
-from typing import Optional,Any
-
+from pydantic import Field, computed_field,ValidationInfo, model_serializer, NonNegativeInt
+from typing import Any
+import uuid
+import datetime
 from contentctl.objects.security_content_object import SecurityContentObject
 from contentctl.objects.deployment_scheduling import DeploymentScheduling
 from contentctl.objects.alert_action import AlertAction
@@ -15,9 +16,13 @@ class Deployment(SecurityContentObject):
     #author: str = None
     #description: str = None
     #contentType: SecurityContentType = SecurityContentType.deployments
+
+
     scheduling: DeploymentScheduling = Field(...)
     alert_action: AlertAction = AlertAction()
     type: DeploymentType = Field(...)
+    author: str = Field(...,max_length=255)
+    version: NonNegativeInt = 1
 
     #Type was the only tag exposed and should likely be removed/refactored.
     #For transitional reasons, provide this as a computed_field in prep for removal
@@ -25,7 +30,8 @@
     @property
     def tags(self)->dict[str,DeploymentType]:
         return {"type": self.type}
-
+
+
     @staticmethod
     def getDeployment(v:dict[str,Any], info:ValidationInfo)->Deployment:
         if v != {}:
@@ -36,8 +42,17 @@
         detection_name = info.data.get("name", None)
         if detection_name is None:
             raise ValueError("Could not create inline deployment - Baseline or Detection lacking 'name' field,")
+
+        # Add a number of static values
+        v.update({
+            'name': f"{detection_name} - Inline Deployment",
+            'id':uuid.uuid4(),
+            'date': datetime.date.today(),
+            'description': "Inline deployment created at runtime.",
+            'author': "contentctl tool"
+        })
+
 
-        v['name'] = f"{detection_name} - Inline Deployment"
         # This constructs a temporary in-memory deployment,
         # allowing the deployment to be easily defined in the
         # detection on a per detection basis.
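
Editor's note: getDeployment now replaces the single v['name'] assignment with a v.update(...) that also injects a generated id, today's date, a description, and an author before the inline Deployment is constructed, since author is now required and version defaults to 1. A standalone sketch of that construct-from-augmented-dict pattern (MiniDeployment is illustrative, not contentctl's actual Deployment model):

import datetime
import uuid
from typing import Any

from pydantic import BaseModel, Field, NonNegativeInt


class MiniDeployment(BaseModel):
    # Cut-down stand-in; the real Deployment also carries scheduling and alert_action.
    name: str
    id: uuid.UUID
    date: datetime.date
    description: str
    author: str = Field(..., max_length=255)
    version: NonNegativeInt = 1


def inline_deployment(v: dict[str, Any], detection_name: str) -> MiniDeployment:
    # Mirror the diff's approach: fill generated/static values into the raw dict
    # before building a per-detection, in-memory deployment.
    v.update({
        "name": f"{detection_name} - Inline Deployment",
        "id": uuid.uuid4(),
        "date": datetime.date.today(),
        "description": "Inline deployment created at runtime.",
        "author": "contentctl tool",
    })
    return MiniDeployment(**v)


if __name__ == "__main__":
    print(inline_deployment({}, "Anomalous usage of 7zip").name)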
contentctl/objects/detection_tags.py
@@ -16,6 +16,7 @@ from pydantic import (
     model_validator
 )
 from contentctl.objects.story import Story
+from contentctl.objects.throttling import Throttling
 if TYPE_CHECKING:
     from contentctl.input.director import DirectorOutputDto
 
@@ -29,7 +30,6 @@ from contentctl.objects.enums import (
     RiskSeverity,
     KillChainPhase,
     NistCategory,
-    RiskLevel,
     SecurityContentProductName
 )
 from contentctl.objects.atomic import AtomicEnrichment, AtomicTest
@@ -49,6 +49,23 @@ class DetectionTags(BaseModel):
     @property
     def risk_score(self) -> int:
         return round((self.confidence * self.impact)/100)
+
+    @computed_field
+    @property
+    def severity(self)->RiskSeverity:
+        if 0 <= self.risk_score <= 20:
+            return RiskSeverity.INFORMATIONAL
+        elif 20 < self.risk_score <= 40:
+            return RiskSeverity.LOW
+        elif 40 < self.risk_score <= 60:
+            return RiskSeverity.MEDIUM
+        elif 60 < self.risk_score <= 80:
+            return RiskSeverity.HIGH
+        elif 80 < self.risk_score <= 100:
+            return RiskSeverity.CRITICAL
+        else:
+            raise Exception(f"Error getting severity - risk_score must be between 0-100, but was actually {self.risk_score}")
+
 
     mitre_attack_id: List[MITRE_ATTACK_ID_TYPE] = []
     nist: list[NistCategory] = []
@@ -58,31 +75,16 @@
     message: str = Field(...)
     product: list[SecurityContentProductName] = Field(..., min_length=1)
     required_fields: list[str] = Field(min_length=1)
-
+    throttling: Optional[Throttling] = None
     security_domain: SecurityDomain = Field(...)
-
-    @computed_field
-    @property
-    def risk_severity(self) -> RiskSeverity:
-        if self.risk_score >= 80:
-            return RiskSeverity('high')
-        elif (self.risk_score >= 50 and self.risk_score <= 79):
-            return RiskSeverity('medium')
-        else:
-            return RiskSeverity('low')
-
     cve: List[CVE_TYPE] = []
     atomic_guid: List[AtomicTest] = []
-    drilldown_search: Optional[str] = None
+
 
     # enrichment
     mitre_attack_enrichments: List[MitreAttackEnrichment] = Field([], validate_default=True)
     confidence_id: Optional[PositiveInt] = Field(None, ge=1, le=3)
     impact_id: Optional[PositiveInt] = Field(None, ge=1, le=5)
-    # context_ids: list = None
-    risk_level_id: Optional[NonNegativeInt] = Field(None, le=4)
-    risk_level: Optional[RiskLevel] = None
-    # observable_str: str = None
     evidence_str: Optional[str] = None
 
     @computed_field
@@ -112,7 +114,7 @@
 
     # TODO (#268): Validate manual_test has length > 0 if not None
     manual_test: Optional[str] = None
-
+
     # The following validator is temporarily disabled pending further discussions
     # @validator('message')
     # def validate_message(cls,v,values):
@@ -158,7 +160,7 @@
             "message": self.message,
             "risk_score": self.risk_score,
             "security_domain": self.security_domain,
-            "risk_severity": self.risk_severity,
+            "risk_severity": self.severity,
             "mitre_attack_id": self.mitre_attack_id,
             "mitre_attack_enrichments": self.mitre_attack_enrichments
         }
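
Editor's note: detection_tags.py replaces the three-level risk_severity computed field (high at 80 and above, medium for 50-79, otherwise low) with a five-level severity field bucketed in 20-point steps. The standalone function below mirrors that bucketing so the boundary behavior is easy to check; the RiskSeverity member names come from the diff, but their string values here are assumptions, not contentctl's actual enum definition.

from enum import StrEnum


class RiskSeverity(StrEnum):
    # Stand-in for contentctl.objects.enums.RiskSeverity; values are illustrative.
    INFORMATIONAL = "informational"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"


def severity(risk_score: int) -> RiskSeverity:
    # Same 20-point buckets as the new computed field: lower bounds are exclusive
    # above 20 and upper bounds inclusive (20 -> informational, 21 -> low, 80 -> high).
    if 0 <= risk_score <= 20:
        return RiskSeverity.INFORMATIONAL
    elif 20 < risk_score <= 40:
        return RiskSeverity.LOW
    elif 40 < risk_score <= 60:
        return RiskSeverity.MEDIUM
    elif 60 < risk_score <= 80:
        return RiskSeverity.HIGH
    elif 80 < risk_score <= 100:
        return RiskSeverity.CRITICAL
    raise ValueError(f"risk_score must be between 0-100, got {risk_score}")


if __name__ == "__main__":
    # risk_score = round(confidence * impact / 100), e.g. round(70 * 80 / 100) = 56
    print(severity(56))   # medium
    print(severity(100))  # critical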
contentctl/objects/drilldown.py (new file)
@@ -0,0 +1,70 @@
+from __future__ import annotations
+from pydantic import BaseModel, Field, model_serializer
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from contentctl.objects.detection import Detection
+from contentctl.objects.enums import AnalyticsType
+DRILLDOWN_SEARCH_PLACEHOLDER = "%original_detection_search%"
+EARLIEST_OFFSET = "$info_min_time$"
+LATEST_OFFSET = "$info_max_time$"
+RISK_SEARCH = "index = risk starthoursago = 168 endhoursago = 0 | stats count values(search_name) values(risk_message) values(analyticstories) values(annotations._all) values(annotations.mitre_attack.mitre_tactic) "
+
+class Drilldown(BaseModel):
+    name: str = Field(..., description="The name of the drilldown search", min_length=5)
+    search: str = Field(..., description="The text of a drilldown search. This must be valid SPL.", min_length=1)
+    earliest_offset:None | str = Field(...,
+                                       description="Earliest offset time for the drilldown search. "
+                                       f"The most common value for this field is '{EARLIEST_OFFSET}', "
+                                       "but it is NOT the default value and must be supplied explicitly.",
+                                       min_length= 1)
+    latest_offset:None | str = Field(...,
+                                     description="Latest offset time for the driolldown search. "
+                                     f"The most common value for this field is '{LATEST_OFFSET}', "
+                                     "but it is NOT the default value and must be supplied explicitly.",
+                                     min_length= 1)
+
+    @classmethod
+    def constructDrilldownsFromDetection(cls, detection: Detection) -> list[Drilldown]:
+        victim_observables = [o for o in detection.tags.observable if o.role[0] == "Victim"]
+        if len(victim_observables) == 0 or detection.type == AnalyticsType.Hunting:
+            # No victims, so no drilldowns
+            return []
+        print(f"Adding default drilldowns for [{detection.name}]")
+        variableNamesString = ' and '.join([f"${o.name}$" for o in victim_observables])
+        nameField = f"View the detection results for {variableNamesString}"
+        appendedSearch = " | search " + ' '.join([f"{o.name} = ${o.name}$" for o in victim_observables])
+        search_field = f"{detection.search}{appendedSearch}"
+        detection_results = cls(name=nameField, earliest_offset=EARLIEST_OFFSET, latest_offset=LATEST_OFFSET, search=search_field)
+
+
+        nameField = f"View risk events for the last 7 days for {variableNamesString}"
+        fieldNamesListString = ', '.join([o.name for o in victim_observables])
+        search_field = f"{RISK_SEARCH}by {fieldNamesListString} {appendedSearch}"
+        risk_events_last_7_days = cls(name=nameField, earliest_offset=None, latest_offset=None, search=search_field)
+
+        return [detection_results,risk_events_last_7_days]
+
+
+    def perform_search_substitutions(self, detection:Detection)->None:
+        """Replaces the field DRILLDOWN_SEARCH_PLACEHOLDER (%original_detection_search%)
+        with the search contained in the detection. We do this so that the YML does not
+        need the search copy/pasted from the search field into the drilldown object.
+
+        Args:
+            detection (Detection): Detection to be used to update the search field of the drilldown
+        """
+        self.search = self.search.replace(DRILLDOWN_SEARCH_PLACEHOLDER, detection.search)
+
+
+    @model_serializer
+    def serialize_model(self) -> dict[str,str]:
+        #Call serializer for parent
+        model:dict[str,str] = {}
+
+        model['name'] = self.name
+        model['search'] = self.search
+        if self.earliest_offset is not None:
+            model['earliest_offset'] = self.earliest_offset
+        if self.latest_offset is not None:
+            model['latest_offset'] = self.latest_offset
+        return model
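
Editor's note: the new Drilldown model ends with a custom model_serializer that emits earliest_offset and latest_offset only when they are set, which keeps unset offsets out of the serialized output (for example, the generated 7-day risk drilldown is constructed with both offsets as None). A cut-down, runnable sketch of that serializer behavior; MiniDrilldown is illustrative and omits the Detection-driven construction logic above.

from pydantic import BaseModel, Field, model_serializer


class MiniDrilldown(BaseModel):
    # Just enough of the Drilldown model to show the offset-dropping serializer.
    name: str = Field(..., min_length=5)
    search: str = Field(..., min_length=1)
    earliest_offset: None | str
    latest_offset: None | str

    @model_serializer
    def serialize_model(self) -> dict[str, str]:
        model: dict[str, str] = {"name": self.name, "search": self.search}
        # Offsets are only emitted when present.
        if self.earliest_offset is not None:
            model["earliest_offset"] = self.earliest_offset
        if self.latest_offset is not None:
            model["latest_offset"] = self.latest_offset
        return model


if __name__ == "__main__":
    with_offsets = MiniDrilldown(
        name="View the detection results for $dest$",
        search="| from datamodel Endpoint.Processes | search dest=$dest$",
        earliest_offset="$info_min_time$",
        latest_offset="$info_max_time$",
    )
    without_offsets = MiniDrilldown(
        name="View risk events for the last 7 days for $dest$",
        search="index=risk | stats count by dest",
        earliest_offset=None,
        latest_offset=None,
    )
    print(with_offsets.model_dump())     # includes earliest_offset/latest_offset
    print(without_offsets.model_dump())  # offsets omitted entirely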