contentctl 4.3.4__py3-none-any.whl → 4.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- contentctl/actions/build.py +1 -0
- contentctl/actions/detection_testing/GitService.py +10 -10
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +68 -38
- contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +5 -1
- contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +10 -8
- contentctl/actions/initialize.py +28 -12
- contentctl/actions/inspect.py +191 -91
- contentctl/actions/new_content.py +10 -2
- contentctl/actions/validate.py +3 -6
- contentctl/api.py +1 -1
- contentctl/contentctl.py +3 -0
- contentctl/enrichments/attack_enrichment.py +49 -81
- contentctl/enrichments/cve_enrichment.py +6 -7
- contentctl/helper/splunk_app.py +141 -10
- contentctl/input/director.py +19 -24
- contentctl/input/new_content_questions.py +9 -42
- contentctl/objects/abstract_security_content_objects/detection_abstract.py +155 -13
- contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +17 -9
- contentctl/objects/atomic.py +51 -77
- contentctl/objects/base_test_result.py +7 -7
- contentctl/objects/baseline.py +12 -18
- contentctl/objects/baseline_tags.py +2 -5
- contentctl/objects/config.py +154 -26
- contentctl/objects/constants.py +34 -1
- contentctl/objects/correlation_search.py +79 -114
- contentctl/objects/dashboard.py +100 -0
- contentctl/objects/deployment.py +20 -5
- contentctl/objects/detection_metadata.py +71 -0
- contentctl/objects/detection_stanza.py +79 -0
- contentctl/objects/detection_tags.py +28 -26
- contentctl/objects/drilldown.py +70 -0
- contentctl/objects/enums.py +26 -24
- contentctl/objects/errors.py +187 -0
- contentctl/objects/investigation.py +23 -15
- contentctl/objects/investigation_tags.py +4 -3
- contentctl/objects/lookup.py +8 -1
- contentctl/objects/macro.py +16 -7
- contentctl/objects/notable_event.py +6 -5
- contentctl/objects/risk_analysis_action.py +4 -4
- contentctl/objects/risk_event.py +8 -7
- contentctl/objects/savedsearches_conf.py +196 -0
- contentctl/objects/story.py +4 -16
- contentctl/objects/throttling.py +46 -0
- contentctl/output/conf_output.py +4 -0
- contentctl/output/conf_writer.py +24 -4
- contentctl/output/new_content_yml_output.py +4 -9
- contentctl/output/templates/analyticstories_detections.j2 +2 -2
- contentctl/output/templates/analyticstories_investigations.j2 +5 -5
- contentctl/output/templates/analyticstories_stories.j2 +1 -1
- contentctl/output/templates/savedsearches_baselines.j2 +2 -3
- contentctl/output/templates/savedsearches_detections.j2 +12 -7
- contentctl/output/templates/savedsearches_investigations.j2 +3 -4
- contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +10 -1
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/METADATA +6 -5
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/RECORD +58 -57
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/WHEEL +1 -1
- contentctl/objects/ssa_detection.py +0 -157
- contentctl/objects/ssa_detection_tags.py +0 -138
- contentctl/objects/unit_test_old.py +0 -10
- contentctl/objects/unit_test_ssa.py +0 -31
- contentctl/output/templates/finding_report.j2 +0 -30
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/LICENSE.md +0 -0
- {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/entry_points.txt +0 -0
contentctl/objects/config.py
CHANGED
@@ -1,26 +1,31 @@
 from __future__ import annotations
+
+from os import environ
+from datetime import datetime, UTC
+from typing import Optional, Any, List, Union, Self
+import random
+from enum import StrEnum, auto
+import pathlib
+from urllib.parse import urlparse
+from abc import ABC, abstractmethod
+from functools import partialmethod
+
+import tqdm
+import semantic_version
 from pydantic import (
     BaseModel, Field, field_validator,
     field_serializer, ConfigDict, DirectoryPath,
     PositiveInt, FilePath, HttpUrl, AnyUrl, model_validator,
     ValidationInfo
 )
+
+from contentctl.objects.constants import DOWNLOADS_DIRECTORY
 from contentctl.output.yml_writer import YmlWriter
-from os import environ
-from datetime import datetime, UTC
-from typing import Optional,Any,Annotated,List,Union, Self
-import semantic_version
-import random
-from enum import StrEnum, auto
-import pathlib
 from contentctl.helper.utils import Utils
-from urllib.parse import urlparse
-from abc import ABC, abstractmethod
 from contentctl.objects.enums import PostTestBehavior, DetectionTestingMode
 from contentctl.objects.detection import Detection
 from contentctl.objects.annotated_types import APPID_TYPE
-import
-from functools import partialmethod
+from contentctl.helper.splunk_app import SplunkApp

 ENTERPRISE_SECURITY_UID = 263
 COMMON_INFORMATION_MODEL_UID = 1621
@@ -153,8 +158,7 @@ class CustomApp(App_Base):
             str(destination),
             verbose_print=True)
         return str(destination)
-
-
+
 # TODO (#266): disable the use_enum_values configuration
 class Config_Base(BaseModel):
     model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True)
@@ -171,7 +175,13 @@ class Config_Base(BaseModel):
         return str(path)

 class init(Config_Base):
-
+    model_config = ConfigDict(use_enum_values=True,validate_default=True, arbitrary_types_allowed=True)
+    bare: bool = Field(default=False, description="contentctl normally provides some some example content "
+                       "(macros, stories, data_sources, and/or analytic stories). This option disables "
+                       "initialization with that additional contnet. Note that even if --bare is used, it "
+                       "init will still create the directory structure of the app, "
+                       "include the app_template directory with default content, and content in "
+                       "the deployment/ directory (since it is not yet easily customizable).")


 # TODO (#266): disable the use_enum_values configuration
@@ -185,8 +195,45 @@ class validate(Config_Base):
     build_api: bool = Field(default=False, description="Should api objects be built and output in the build_path?")
     data_source_TA_validation: bool = Field(default=False, description="Validate latest TA information from Splunkbase")

-
-
+    @property
+    def external_repos_path(self)->pathlib.Path:
+        return self.path/"external_repos"
+
+    @property
+    def mitre_cti_repo_path(self)->pathlib.Path:
+        return self.external_repos_path/"cti"
+
+    @property
+    def atomic_red_team_repo_path(self):
+        return self.external_repos_path/"atomic-red-team"
+
+    @model_validator(mode="after")
+    def ensureEnrichmentReposPresent(self)->Self:
+        '''
+        Ensures that the enrichments repos, the atomic red team repo and the
+        mitre attack enrichment repo, are present at the inded path.
+        Raises a detailed exception if either of these are not present
+        when enrichments are enabled.
+        '''
+        if not self.enrichments:
+            return self
+        # If enrichments are enabled, ensure that all of the
+        # enrichment directories exist
+        missing_repos:list[str] = []
+        if not self.atomic_red_team_repo_path.is_dir():
+            missing_repos.append(f"https://github.com/redcanaryco/atomic-red-team {self.atomic_red_team_repo_path}")
+
+        if not self.mitre_cti_repo_path.is_dir():
+            missing_repos.append(f"https://github.com/mitre/cti {self.mitre_cti_repo_path}")
+
+        if len(missing_repos) > 0:
+            msg_list = ["The following repositories, which are required for enrichment, have not "
+                        f"been checked out to the {self.external_repos_path} directory. "
+                        "Please check them out using the following commands:"]
+            msg_list.extend([f"git clone --single-branch {repo_string}" for repo_string in missing_repos])
+            msg = '\n\t'.join(msg_list)
+            raise FileNotFoundError(msg)
+        return self

 class report(validate):
     #reporting takes no extra args, but we define it here so that it can be a mode on the command line
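The new `ensureEnrichmentReposPresent` validator fails fast when enrichments are enabled but the external repositories have not been cloned. A minimal standalone sketch of the same check, simplified from the hunk above (the function name `check_enrichment_repos` and the hard-coded repo map are illustrative, not part of contentctl):

```python
import pathlib


def check_enrichment_repos(project_path: pathlib.Path, enrichments: bool) -> None:
    """Raise FileNotFoundError with clone instructions if enrichment repos are missing."""
    if not enrichments:
        return
    external_repos = project_path / "external_repos"
    expected = {
        "https://github.com/redcanaryco/atomic-red-team": external_repos / "atomic-red-team",
        "https://github.com/mitre/cti": external_repos / "cti",
    }
    missing = [f"{url} {path}" for url, path in expected.items() if not path.is_dir()]
    if missing:
        lines = ["The following repositories, which are required for enrichment, are missing. "
                 "Please check them out using the following commands:"]
        lines.extend(f"git clone --single-branch {repo}" for repo in missing)
        raise FileNotFoundError("\n\t".join(lines))


# check_enrichment_repos(pathlib.Path("."), enrichments=True)
```

For a project rooted in the current directory, the resulting message tells the user to run, for example, `git clone --single-branch https://github.com/mitre/cti external_repos/cti`.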
@@ -233,27 +280,111 @@ class build(validate):
             return self.getBuildDir() / f"{self.app.appid}-{self.app.version}.tar.gz"
         else:
             return self.getBuildDir() / f"{self.app.appid}-latest.tar.gz"
-
-    def getSSAPath(self)->pathlib.Path:
-        return self.getBuildDir() / "ssa"

     def getAPIPath(self)->pathlib.Path:
         return self.getBuildDir() / "api"

     def getAppTemplatePath(self)->pathlib.Path:
         return self.path/"app_template"
-


 class StackType(StrEnum):
     classic = auto()
     victoria = auto()

+
 class inspect(build):
-    splunk_api_username: str = Field(
-
+    splunk_api_username: str = Field(
+        description="Splunk API username used for appinspect and Splunkbase downloads."
+    )
+    splunk_api_password: str = Field(
+        exclude=True,
+        description="Splunk API password used for appinspect and Splunkbase downloads."
+    )
+    enable_metadata_validation: bool = Field(
+        default=False,
+        description=(
+            "Flag indicating whether detection metadata validation and versioning enforcement "
+            "should be enabled."
+        )
+    )
+    suppress_missing_content_exceptions: bool = Field(
+        default=False,
+        description=(
+            "Suppress exceptions during metadata validation if a detection that existed in "
+            "the previous build does not exist in this build. This is to ensure that content "
+            "is not accidentally removed. In order to support testing both public and private "
+            "content, this warning can be suppressed. If it is suppressed, it will still be "
+            "printed out as a warning."
+        )
+    )
+    enrichments: bool = Field(
+        default=True,
+        description=(
+            "[NOTE: enrichments must be ENABLED for inspect to run. Please adjust your config "
+            f"or CLI invocation appropriately] {validate.model_fields['enrichments'].description}"
+        )
+    )
+    # TODO (cmcginley): wording should change here if we want to be able to download any app from
+    # Splunkbase
+    previous_build: str | None = Field(
+        default=None,
+        description=(
+            "Local path to the previous app build for metatdata validation and versioning "
+            "enforcement (defaults to the latest release of the app published on Splunkbase)."
+        )
+    )
     stack_type: StackType = Field(description="The type of your Splunk Cloud Stack")

+    @field_validator("enrichments", mode="after")
+    @classmethod
+    def validate_needed_flags_metadata_validation(cls, v: bool, info: ValidationInfo) -> bool:
+        """
+        Validates that `enrichments` is True for the inspect action
+
+        :param v: the field's value
+        :type v: bool
+        :param info: the ValidationInfo to be used
+        :type info: :class:`pydantic.ValidationInfo`
+
+        :returns: bool, for v
+        :rtype: bool
+        """
+        # Enforce that `enrichments` is True for the inspect action
+        if v is False:
+            raise ValueError("Field `enrichments` must be True for the `inspect` action")
+
+        return v
+
+    def get_previous_package_file_path(self) -> pathlib.Path:
+        """
+        Returns a Path object for the path to the prior package build. If no path was provided, the
+        latest version is downloaded from Splunkbase and it's filepath is returned, and saved to the
+        in-memory config (so download doesn't happen twice in the same run).
+
+        :returns: Path object to previous app build
+        :rtype: :class:`pathlib.Path`
+        """
+        previous_build_path = self.previous_build
+        # Download the previous build as the latest release on Splunkbase if no path was provided
+        if previous_build_path is None:
+            print(
+                f"Downloading latest {self.app.label} build from Splunkbase to serve as previous "
+                "build during validation..."
+            )
+            app = SplunkApp(app_uid=self.app.uid)
+            previous_build_path = app.download(
+                out=pathlib.Path(DOWNLOADS_DIRECTORY),
+                username=self.splunk_api_username,
+                password=self.splunk_api_password,
+                is_dir=True,
+                overwrite=True
+            )
+            print(f"Latest release downloaded from Splunkbase to: {previous_build_path}")
+            self.previous_build = str(previous_build_path)
+        return pathlib.Path(previous_build_path)
+
+
 class NewContentType(StrEnum):
     detection = auto()
     story = auto()
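The new `inspect.get_previous_package_file_path` caches the downloaded build on the config object so a single run only hits Splunkbase once. A condensed sketch of that caching behavior; the class `PreviousBuildResolver` and its `download_from_splunkbase` method are hypothetical stand-ins for the real `inspect` config and `SplunkApp.download`:

```python
import pathlib


class PreviousBuildResolver:
    def __init__(self, previous_build: str | None = None):
        # mirrors inspect.previous_build: a local path, or None to fetch from Splunkbase
        self.previous_build = previous_build

    def get_previous_package_file_path(self) -> pathlib.Path:
        if self.previous_build is None:
            # First call: download the latest release and remember where it landed,
            # so a second call in the same run does not download again.
            downloaded = self.download_from_splunkbase(out=pathlib.Path("downloads"))
            self.previous_build = str(downloaded)
        return pathlib.Path(self.previous_build)

    def download_from_splunkbase(self, out: pathlib.Path) -> pathlib.Path:
        # Placeholder for SplunkApp.download(out=..., username=..., password=...,
        # is_dir=True, overwrite=True) shown in the hunk above.
        out.mkdir(parents=True, exist_ok=True)
        return out / "previous_build.tar.gz"
```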
@@ -828,7 +959,6 @@ class test_servers(test_common):
                 index+=1


-
 class release_notes(Config_Base):
     old_tag:Optional[str] = Field(None, description="Name of the tag to diff against to find new content. "
                                   "If it is not supplied, then it will be inferred as the "
@@ -910,6 +1040,4 @@ class release_notes(Config_Base):
         # raise ValueError("The latest_branch '{self.latest_branch}' was not found in the repository")


-        # return self
-
-
+        # return self
contentctl/objects/constants.py
CHANGED
@@ -1,3 +1,5 @@
+# Use for calculation of maximum length of name field
+from contentctl.objects.enums import SecurityDomain

 ATTACK_TACTICS_KILLCHAIN_MAPPING = {
     "Reconnaissance": "Reconnaissance",
@@ -136,4 +138,35 @@ SES_ATTACK_TACTICS_ID_MAPPING = {
 RBA_OBSERVABLE_ROLE_MAPPING = {
     "Attacker": 0,
     "Victim": 1
-}
+}
+
+# The relative path to the directory where any apps/packages will be downloaded
+DOWNLOADS_DIRECTORY = "downloads"
+
+# Maximum length of the name field for a search.
+# This number is derived from a limitation that exists in
+# ESCU where a search cannot be edited, due to validation
+# errors, if its name is longer than 99 characters.
+# When an saved search is cloned in Enterprise Security User Interface,
+# it is wrapped in the following:
+# {Detection.tags.security_domain.value} - {SEARCH_STANZA_NAME} - Rule
+# Similarly, when we generate the search stanza name in contentctl, it
+# is app.label - detection.name - Rule
+# However, in product the search name is:
+# {CustomApp.label} - {detection.name} - Rule,
+# or in ESCU:
+# ESCU - {detection.name} - Rule,
+# this gives us a maximum length below.
+# When an ESCU search is cloned, it will
+# have a full name like (the following is NOT a typo):
+# Endpoint - ESCU - Name of Search From YML File - Rule - Rule
+# The math below accounts for all these caveats
+ES_MAX_STANZA_LENGTH = 99
+CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE = "{app_label} - {detection_name} - Rule"
+CONTENTCTL_BASELINE_STANZA_NAME_FORMAT_TEMPLATE = "{app_label} - {detection_name}"
+CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE = "{app_label} - {detection_name} - Response Task"
+
+ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE = "{security_domain_value} - {search_name} - Rule"
+SECURITY_DOMAIN_MAX_LENGTH = max([len(SecurityDomain[value]) for value in SecurityDomain._member_map_])
+CONTENTCTL_MAX_STANZA_LENGTH = ES_MAX_STANZA_LENGTH - len(ES_SEARCH_STANZA_NAME_FORMAT_AFTER_CLONING_IN_PRODUCT_TEMPLATE.format(security_domain_value="X"*SECURITY_DOMAIN_MAX_LENGTH,search_name=""))
+CONTENTCTL_MAX_SEARCH_NAME_LENGTH = CONTENTCTL_MAX_STANZA_LENGTH - len(CONTENTCTL_DETECTION_STANZA_NAME_FORMAT_TEMPLATE.format(app_label="ESCU", detection_name=""))
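Walking through the new length math with an assumed longest `SecurityDomain` member of 8 characters (e.g. "endpoint"; the real bound is computed from the enum at import time):

```python
# Worked example of the limits above, under the assumption that the longest
# SecurityDomain member is 8 characters long.
SECURITY_DOMAIN_MAX_LENGTH = 8
ES_MAX_STANZA_LENGTH = 99

# "XXXXXXXX -  - Rule" is 18 characters, so the stanza name contentctl generates
# may be at most 99 - 18 = 81 characters.
cloned = "{security_domain_value} - {search_name} - Rule".format(
    security_domain_value="X" * SECURITY_DOMAIN_MAX_LENGTH, search_name=""
)
CONTENTCTL_MAX_STANZA_LENGTH = ES_MAX_STANZA_LENGTH - len(cloned)  # 81

# "ESCU -  - Rule" is 14 characters, leaving 81 - 14 = 67 characters for the
# detection name itself.
wrapper = "{app_label} - {detection_name} - Rule".format(app_label="ESCU", detection_name="")
CONTENTCTL_MAX_SEARCH_NAME_LENGTH = CONTENTCTL_MAX_STANZA_LENGTH - len(wrapper)  # 67
```

In other words, the detection name gets whatever remains of the 99-character ES limit after both the product's cloning wrapper and the `ESCU - ... - Rule` stanza template are accounted for.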
contentctl/objects/correlation_search.py
CHANGED

@@ -1,10 +1,11 @@
 import logging
 import time
 import json
-from typing import
+from typing import Any
 from enum import Enum
+from functools import cached_property

-from pydantic import BaseModel,
+from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
 from splunklib.results import JSONResultsReader, Message # type: ignore
 from splunklib.binding import HTTPError, ResponseReader # type: ignore
 import splunklib.client as splunklib # type: ignore
@@ -15,7 +16,7 @@ from contentctl.objects.notable_action import NotableAction
 from contentctl.objects.base_test_result import TestResultStatus
 from contentctl.objects.integration_test_result import IntegrationTestResult
 from contentctl.actions.detection_testing.progress_bar import (
-    format_pbar_string,
+    format_pbar_string, # type: ignore
     TestReportingType,
     TestingStates
 )
@@ -178,13 +179,14 @@ class PbarData(BaseModel):
     :param fq_test_name: the fully qualifed (fq) test name ("<detection_name>:<test_name>") used for logging
     :param start_time: the start time used for logging
     """
-    pbar: tqdm
+    pbar: tqdm # type: ignore
     fq_test_name: str
     start_time: float

-
-
-    arbitrary_types_allowed
+    # needed to support the tqdm type
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )


 class CorrelationSearch(BaseModel):
@@ -197,143 +199,110 @@ class CorrelationSearch(BaseModel):
     :param pbar_data: the encapsulated info needed for logging w/ pbar
     :param test_index: the index attack data is forwarded to for testing (optionally used in cleanup)
     """
-    ## The following three fields are explicitly needed at instantiation # noqa: E266
-
     # the detection associated with the correlation search (e.g. "Windows Modify Registry EnableLinkedConnections")
-    detection: Detection
+    detection: Detection = Field(...)

     # a Service instance representing a connection to a Splunk instance
-    service: splunklib.Service
+    service: splunklib.Service = Field(...)

     # the encapsulated info needed for logging w/ pbar
-    pbar_data: PbarData
-
-    ## The following field is optional for instantiation # noqa: E266
+    pbar_data: PbarData = Field(...)

     # The index attack data is sent to; can be None if we are relying on the caller to do our
     # cleanup of this index
-    test_index:
-
-    ## All remaining fields can be derived from other fields or have intentional defaults that # noqa: E266
-    ## should not be changed (validators should prevent instantiating some of these fields directly # noqa: E266
-    ## to prevent undefined behavior) # noqa: E266
+    test_index: str | None = Field(default=None, min_length=1)

     # The logger to use (logs all go to a null pipe unless ENABLE_LOGGING is set to True, so as not
     # to conflict w/ tqdm)
-    logger: logging.Logger = Field(default_factory=get_logger)
-
-    # The search name (e.g. "ESCU - Windows Modify Registry EnableLinkedConnections - Rule")
-    name: Optional[str] = None
-
-    # The path to the saved search on the Splunk instance
-    splunk_path: Optional[str] = None
-
-    # A model of the saved search as provided by splunklib
-    saved_search: Optional[splunklib.SavedSearch] = None
+    logger: logging.Logger = Field(default_factory=get_logger, init=False)

     # The set of indexes to clear on cleanup
-    indexes_to_purge: set[str] = set()
+    indexes_to_purge: set[str] = Field(default=set(), init=False)

     # The risk analysis adaptive response action (if defined)
-
+    _risk_analysis_action: RiskAnalysisAction | None = PrivateAttr(default=None)

     # The notable adaptive response action (if defined)
-
+    _notable_action: NotableAction | None = PrivateAttr(default=None)

     # The list of risk events found
-    _risk_events:
+    _risk_events: list[RiskEvent] | None = PrivateAttr(default=None)

     # The list of notable events found
-    _notable_events:
+    _notable_events: list[NotableEvent] | None = PrivateAttr(default=None)

-
-
-
-
-    extra
+    # Need arbitrary types to allow fields w/ types like SavedSearch; we also want to forbid
+    # unexpected fields
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra='forbid'
+    )

-
-
-    def _convert_detection_to_search_name(cls, v, values) -> str:
-        """
-        Validate name and derive if None
-        """
-        if "detection" not in values:
-            raise ValueError("detection missing; name is dependent on detection")
+    def model_post_init(self, __context: Any) -> None:
+        super().model_post_init(__context)

-
-
-            raise ValueError(
-                "name must be derived from detection; leave as None and it will be derived automatically"
-            )
-        return expected_name
+        # Parse the initial values for the risk/notable actions
+        self._parse_risk_and_notable_actions()

-    @
-    @
-    def
+    @computed_field
+    @cached_property
+    def name(self) -> str:
         """
-
+        The search name (e.g. "ESCU - Windows Modify Registry EnableLinkedConnections - Rule")
+
+        :returns: the search name
+        :rtype: str
         """
-
-        raise ValueError("name missing; splunk_path is dependent on name")
+        return f"ESCU - {self.detection.name} - Rule"

-
-
-
-
-
-        return f"saved/searches/{values['name']}"
+    @computed_field
+    @cached_property
+    def splunk_path(self) -> str:
+        """
+        The path to the saved search on the Splunk instance

-
-
-    def _instantiate_saved_search(cls, v, values) -> str:
+        :returns: the search path
+        :rtype: str
         """
-
+        return f"/saved/searches/{self.name}"
+
+    @computed_field
+    @cached_property
+    def saved_search(self) -> splunklib.SavedSearch:
         """
-
-        raise ValueError("splunk_path or service missing; saved_search is dependent on both")
+        A model of the saved search as provided by splunklib

-
-
-
-            "automatically"
-        )
+        :returns: the SavedSearch object
+        :rtype: :class:`splunklib.client.SavedSearch`
+        """
         return splunklib.SavedSearch(
-
-
+            self.service,
+            self.splunk_path,
         )

-
-    @
-
-
-    Initialize risk_analysis_action
+    # TODO (cmcginley): need to make this refreshable
+    @computed_field
+    @property
+    def risk_analysis_action(self) -> RiskAnalysisAction | None:
         """
-
-        raise ValueError("saved_search missing; risk_analysis_action is dependent on saved_search")
+        The risk analysis adaptive response action (if defined)

-
-
-            "risk_analysis_action must be derived from the saved_search; leave as None and it will be derived "
-            "automatically"
-        )
-        return CorrelationSearch._get_risk_analysis_action(values['saved_search'].content)
-
-    @validator("notable_action", always=True)
-    @classmethod
-    def _init_notable_action(cls, v, values) -> Optional[NotableAction]:
+        :returns: the RiskAnalysisAction object, if it exists
+        :rtype: :class:`contentctl.objects.risk_analysis_action.RiskAnalysisAction` | None
         """
-
+        return self._risk_analysis_action
+
+    # TODO (cmcginley): need to make this refreshable
+    @computed_field
+    @property
+    def notable_action(self) -> NotableAction | None:
         """
-
-        raise ValueError("saved_search missing; notable_action is dependent on saved_search")
+        The notable adaptive response action (if defined)

-
-
-
-
-        )
-        return CorrelationSearch._get_notable_action(values['saved_search'].content)
+        :returns: the NotableAction object, if it exists
+        :rtype: :class:`contentctl.objects.notable_action.NotableAction` | None
+        """
+        return self._notable_action

     @property
     def earliest_time(self) -> str:
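The hunk above replaces the old pydantic v1 `validator(..., always=True)` chains with pydantic v2 idioms: required fields via `Field(...)`, private attributes, `model_post_init`, and derived values exposed as `computed_field` over `cached_property`. A self-contained sketch of that pattern; `MiniCorrelationSearch` below is illustrative, not the real class:

```python
from functools import cached_property
from typing import Any

from pydantic import BaseModel, ConfigDict, PrivateAttr, computed_field


class MiniCorrelationSearch(BaseModel):
    # forbid unexpected fields, mirroring the model_config in the hunk above
    model_config = ConfigDict(extra="forbid")

    detection_name: str
    _risk_analysis_action: dict[str, Any] | None = PrivateAttr(default=None)

    def model_post_init(self, __context: Any) -> None:
        super().model_post_init(__context)
        # derived state is parsed once, after the required fields are set
        self._risk_analysis_action = {"parsed": True}

    @computed_field
    @cached_property
    def name(self) -> str:
        # e.g. "ESCU - Windows Modify Registry EnableLinkedConnections - Rule"
        return f"ESCU - {self.detection_name} - Rule"

    @computed_field
    @cached_property
    def splunk_path(self) -> str:
        return f"/saved/searches/{self.name}"


search = MiniCorrelationSearch(detection_name="Windows Modify Registry EnableLinkedConnections")
print(search.name, search.splunk_path)
```

The `cached_property` variant computes each derived value once per instance, which is why the remaining refresh-related TODOs in the diff call out that these properties are not yet refreshable.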
@@ -393,7 +362,7 @@ class CorrelationSearch(BaseModel):
         return self.notable_action is not None

     @staticmethod
-    def _get_risk_analysis_action(content: dict[str, Any]) ->
+    def _get_risk_analysis_action(content: dict[str, Any]) -> RiskAnalysisAction | None:
         """
         Given the saved search content, parse the risk analysis action
         :param content: a dict of strings to values
@@ -407,7 +376,7 @@ class CorrelationSearch(BaseModel):
         return None

     @staticmethod
-    def _get_notable_action(content: dict[str, Any]) ->
+    def _get_notable_action(content: dict[str, Any]) -> NotableAction | None:
         """
         Given the saved search content, parse the notable action
         :param content: a dict of strings to values
@@ -431,10 +400,6 @@ class CorrelationSearch(BaseModel):
                 relevant.append(observable)
         return relevant

-    # TODO (PEX-484): ideally, we could handle this and the following init w/ a call to
-    # model_post_init, so that all the logic is encapsulated w/in _parse_risk_and_notable_actions
-    # but that is a pydantic v2 feature (see the init validators for risk/notable actions):
-    # https://docs.pydantic.dev/latest/api/base_model/#pydantic.main.BaseModel.model_post_init
     def _parse_risk_and_notable_actions(self) -> None:
         """Parses the risk/notable metadata we care about from self.saved_search.content

@@ -445,12 +410,12 @@ class CorrelationSearch(BaseModel):
             unpacked to be anything other than a singleton
         """
         # grab risk details if present
-        self.
+        self._risk_analysis_action = CorrelationSearch._get_risk_analysis_action(
             self.saved_search.content # type: ignore
         )

         # grab notable details if present
-        self.
+        self._notable_action = CorrelationSearch._get_notable_action(self.saved_search.content) # type: ignore

     def refresh(self) -> None:
         """Refreshes the metadata in the SavedSearch entity, and re-parses the fields we care about
@@ -738,7 +703,7 @@ class CorrelationSearch(BaseModel):
         # TODO (#250): Re-enable and refactor code that validates the specific risk counts
         # Validate risk events in aggregate; we should have an equal amount of risk events for each
         # relevant observable, and the total count should match the total number of events
-        # individual_count:
+        # individual_count: int | None = None
         # total_count = 0
         # for observable_str in observable_counts:
         #     self.logger.debug(
@@ -802,7 +767,7 @@ class CorrelationSearch(BaseModel):
         )

         # initialize result as None
-        result:
+        result: IntegrationTestResult | None = None

         # keep track of time slept and number of attempts for exponential backoff (base 2)
         elapsed_sleep_time = 0
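The final hunk touches the integration-test polling loop, which sleeps with base-2 exponential backoff while waiting for the result to become available. A minimal sketch of that loop shape (the timeout value and the `check` callable are placeholders, not contentctl's API):

```python
import time
from typing import Callable


def poll_with_backoff(check: Callable[[], bool], timeout: float = 120.0) -> bool:
    """Poll check() until it succeeds or the time budget is exhausted."""
    elapsed_sleep_time = 0.0
    num_tries = 0
    while elapsed_sleep_time < timeout:
        if check():
            return True
        sleep_time = 2 ** num_tries  # base-2 backoff: 1, 2, 4, 8, ... seconds
        time.sleep(sleep_time)
        elapsed_sleep_time += sleep_time
        num_tries += 1
    return False
```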