contentctl 5.2.0__py3-none-any.whl → 5.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. contentctl/actions/build.py +5 -43
  2. contentctl/actions/detection_testing/DetectionTestingManager.py +64 -24
  3. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +146 -42
  4. contentctl/actions/detection_testing/views/DetectionTestingView.py +5 -6
  5. contentctl/actions/detection_testing/views/DetectionTestingViewCLI.py +2 -0
  6. contentctl/actions/initialize.py +35 -9
  7. contentctl/actions/release_notes.py +14 -12
  8. contentctl/actions/test.py +16 -20
  9. contentctl/actions/validate.py +8 -15
  10. contentctl/helper/utils.py +69 -20
  11. contentctl/input/director.py +147 -119
  12. contentctl/input/yml_reader.py +39 -27
  13. contentctl/objects/abstract_security_content_objects/detection_abstract.py +94 -20
  14. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +548 -8
  15. contentctl/objects/baseline.py +24 -6
  16. contentctl/objects/config.py +32 -8
  17. contentctl/objects/content_versioning_service.py +508 -0
  18. contentctl/objects/correlation_search.py +53 -63
  19. contentctl/objects/dashboard.py +15 -1
  20. contentctl/objects/data_source.py +13 -1
  21. contentctl/objects/deployment.py +23 -9
  22. contentctl/objects/detection.py +2 -0
  23. contentctl/objects/enums.py +28 -18
  24. contentctl/objects/investigation.py +40 -20
  25. contentctl/objects/lookup.py +61 -5
  26. contentctl/objects/macro.py +19 -4
  27. contentctl/objects/playbook.py +16 -2
  28. contentctl/objects/rba.py +1 -33
  29. contentctl/objects/removed_security_content_object.py +50 -0
  30. contentctl/objects/security_content_object.py +1 -0
  31. contentctl/objects/story.py +37 -5
  32. contentctl/output/api_json_output.py +5 -3
  33. contentctl/output/conf_output.py +9 -1
  34. contentctl/output/runtime_csv_writer.py +111 -0
  35. contentctl/output/svg_output.py +4 -5
  36. contentctl/output/templates/savedsearches_detections.j2 +2 -6
  37. {contentctl-5.2.0.dist-info → contentctl-5.3.0.dist-info}/METADATA +4 -3
  38. {contentctl-5.2.0.dist-info → contentctl-5.3.0.dist-info}/RECORD +41 -39
  39. {contentctl-5.2.0.dist-info → contentctl-5.3.0.dist-info}/WHEEL +1 -1
  40. contentctl/output/data_source_writer.py +0 -52
  41. {contentctl-5.2.0.dist-info → contentctl-5.3.0.dist-info}/LICENSE.md +0 -0
  42. {contentctl-5.2.0.dist-info → contentctl-5.3.0.dist-info}/entry_points.txt +0 -0

contentctl/objects/correlation_search.py

@@ -1,35 +1,36 @@
+ import json
  import logging
+ import re
  import time
- import json
- from typing import Any
- from enum import StrEnum, IntEnum
+ from enum import IntEnum, StrEnum
  from functools import cached_property
+ from typing import Any
 
- from pydantic import ConfigDict, BaseModel, computed_field, Field, PrivateAttr
- from splunklib.results import JSONResultsReader, Message  # type: ignore
- from splunklib.binding import HTTPError, ResponseReader  # type: ignore
  import splunklib.client as splunklib  # type: ignore
+ from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, computed_field
+ from splunklib.binding import HTTPError, ResponseReader  # type: ignore
+ from splunklib.results import JSONResultsReader, Message  # type: ignore
  from tqdm import tqdm  # type: ignore
 
- from contentctl.objects.risk_analysis_action import RiskAnalysisAction
- from contentctl.objects.notable_action import NotableAction
- from contentctl.objects.base_test_result import TestResultStatus
- from contentctl.objects.integration_test_result import IntegrationTestResult
  from contentctl.actions.detection_testing.progress_bar import (
-     format_pbar_string,  # type: ignore
-     TestReportingType,
      TestingStates,
+     TestReportingType,
+     format_pbar_string,  # type: ignore
  )
+ from contentctl.helper.utils import Utils
+ from contentctl.objects.base_test_result import TestResultStatus
+ from contentctl.objects.detection import Detection
  from contentctl.objects.errors import (
+     ClientError,
      IntegrationTestingError,
      ServerError,
-     ClientError,
      ValidationFailed,
  )
- from contentctl.objects.detection import Detection
- from contentctl.objects.risk_event import RiskEvent
+ from contentctl.objects.integration_test_result import IntegrationTestResult
+ from contentctl.objects.notable_action import NotableAction
  from contentctl.objects.notable_event import NotableEvent
-
+ from contentctl.objects.risk_analysis_action import RiskAnalysisAction
+ from contentctl.objects.risk_event import RiskEvent
 
  # Suppress logging by default; enable for local testing
  ENABLE_LOGGING = False
@@ -37,46 +38,6 @@ LOG_LEVEL = logging.DEBUG
  LOG_PATH = "correlation_search.log"
 
 
- def get_logger() -> logging.Logger:
-     """
-     Gets a logger instance for the module; the logger is configured if not already configured. The
-     NullHandler is used to suppress logging when running in production so as not to conflict w/
-     contentctl's larger pbar-based logging. The FileHandler is enabled by setting ENABLE_LOGGING
-     to True (useful for debugging/testing locally)
-     """
-     # get logger for module
-     logger = logging.getLogger(__name__)
-
-     # set propagate to False if not already set as such (needed so that we do not flow up to any
-     # root loggers)
-     if logger.propagate:
-         logger.propagate = False
-
-     # if logger has no handlers, it needs to be configured for the first time
-     if not logger.hasHandlers():
-         # set level
-         logger.setLevel(LOG_LEVEL)
-
-         # if logging enabled, use a FileHandler; else, use the NullHandler to suppress logging
-         handler: logging.Handler
-         if ENABLE_LOGGING:
-             handler = logging.FileHandler(LOG_PATH)
-         else:
-             handler = logging.NullHandler()
-
-         # Format our output
-         formatter = logging.Formatter(
-             "%(asctime)s - %(levelname)s:%(name)s - %(message)s"
-         )
-         handler.setFormatter(formatter)
-
-         # Set handler level and add to logger
-         handler.setLevel(LOG_LEVEL)
-         logger.addHandler(handler)
-
-     return logger
-
-
  class SavedSearchKeys(StrEnum):
      """
      Various keys into the SavedSearch content
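
Note: the module-level get_logger above is removed in favor of Utils.get_logger, whose implementation is not shown in this diff. Based on the removed function and the new call sites (Utils.get_logger(__name__, LOG_LEVEL, LOG_PATH, ENABLE_LOGGING)), a minimal sketch of what the consolidated helper presumably does; the parameter names here are assumptions, not confirmed against contentctl/helper/utils.py:

import logging


def get_logger(
    name: str, log_level: int, log_path: str, enable_logging: bool
) -> logging.Logger:
    # Sketch only: signature inferred from call sites in this diff
    logger = logging.getLogger(name)
    logger.propagate = False  # don't bubble up to any root loggers

    if not logger.hasHandlers():
        logger.setLevel(log_level)

        # FileHandler when logging is enabled; NullHandler suppresses output otherwise
        handler: logging.Handler
        if enable_logging:
            handler = logging.FileHandler(log_path)
        else:
            handler = logging.NullHandler()

        handler.setFormatter(
            logging.Formatter("%(asctime)s - %(levelname)s:%(name)s - %(message)s")
        )
        handler.setLevel(log_level)
        logger.addHandler(handler)

    return logger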
@@ -135,34 +96,58 @@ class ResultIterator:
      Given a ResponseReader, constructs a JSONResultsReader and iterates over it; when Message instances are encountered,
      they are logged if the message is anything other than "error", in which case an error is raised. Regular results are
      returned as expected
+
      :param response_reader: a ResponseReader object
-     :param logger: a Logger object
+     :type response_reader: :class:`splunklib.binding.ResponseReader`
+     :param error_filters: set of re Patterns used to filter out errors we're ok ignoring
+     :type error_filters: list[:class:`re.Pattern[str]`]
      """
 
-     def __init__(self, response_reader: ResponseReader) -> None:
+     def __init__(
+         self, response_reader: ResponseReader, error_filters: list[re.Pattern[str]] = []
+     ) -> None:
          # init the results reader
          self.results_reader: JSONResultsReader = JSONResultsReader(response_reader)
 
+         # the list of patterns for errors to ignore
+         self.error_filters: list[re.Pattern[str]] = error_filters
+
          # get logger
-         self.logger: logging.Logger = get_logger()
+         self.logger: logging.Logger = Utils.get_logger(
+             __name__, LOG_LEVEL, LOG_PATH, ENABLE_LOGGING
+         )
 
      def __iter__(self) -> "ResultIterator":
          return self
 
-     def __next__(self) -> dict[Any, Any]:
+     def __next__(self) -> dict[str, Any]:
          # Use a reader for JSON format so we can iterate over our results
          for result in self.results_reader:
              # log messages, or raise if error
              if isinstance(result, Message):
                  # convert level string to level int
-                 level_name = result.type.strip().upper()  # type: ignore
+                 level_name: str = result.type.strip().upper()  # type: ignore
+                 # TODO (PEX-510): this method is deprecated; replace with our own enum
                  level: int = logging.getLevelName(level_name)
 
                  # log message at appropriate level and raise if needed
                  message = f"SPLUNK: {result.message}"  # type: ignore
                  self.logger.log(level, message)
+                 filtered = False
                  if level == logging.ERROR:
-                     raise ServerError(message)
+                     # if the error matches any of the filters, flag it
+                     for filter in self.error_filters:
+                         self.logger.debug(f"Filter: {filter}; message: {message}")
+                         if filter.match(message) is not None:
+                             self.logger.debug(
+                                 f"Error matched filter {filter}; continuing"
+                             )
+                             filtered = True
+                             break
+
+                     # if no filter was matched, raise
+                     if not filtered:
+                         raise ServerError(message)
 
              # if dict, just return
              elif isinstance(result, dict):
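
Note: the new error_filters parameter lets callers tolerate known-benign Splunk errors. Messages are matched after being formatted as "SPLUNK: <text>", and re.Pattern.match anchors at the start of the string, so patterns must account for that prefix. A small illustration of the filtering decision (the sample pattern and message are hypothetical):

import re

# Hypothetical filter list for errors we are willing to ignore
benign_errors: list[re.Pattern[str]] = [
    re.compile(r"SPLUNK: Error in 'lookup' command.*"),
]

message = "SPLUNK: Error in 'lookup' command: could not find all of the specified lookup fields"
filtered = any(f.match(message) is not None for f in benign_errors)
print(filtered)  # True -> this error would be logged and skipped, not raised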
@@ -218,7 +203,12 @@ class CorrelationSearch(BaseModel):
 
      # The logger to use (logs all go to a null pipe unless ENABLE_LOGGING is set to True, so as not
      # to conflict w/ tqdm)
-     logger: logging.Logger = Field(default_factory=get_logger, init=False)
+     logger: logging.Logger = Field(
+         default_factory=lambda: Utils.get_logger(
+             __name__, LOG_LEVEL, LOG_PATH, ENABLE_LOGGING
+         ),
+         init=False,
+     )
 
      # The set of indexes to clear on cleanup
      indexes_to_purge: set[str] = Field(default=set(), init=False)
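
Note: the logger default moves into a lambda because Utils.get_logger now takes arguments; default_factory defers the call until each model is constructed. A minimal sketch of the same pattern, assuming pydantic v2 (logging.Logger is not a pydantic-native type, so arbitrary types must be allowed):

import logging

from pydantic import BaseModel, ConfigDict, Field


class Sketch(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # default_factory is evaluated per instance; a plain default would be
    # evaluated once, at class-definition time
    logger: logging.Logger = Field(
        default_factory=lambda: logging.getLogger(__name__), init=False
    )


s = Sketch()
s.logger.debug("constructed lazily")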

contentctl/objects/dashboard.py

@@ -4,9 +4,10 @@ from enum import StrEnum
  from typing import Any
 
  from jinja2 import Environment
- from pydantic import Field, Json, model_validator
+ from pydantic import Field, Json, field_validator, model_validator
 
  from contentctl.objects.config import build
+ from contentctl.objects.enums import ContentStatus
  from contentctl.objects.security_content_object import SecurityContentObject
 
  DEFAULT_DASHBOARD_JINJA2_TEMPLATE = """<dashboard version="2" theme="{{ dashboard.theme }}">

@@ -48,6 +49,19 @@ class Dashboard(SecurityContentObject):
      json_obj: Json[dict[str, Any]] = Field(
          ..., description="Valid JSON object that describes the dashboard"
      )
+     status: ContentStatus = ContentStatus.production
+
+     @field_validator("status", mode="after")
+     @classmethod
+     def NarrowStatus(cls, status: ContentStatus) -> ContentStatus:
+         return cls.NarrowStatusTemplate(status, [ContentStatus.production])
+
+     def label(self, config: build) -> str:
+         return f"{config.app.label} - {self.name}"
+
+     @classmethod
+     def containing_folder(cls) -> pathlib.Path:
+         return pathlib.Path("dashboards")
 
      @model_validator(mode="before")
      @classmethod
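
Note: NarrowStatusTemplate is called by the new status validators throughout this release but is defined in security_content_object_abstract.py, which is not shown in these hunks. A hypothetical stand-in illustrating the narrowing pattern these validators rely on (the function name and error message below are illustrative, not the actual implementation):

from enum import StrEnum, auto


class ContentStatus(StrEnum):
    experimental = auto()
    production = auto()
    deprecated = auto()
    removed = auto()


def narrow_status_template(
    status: ContentStatus, allowed: list[ContentStatus]
) -> ContentStatus:
    # Reject any status outside the subset this content type supports
    if status not in allowed:
        raise ValueError(
            f"status '{status}' is not allowed; expected one of {[s.value for s in allowed]}"
        )
    return status


narrow_status_template(ContentStatus.production, [ContentStatus.production])  # passes
# narrow_status_template(ContentStatus.deprecated, [ContentStatus.production])  # raises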

contentctl/objects/data_source.py

@@ -1,9 +1,11 @@
  from __future__ import annotations
 
+ import pathlib
  from typing import Any, Optional
 
- from pydantic import BaseModel, Field, HttpUrl, model_serializer
+ from pydantic import BaseModel, Field, HttpUrl, field_validator, model_serializer
 
+ from contentctl.objects.enums import ContentStatus
  from contentctl.objects.security_content_object import SecurityContentObject
 
 
@@ -26,6 +28,16 @@ class DataSource(SecurityContentObject):
      convert_to_log_source: None | list = None
      example_log: None | str = None
      output_fields: list[str] = []
+     status: ContentStatus = ContentStatus.production
+
+     @field_validator("status", mode="after")
+     @classmethod
+     def NarrowStatus(cls, status: ContentStatus) -> ContentStatus:
+         return cls.NarrowStatusTemplate(status, [ContentStatus.production])
+
+     @classmethod
+     def containing_folder(cls) -> pathlib.Path:
+         return pathlib.Path("data_sources")
 
      @model_serializer
      def serialize_model(self):

contentctl/objects/deployment.py

@@ -1,19 +1,23 @@
  from __future__ import annotations
+
+ import datetime
+ import pathlib
+ import uuid
+ from typing import Any
+
  from pydantic import (
      Field,
-     computed_field,
+     NonNegativeInt,
      ValidationInfo,
+     computed_field,
+     field_validator,
      model_serializer,
-     NonNegativeInt,
  )
- from typing import Any
- import uuid
- import datetime
- from contentctl.objects.security_content_object import SecurityContentObject
- from contentctl.objects.deployment_scheduling import DeploymentScheduling
- from contentctl.objects.alert_action import AlertAction
 
- from contentctl.objects.enums import DeploymentType
+ from contentctl.objects.alert_action import AlertAction
+ from contentctl.objects.deployment_scheduling import DeploymentScheduling
+ from contentctl.objects.enums import ContentStatus, DeploymentType
+ from contentctl.objects.security_content_object import SecurityContentObject
 
 
  class Deployment(SecurityContentObject):

@@ -22,6 +26,12 @@ class Deployment(SecurityContentObject):
      type: DeploymentType = Field(...)
      author: str = Field(..., max_length=255)
      version: NonNegativeInt = 1
+     status: ContentStatus = ContentStatus.production
+
+     @field_validator("status", mode="after")
+     @classmethod
+     def NarrowStatus(cls, status: ContentStatus) -> ContentStatus:
+         return cls.NarrowStatusTemplate(status, [ContentStatus.production])
 
      # Type was the only tag exposed and should likely be removed/refactored.
      # For transitional reasons, provide this as a computed_field in prep for removal

@@ -30,6 +40,10 @@ class Deployment(SecurityContentObject):
      def tags(self) -> dict[str, DeploymentType]:
          return {"type": self.type}
 
+     @classmethod
+     def containing_folder(cls) -> pathlib.Path:
+         return pathlib.Path("deployments")
+
      @staticmethod
      def getDeployment(v: dict[str, Any], info: ValidationInfo) -> Deployment:
          if v != {}:

contentctl/objects/detection.py

@@ -1,4 +1,5 @@
  from __future__ import annotations
+
  from contentctl.objects.abstract_security_content_objects.detection_abstract import (
      Detection_Abstract,
  )

@@ -16,3 +17,4 @@ class Detection(Detection_Abstract):
      # undefined issues with the contentctl tooling
      # or output of the tooling.
      pass
+     pass

contentctl/objects/enums.py

@@ -1,6 +1,7 @@
  from __future__ import annotations
+
+ from enum import StrEnum, auto
  from typing import List
- from enum import StrEnum, IntEnum
 
 
  class AnalyticsType(StrEnum):

@@ -46,18 +47,21 @@ class PlaybookType(StrEnum):
      RESPONSE = "Response"
 
 
- class SecurityContentType(IntEnum):
-     detections = 1
-     baselines = 2
-     stories = 3
-     playbooks = 4
-     macros = 5
-     lookups = 6
-     deployments = 7
-     investigations = 8
-     unit_tests = 9
-     data_sources = 11
-     dashboards = 12
+ class SecurityContentType(StrEnum):
+     detection = auto()
+     baseline = auto()
+     story = auto()
+     playbook = auto()
+     macro = auto()
+     lookup = auto()
+     deployment = auto()
+     investigation = auto()
+     unit_test = auto()
+     data_source = auto()
+     dashboard = auto()
+
+
+ # Create a mapping to map the type of content to the directory which stores it
 
 
  # Bringing these changes back in line will take some time after

@@ -87,11 +91,17 @@ class SecurityContentInvestigationProductName(StrEnum):
      SPLUNK_PHANTOM = "Splunk Phantom"
 
 
- class DetectionStatus(StrEnum):
-     production = "production"
-     deprecated = "deprecated"
-     experimental = "experimental"
-     validation = "validation"
+ class ContentStatus(StrEnum):
+     experimental = auto()
+     production = auto()
+     deprecated = auto()
+     removed = auto()
+
+
+ CONTENT_STATUS_THAT_REQUIRES_DEPRECATION_INFO = [
+     ContentStatus.deprecated,
+     ContentStatus.removed,
+ ]
 
 
  class LogLevel(StrEnum):
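
Note: moving SecurityContentType from IntEnum to StrEnum changes how members serialize. On a StrEnum, auto() yields the lowercased member name, so members are readable strings rather than opaque integers (Python 3.11+):

from enum import StrEnum, auto


class SecurityContentType(StrEnum):
    detection = auto()
    baseline = auto()


# auto() on a StrEnum produces the lowercased member name
print(SecurityContentType.detection)                  # detection
print(SecurityContentType.detection == "detection")   # True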

contentctl/objects/investigation.py

@@ -1,9 +1,16 @@
  from __future__ import annotations
 
+ import pathlib
  import re
- from typing import Any, List, Literal
-
- from pydantic import ConfigDict, Field, computed_field, model_serializer
+ from typing import Any, List
+
+ from pydantic import (
+     ConfigDict,
+     Field,
+     computed_field,
+     field_validator,
+     model_serializer,
+ )
 
  from contentctl.objects.config import CustomApp
  from contentctl.objects.constants import (

@@ -11,7 +18,7 @@ from contentctl.objects.constants import (
      CONTENTCTL_MAX_STANZA_LENGTH,
      CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE,
  )
- from contentctl.objects.enums import DataModel, DetectionStatus
+ from contentctl.objects.enums import ContentStatus, DataModel
  from contentctl.objects.investigation_tags import InvestigationTags
  from contentctl.objects.security_content_object import SecurityContentObject
 
@@ -24,7 +31,16 @@ class Investigation(SecurityContentObject):
      how_to_implement: str = Field(...)
      known_false_positives: str = Field(...)
      tags: InvestigationTags
-     status: Literal[DetectionStatus.production, DetectionStatus.deprecated]
+     status: ContentStatus
+
+     @field_validator("status", mode="after")
+     @classmethod
+     def NarrowStatus(cls, status: ContentStatus) -> ContentStatus:
+         return cls.NarrowStatusTemplate(status, [ContentStatus.removed])
+
+     @classmethod
+     def containing_folder(cls) -> pathlib.Path:
+         return pathlib.Path("investigations")
 
      # enrichment
      @computed_field

@@ -62,18 +78,8 @@ class Investigation(SecurityContentObject):
 
      # This is a slightly modified version of the get_conf_stanza_name function from
      # SecurityContentObject_Abstract
-     def get_response_task_name(
-         self, app: CustomApp, max_stanza_length: int = CONTENTCTL_MAX_STANZA_LENGTH
-     ) -> str:
-         stanza_name = CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE.format(
-             app_label=app.label, detection_name=self.name
-         )
-         if len(stanza_name) > max_stanza_length:
-             raise ValueError(
-                 f"conf stanza may only be {max_stanza_length} characters, "
-                 f"but stanza was actually {len(stanza_name)} characters: '{stanza_name}' "
-             )
-         return stanza_name
+     def get_response_task_name(self, app: CustomApp) -> str:
+         return self.static_get_conf_stanza_name(self.name, app)
 
      @model_serializer
      def serialize_model(self):

@@ -103,6 +109,20 @@ class Investigation(SecurityContentObject):
          # back to itself
          for story in self.tags.analytic_story:
              story.investigations.append(self)
-         # back to itself
-         for story in self.tags.analytic_story:
-             story.investigations.append(self)
+
+     @classmethod
+     def static_get_conf_stanza_name(
+         cls,
+         name: str,
+         app: CustomApp,
+         max_stanza_length: int = CONTENTCTL_MAX_STANZA_LENGTH,
+     ) -> str:
+         stanza_name = CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE.format(
+             app_label=app.label, detection_name=name
+         )
+         if len(stanza_name) > max_stanza_length:
+             raise ValueError(
+                 f"conf stanza may only be {max_stanza_length} characters, "
+                 f"but stanza was actually {len(stanza_name)} characters: '{stanza_name}' "
+             )
+         return stanza_name
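
Note: the refactor moves the length check into a classmethod so callers can compute the response-task stanza name without an Investigation instance. The real template and limit live in contentctl/objects/constants.py and are not shown here; the values below are placeholders used only to illustrate the behavior:

# Placeholder values; the real ones are defined in contentctl/objects/constants.py
CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE = "{app_label} - {detection_name} - Response Task"
CONTENTCTL_MAX_STANZA_LENGTH = 500


def static_get_conf_stanza_name(name: str, app_label: str) -> str:
    stanza_name = CONTENTCTL_RESPONSE_TASK_NAME_FORMAT_TEMPLATE.format(
        app_label=app_label, detection_name=name
    )
    # Reject stanza names that would exceed the .conf limit
    if len(stanza_name) > CONTENTCTL_MAX_STANZA_LENGTH:
        raise ValueError(
            f"conf stanza may only be {CONTENTCTL_MAX_STANZA_LENGTH} characters, "
            f"but stanza was actually {len(stanza_name)} characters: '{stanza_name}'"
        )
    return stanza_name


print(static_get_conf_stanza_name("Suspicious Process Launch", "ESCU"))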

contentctl/objects/lookup.py

@@ -2,6 +2,7 @@ from __future__ import annotations
 
  import abc
  import csv
+ import datetime
  import pathlib
  import re
  from enum import StrEnum, auto

@@ -12,6 +13,7 @@ from pydantic import (
      BeforeValidator,
      Field,
      FilePath,
+     HttpUrl,
      NonNegativeInt,
      TypeAdapter,
      ValidationInfo,

@@ -25,6 +27,9 @@ if TYPE_CHECKING:
      from contentctl.input.director import DirectorOutputDto
      from contentctl.objects.config import validate
 
+ from io import StringIO, TextIOBase
+
+ from contentctl.objects.enums import ContentStatus
  from contentctl.objects.security_content_object import SecurityContentObject
 
  # This section is used to ignore lookups that are NOT shipped with the ESCU app but are used in the detections. Adding exclusions here ensures that contentctl builds will not fail.

@@ -93,6 +98,16 @@ class Lookup(SecurityContentObject, abc.ABC):
          default=None
      )
      case_sensitive_match: None | bool = Field(default=None)
+     status: ContentStatus = ContentStatus.production
+
+     @field_validator("status", mode="after")
+     @classmethod
+     def NarrowStatus(cls, status: ContentStatus) -> ContentStatus:
+         return cls.NarrowStatusTemplate(status, [ContentStatus.production])
+
+     @classmethod
+     def containing_folder(cls) -> pathlib.Path:
+         return pathlib.Path("lookups")
 
      @model_serializer
      def serialize_model(self):

@@ -174,6 +189,13 @@ class Lookup(SecurityContentObject, abc.ABC):
 
          return list(all_lookups)
 
+     @computed_field
+     @cached_property
+     def researchSiteLink(self) -> HttpUrl:
+         raise NotImplementedError(
+             f"researchSiteLink has not been implemented for [{type(self).__name__} - {self.name}]"
+         )
+
 
  class FileBackedLookup(Lookup, abc.ABC):
      # For purposes of the discriminated union, the child classes which

@@ -206,6 +228,10 @@ class FileBackedLookup(Lookup, abc.ABC):
      """
      pass
 
+     @property
+     def content_file_handle(self) -> TextIOBase:
+         return open(self.filename, "r")
+
 
  class CSVLookup(FileBackedLookup):
      lookup_type: Literal[Lookup_Type.csv]

@@ -245,8 +271,9 @@ class CSVLookup(FileBackedLookup):
          This function computes the filenames to write into the app itself. This is abstract because
          CSV and MLmodel requirements are different.
          """
+
          return pathlib.Path(
-             f"{self.filename.stem}_{self.date.year}{self.date.month:02}{self.date.day:02}.{self.lookup_type}"
+             f"{self.name}_{self.date.year}{self.date.month:02}{self.date.day:02}.{self.lookup_type}"
          )

@@ -256,9 +283,11 @@ class CSVLookup(FileBackedLookup):
          # If a row has MORE fields than fieldnames, they will be dumped in a list under the key 'restkey' - this should throw an Exception
          # If a row has LESS fields than fieldnames, then the field should contain None by default. This should also throw an exception.
          csv_errors: list[str] = []
-         with open(self.filename, "r") as csv_fp:
-             RESTKEY = "extra_fields_in_a_row"
-             csv_dict = csv.DictReader(csv_fp, restkey=RESTKEY)
+
+         RESTKEY = "extra_fields_in_a_row"
+         with self.content_file_handle as handle:
+             csv_dict = csv.DictReader(handle, restkey=RESTKEY)
+
              if csv_dict.fieldnames is None:
                  raise ValueError(
                      f"Error validating the CSV referenced by the lookup: {self.filename}:\n\t"

@@ -291,6 +320,28 @@ class CSVLookup(FileBackedLookup):
          return self
 
 
+ class RuntimeCSV(CSVLookup):
+     contents: str = Field(
+         description="This field contains the contents that would usually "
+         "be written to a CSV file. However, we store these in memory, "
+         "rather than on disk, to avoid needing to create a CSV file "
+         "before copying it into the app build."
+     )
+     # Since these are defined at runtime, they always have
+     # a date of today
+     date: datetime.date = Field(default=datetime.date.today())
+
+     @model_validator(mode="after")
+     def ensure_lookup_file_exists(self) -> Self:
+         # Because the contents of this file are created at runtime, it does
+         # not actually need to exist. As such, we do not validate it
+         return self
+
+     @property
+     def content_file_handle(self) -> TextIOBase:
+         return StringIO(self.contents)
+
+
  class KVStoreLookup(Lookup):
      lookup_type: Literal[Lookup_Type.kvstore]
      fields: list[str] = Field(

@@ -364,6 +415,11 @@ class MlModel(FileBackedLookup):
          return pathlib.Path(f"{self.filename.stem}.{self.lookup_type}")
 
 
- LookupAdapter = TypeAdapter(
+ LookupAdapter: TypeAdapter[CSVLookup | KVStoreLookup | MlModel] = TypeAdapter(
      Annotated[CSVLookup | KVStoreLookup | MlModel, Field(discriminator="lookup_type")]
  )
+
+ # The following are defined because they are used by the Director. For normal
+ # SecurityContentObject types they already exist, but they do not for the TypeAdapter
+ setattr(LookupAdapter, "containing_folder", lambda: "lookups")
+ setattr(LookupAdapter, "__name__", "Lookup")
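
Note: the new content_file_handle indirection is what makes RuntimeCSV work; CSV validation and packaging read from a file-like handle, which can be backed by disk (FileBackedLookup) or by memory (RuntimeCSV via StringIO). The pattern in isolation, as a minimal sketch (the sample data and helper name are illustrative):

import csv
from io import StringIO, TextIOBase


def read_rows(handle: TextIOBase) -> list[dict[str, str]]:
    # csv.DictReader does not care whether the handle is file- or memory-backed
    with handle:
        return list(csv.DictReader(handle, restkey="extra_fields_in_a_row"))


in_memory = StringIO("host,severity\nweb-01,high\n")  # what RuntimeCSV.contents holds
print(read_rows(in_memory))  # [{'host': 'web-01', 'severity': 'high'}]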

contentctl/objects/macro.py

@@ -1,14 +1,19 @@
  # Used so that we can have a staticmethod that takes the class
  # type Macro as an argument
  from __future__ import annotations
- from typing import TYPE_CHECKING, List
+
+ import datetime
+ import pathlib
  import re
- from pydantic import Field, model_serializer, NonNegativeInt
  import uuid
- import datetime
+ from typing import TYPE_CHECKING, List
+
+ from pydantic import Field, NonNegativeInt, field_validator, model_serializer
 
  if TYPE_CHECKING:
      from contentctl.input.director import DirectorOutputDto
+
+ from contentctl.objects.enums import ContentStatus
  from contentctl.objects.security_content_object import SecurityContentObject
 
  # The following macros are included in commonly-installed apps.

@@ -28,9 +33,19 @@ class Macro(SecurityContentObject):
      arguments: List[str] = Field([])
      # TODO: Add id field to all macro ymls
      id: uuid.UUID = Field(default_factory=uuid.uuid4)
-     date: datetime.date = Field(datetime.date.today())
+     date: datetime.date = Field(default=datetime.date.today())
      author: str = Field("NO AUTHOR DEFINED", max_length=255)
      version: NonNegativeInt = 1
+     status: ContentStatus = ContentStatus.production
+
+     @field_validator("status", mode="after")
+     @classmethod
+     def NarrowStatus(cls, status: ContentStatus) -> ContentStatus:
+         return cls.NarrowStatusTemplate(status, [ContentStatus.production])
+
+     @classmethod
+     def containing_folder(cls) -> pathlib.Path:
+         return pathlib.Path("macros")
 
      @model_serializer
      def serialize_model(self):
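
Note: one subtlety with the date field above: both Field(datetime.date.today()) and Field(default=datetime.date.today()) evaluate today() once, at class-definition time, so a long-running process keeps the process-start date; default_factory defers the call to each instantiation. A minimal comparison of the two forms (not a change this release makes, just the distinction):

import datetime

from pydantic import BaseModel, Field


class Frozen(BaseModel):
    # evaluated once, when the class is defined (the form used above)
    date: datetime.date = Field(default=datetime.date.today())


class PerInstance(BaseModel):
    # evaluated at every instantiation
    date: datetime.date = Field(default_factory=datetime.date.today)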

contentctl/objects/playbook.py

@@ -1,11 +1,13 @@
  from __future__ import annotations
+
+ import pathlib
  from typing import Self
- from pydantic import model_validator, Field, FilePath
 
+ from pydantic import Field, FilePath, field_validator, model_validator
 
+ from contentctl.objects.enums import ContentStatus, PlaybookType
  from contentctl.objects.playbook_tags import PlaybookTag
  from contentctl.objects.security_content_object import SecurityContentObject
- from contentctl.objects.enums import PlaybookType
 
 
  class Playbook(SecurityContentObject):

@@ -19,6 +21,16 @@ class Playbook(SecurityContentObject):
      playbook: str = Field(min_length=4)
      app_list: list[str] = Field(..., min_length=0)
      tags: PlaybookTag = Field(...)
+     status: ContentStatus = ContentStatus.production
+
+     @field_validator("status", mode="after")
+     @classmethod
+     def NarrowStatus(cls, status: ContentStatus) -> ContentStatus:
+         return cls.NarrowStatusTemplate(status, [ContentStatus.production])
+
+     @classmethod
+     def containing_folder(cls) -> pathlib.Path:
+         return pathlib.Path("playbooks")
 
      @model_validator(mode="after")
      def ensureJsonAndPyFilesExist(self) -> Self:

@@ -66,3 +78,5 @@ class Playbook(SecurityContentObject):
          )
 
          return self
+         return self
+         return self