contentctl 4.3.4__py3-none-any.whl → 4.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. contentctl/actions/build.py +1 -0
  2. contentctl/actions/detection_testing/GitService.py +10 -10
  3. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py +68 -38
  4. contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructureContainer.py +5 -1
  5. contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py +10 -8
  6. contentctl/actions/initialize.py +28 -12
  7. contentctl/actions/inspect.py +191 -91
  8. contentctl/actions/new_content.py +10 -2
  9. contentctl/actions/validate.py +3 -6
  10. contentctl/api.py +1 -1
  11. contentctl/contentctl.py +3 -0
  12. contentctl/enrichments/attack_enrichment.py +49 -81
  13. contentctl/enrichments/cve_enrichment.py +6 -7
  14. contentctl/helper/splunk_app.py +141 -10
  15. contentctl/input/director.py +19 -24
  16. contentctl/input/new_content_questions.py +9 -42
  17. contentctl/objects/abstract_security_content_objects/detection_abstract.py +155 -13
  18. contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py +17 -9
  19. contentctl/objects/atomic.py +51 -77
  20. contentctl/objects/base_test_result.py +7 -7
  21. contentctl/objects/baseline.py +12 -18
  22. contentctl/objects/baseline_tags.py +2 -5
  23. contentctl/objects/config.py +154 -26
  24. contentctl/objects/constants.py +34 -1
  25. contentctl/objects/correlation_search.py +79 -114
  26. contentctl/objects/dashboard.py +100 -0
  27. contentctl/objects/deployment.py +20 -5
  28. contentctl/objects/detection_metadata.py +71 -0
  29. contentctl/objects/detection_stanza.py +79 -0
  30. contentctl/objects/detection_tags.py +28 -26
  31. contentctl/objects/drilldown.py +70 -0
  32. contentctl/objects/enums.py +26 -24
  33. contentctl/objects/errors.py +187 -0
  34. contentctl/objects/investigation.py +23 -15
  35. contentctl/objects/investigation_tags.py +4 -3
  36. contentctl/objects/lookup.py +8 -1
  37. contentctl/objects/macro.py +16 -7
  38. contentctl/objects/notable_event.py +6 -5
  39. contentctl/objects/risk_analysis_action.py +4 -4
  40. contentctl/objects/risk_event.py +8 -7
  41. contentctl/objects/savedsearches_conf.py +196 -0
  42. contentctl/objects/story.py +4 -16
  43. contentctl/objects/throttling.py +46 -0
  44. contentctl/output/conf_output.py +4 -0
  45. contentctl/output/conf_writer.py +24 -4
  46. contentctl/output/new_content_yml_output.py +4 -9
  47. contentctl/output/templates/analyticstories_detections.j2 +2 -2
  48. contentctl/output/templates/analyticstories_investigations.j2 +5 -5
  49. contentctl/output/templates/analyticstories_stories.j2 +1 -1
  50. contentctl/output/templates/savedsearches_baselines.j2 +2 -3
  51. contentctl/output/templates/savedsearches_detections.j2 +12 -7
  52. contentctl/output/templates/savedsearches_investigations.j2 +3 -4
  53. contentctl/templates/detections/endpoint/anomalous_usage_of_7zip.yml +10 -1
  54. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/METADATA +6 -5
  55. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/RECORD +58 -57
  56. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/WHEEL +1 -1
  57. contentctl/objects/ssa_detection.py +0 -157
  58. contentctl/objects/ssa_detection_tags.py +0 -138
  59. contentctl/objects/unit_test_old.py +0 -10
  60. contentctl/objects/unit_test_ssa.py +0 -31
  61. contentctl/output/templates/finding_report.j2 +0 -30
  62. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/LICENSE.md +0 -0
  63. {contentctl-4.3.4.dist-info → contentctl-4.4.0.dist-info}/entry_points.txt +0 -0
contentctl/actions/inspect.py CHANGED
@@ -1,78 +1,86 @@
 import sys
-
-
 from dataclasses import dataclass
-
 import pathlib
 import json
 import datetime
+import timeit
+import time
 
-
-from contentctl.objects.config import inspect
 from requests import Session, post, get
 from requests.auth import HTTPBasicAuth
-import timeit
-import time
+
+from contentctl.objects.config import inspect
+from contentctl.objects.savedsearches_conf import SavedsearchesConf
+from contentctl.objects.errors import (
+    MetadataValidationError,
+    DetectionIDError,
+    DetectionMissingError,
+    VersionDecrementedError,
+    VersionBumpingError
+)
+
+
 @dataclass(frozen=True)
 class InspectInputDto:
-    config:inspect
+    config: inspect
 
 
 class Inspect:
 
     def execute(self, config: inspect) -> str:
-        if config.build_app or config.build_api:
-
+        if config.build_app or config.build_api:
+
             self.inspectAppCLI(config)
             appinspect_token = self.inspectAppAPI(config)
-
-
+
+            if config.enable_metadata_validation:
+                self.check_detection_metadata(config)
+            else:
+                print("🟡 Detection metadata validation disabled, skipping.")
+
             return appinspect_token
 
         else:
-            raise Exception("Inspect only supported for app and api build targets")
-
-    def getElapsedTime(self, startTime:float)->datetime.timedelta:
-        return datetime.timedelta(seconds=round(timeit.default_timer() - startTime))
+            raise Exception("Inspect only supported for app and api build targets")
 
+    def getElapsedTime(self, startTime: float) -> datetime.timedelta:
+        return datetime.timedelta(seconds=round(timeit.default_timer() - startTime))
 
-    def inspectAppAPI(self, config: inspect)->str:
+    def inspectAppAPI(self, config: inspect) -> str:
         session = Session()
         session.auth = HTTPBasicAuth(config.splunk_api_username, config.splunk_api_password)
         if config.stack_type not in ['victoria', 'classic']:
             raise Exception(f"stack_type MUST be either 'classic' or 'victoria', NOT '{config.stack_type}'")
-
+
         APPINSPECT_API_LOGIN = "https://api.splunk.com/2.0/rest/login/splunk"
-
-
-
+
         res = session.get(APPINSPECT_API_LOGIN)
-        #If login failed or other failure, raise an exception
+        # If login failed or other failure, raise an exception
         res.raise_for_status()
-
-        authorization_bearer = res.json().get("data",{}).get("token",None)
+
+        authorization_bearer = res.json().get("data", {}).get("token", None)
         APPINSPECT_API_VALIDATION_REQUEST = "https://appinspect.splunk.com/v1/app/validate"
         headers = {
             "Authorization": f"bearer {authorization_bearer}",
             "Cache-Control": "no-cache"
         }
-
+
         package_path = config.getPackageFilePath(include_version=False)
         if not package_path.is_file():
             raise Exception(f"Cannot run Appinspect API on App '{config.app.title}' - "
                             f"no package exists as expected path '{package_path}'.\nAre you "
                             "trying to 'contentctl deploy_acs' the package BEFORE running 'contentctl build'?")
-
+
         files = {
-            "app_package": open(package_path,"rb"),
-            "included_tags":(None,"cloud")
-        }
-
+            "app_package": open(package_path, "rb"),
+            "included_tags": (None, "cloud")
+        }
+
         res = post(APPINSPECT_API_VALIDATION_REQUEST, headers=headers, files=files)
 
         res.raise_for_status()
 
-        request_id = res.json().get("request_id",None)
+        request_id = res.json().get("request_id", None)
         APPINSPECT_API_VALIDATION_STATUS = f"https://appinspect.splunk.com/v1/app/validate/status/{request_id}?included_tags=private_{config.stack_type}"
         headers = headers = {
             "Authorization": f"bearer {authorization_bearer}"
@@ -83,10 +91,10 @@ class Inspect:
         # checking many times when we know it will take at least 40 seconds to run.
         iteration_wait_time = 40
         while True:
-
+
             res = get(APPINSPECT_API_VALIDATION_STATUS, headers=headers)
             res.raise_for_status()
-            status = res.json().get("status",None)
+            status = res.json().get("status", None)
             if status in ["PROCESSING", "PREPARING"]:
                 print(f"[{self.getElapsedTime(startTime)}] Appinspect API is {status}...")
                 time.sleep(iteration_wait_time)
@@ -97,12 +105,10 @@ class Inspect:
                 break
             else:
                 raise Exception(f"Error - Unknown Appinspect API status '{status}'")
-
-
 
-        #We have finished running appinspect, so get the report
+        # We have finished running appinspect, so get the report
         APPINSPECT_API_REPORT = f"https://appinspect.splunk.com/v1/app/report/{request_id}?included_tags=private_{config.stack_type}"
-        #Get human-readable HTML report
+        # Get human-readable HTML report
         headers = headers = {
             "Authorization": f"bearer {authorization_bearer}",
             "Content-Type": "text/html"
@@ -110,8 +116,8 @@ class Inspect:
         res = get(APPINSPECT_API_REPORT, headers=headers)
         res.raise_for_status()
         report_html = res.content
-
-        #Get JSON report for processing
+
+        # Get JSON report for processing
         headers = headers = {
             "Authorization": f"bearer {authorization_bearer}",
             "Content-Type": "application/json"
@@ -119,33 +125,31 @@ class Inspect:
         res = get(APPINSPECT_API_REPORT, headers=headers)
         res.raise_for_status()
         report_json = res.json()
-
+
         # Just get app path here to avoid long function calls in the open() calls below
         appPath = config.getPackageFilePath(include_version=True)
         appinpect_html_path = appPath.with_suffix(appPath.suffix+".appinspect_api_results.html")
         appinspect_json_path = appPath.with_suffix(appPath.suffix+".appinspect_api_results.json")
-        #Use the full path of the app, but update the suffix to include info about appinspect
+        # Use the full path of the app, but update the suffix to include info about appinspect
         with open(appinpect_html_path, "wb") as report:
             report.write(report_html)
        with open(appinspect_json_path, "w") as report:
             json.dump(report_json, report)
-
-
+
         self.parseAppinspectJsonLogFile(appinspect_json_path)
-
 
         return authorization_bearer
-
-
-    def inspectAppCLI(self, config:inspect)-> None:
-
+
+    def inspectAppCLI(self, config: inspect) -> None:
         try:
-            raise Exception("Local spunk-appinspect Not Supported at this time (you may use the appinspect api). If you would like to locally inspect your app with"
-                            "Python 3.7, 3.8, or 3.9 (with limited support), please refer to:\n"
-                            "\t - https://dev.splunk.com/enterprise/docs/developapps/testvalidate/appinspect/useappinspectclitool/")
+            raise Exception(
+                "Local spunk-appinspect Not Supported at this time (you may use the appinspect api). If you would like to locally inspect your app with"
+                "Python 3.7, 3.8, or 3.9 (with limited support), please refer to:\n"
+                "\t - https://dev.splunk.com/enterprise/docs/developapps/testvalidate/appinspect/useappinspectclitool/"
+            )
             from splunk_appinspect.main import (
-                validate, MODE_OPTION, APP_PACKAGE_ARGUMENT, OUTPUT_FILE_OPTION,
-                LOG_FILE_OPTION, INCLUDED_TAGS_OPTION, EXCLUDED_TAGS_OPTION,
+                validate, MODE_OPTION, APP_PACKAGE_ARGUMENT, OUTPUT_FILE_OPTION,
+                LOG_FILE_OPTION, INCLUDED_TAGS_OPTION, EXCLUDED_TAGS_OPTION,
                 PRECERT_MODE, TEST_MODE)
         except Exception as e:
             print(e)
@@ -153,19 +157,19 @@ class Inspect:
             # if sys.version_info.major == 3 and sys.version_info.minor > 9:
             #     print("The package splunk-appinspect was not installed due to a current issue with the library on Python3.10+. "
             #           "Please use the following commands to set up a virtualenvironment in a different folder so you may run appinspect manually (if desired):"
-            #           "\n\tpython3.9 -m venv .venv"
+            #           "\n\tpython3.9 -m venv .venv"
             #           "\n\tsource .venv/bin/activate"
             #           "\n\tpython3 -m pip install splunk-appinspect"
-            #           f"\n\tsplunk-appinspect inspect {self.getPackagePath(include_version=False).relative_to(pathlib.Path('.').absolute())} --mode precert")
-
+            #           f"\n\tsplunk-appinspect inspect {self.getPackagePath(include_version=False).relative_to(pathlib.Path('.').absolute())} --mode precert")
+
             # else:
             #     print("splunk-appinspect is only compatable with Python3.9 at this time. Please see the following open issue here: https://github.com/splunk/contentctl/issues/28")
             # print("******WARNING******")
             return
 
         # Note that all tags are available and described here:
-        # https://dev.splunk.com/enterprise/reference/appinspect/appinspecttagreference/
-        # By default, precert mode will run ALL checks. Explicitly included or excluding tags will
+        # https://dev.splunk.com/enterprise/reference/appinspect/appinspecttagreference/
+        # By default, precert mode will run ALL checks. Explicitly included or excluding tags will
         # change this behavior. To give the most thorough inspection, we leave these empty so that
         # ALL checks are run
         included_tags = []
@@ -179,82 +183,178 @@ class Inspect:
             options_list += [MODE_OPTION, TEST_MODE]
             options_list += [OUTPUT_FILE_OPTION, str(appinspect_output)]
             options_list += [LOG_FILE_OPTION, str(appinspect_logging)]
-
-            #If there are any tags defined, then include them here
+
+            # If there are any tags defined, then include them here
             for opt in included_tags:
                 options_list += [INCLUDED_TAGS_OPTION, opt]
             for opt in excluded_tags:
                 options_list += [EXCLUDED_TAGS_OPTION, opt]
 
-            cmdline = options_list + [arg[1] for arg in arguments_list]
+            cmdline = options_list + [arg[1] for arg in arguments_list]
             validate(cmdline)
-
+
         except SystemExit as e:
             if e.code == 0:
                 # The sys.exit called inside of appinspect validate closes stdin. We need to
                 # reopen it.
-                sys.stdin = open("/dev/stdin","r")
+                sys.stdin = open("/dev/stdin", "r")
                 print(f"AppInspect passed! Please check [ {appinspect_output} , {appinspect_logging} ] for verbose information.")
             else:
                 if sys.version.startswith('3.11') or sys.version.startswith('3.12'):
-                    raise Exception("At this time, AppInspect may fail on valid apps under Python>=3.11 with "
-                                    "the error 'global flags not at the start of the expression at position 1'. "
+                    raise Exception("At this time, AppInspect may fail on valid apps under Python>=3.11 with "
+                                    "the error 'global flags not at the start of the expression at position 1'. "
                                     "If you encounter this error, please run AppInspect on a version of Python "
                                     "<3.11. This issue is currently tracked. Please review the appinspect "
                                     "report output above for errors.")
-                else:
-                    raise Exception("AppInspect Failure - Please review the appinspect report output above for errors.")
+                else:
+                    raise Exception("AppInspect Failure - Please review the appinspect report output above for errors.")
         finally:
-            # appinspect outputs the log in json format, but does not format it to be easier
-            # to read (it is all in one line). Read back that file and write it so it
-            # is easier to understand
-
-            #Note that this may raise an exception itself!
-            self.parseAppinspectJsonLogFile(appinspect_output)
-
-    def parseAppinspectJsonLogFile(self, logfile_path:pathlib.Path,
-                                   status_types:list[str] = ["error", "failure", "manual_check", "warning"],
-                                   exception_types = ["error","failure","manual_check"] )->None:
+            # appinspect outputs the log in json format, but does not format it to be easier
+            # to read (it is all in one line). Read back that file and write it so it
+            # is easier to understand
+
+            # Note that this may raise an exception itself!
+            self.parseAppinspectJsonLogFile(appinspect_output)
+
+    def parseAppinspectJsonLogFile(
+        self,
+        logfile_path: pathlib.Path,
+        status_types: list[str] = ["error", "failure", "manual_check", "warning"],
+        exception_types: list[str] = ["error", "failure", "manual_check"]
+    ) -> None:
         if not set(exception_types).issubset(set(status_types)):
-            raise Exception(f"Error - exception_types {exception_types} MUST be a subset of status_types {status_types}, but it is not")
+            raise Exception(f"Error - exception_types {exception_types} MUST be a subset of status_types {status_types}, but it is not")
         with open(logfile_path, "r+") as logfile:
             j = json.load(logfile)
-            #Seek back to the beginning of the file. We don't need to clear
-            #it sice we will always write AT LEAST the same number of characters
-            #back as we read (due to the addition of whitespace)
+            # Seek back to the beginning of the file. We don't need to clear
+            # it sice we will always write AT LEAST the same number of characters
+            # back as we read (due to the addition of whitespace)
             logfile.seek(0)
             json.dump(j, logfile, indent=3, )
-
+
         reports = j.get("reports", [])
         if len(reports) != 1:
             raise Exception("Expected to find one appinspect report but found 0")
         verbose_errors = []
-
+
         for group in reports[0].get("groups", []):
-            for check in group.get("checks",[]):
-                if check.get("result","") in status_types:
+            for check in group.get("checks", []):
+                if check.get("result", "") in status_types:
                     verbose_errors.append(f" - {check.get('result','')} [{group.get('name','NONAME')}: {check.get('name', 'NONAME')}]")
         verbose_errors.sort()
-
+
         summary = j.get("summary", None)
         if summary is None:
             raise Exception("Missing summary from appinspect report")
         msgs = []
         generated_exception = False
         for key in status_types:
-            if summary.get(key,0)>0:
+            if summary.get(key, 0) > 0:
                 msgs.append(f" - {summary.get(key,0)} {key}s")
                 if key in exception_types:
                     generated_exception = True
-        if len(msgs)>0 or len(verbose_errors):
+        if len(msgs) > 0 or len(verbose_errors):
             summary = '\n'.join(msgs)
             details = '\n'.join(verbose_errors)
             summary = f"{summary}\nDetails:\n{details}"
             if generated_exception:
-                raise Exception(f"AppInspect found [{','.join(exception_types)}] that MUST be addressed to pass AppInspect API:\n{summary}")
+                raise Exception(f"AppInspect found [{','.join(exception_types)}] that MUST be addressed to pass AppInspect API:\n{summary}")
             else:
-                print(f"AppInspect found [{','.join(status_types)}] that MAY cause a failure during AppInspect API:\n{summary}")
+                print(f"AppInspect found [{','.join(status_types)}] that MAY cause a failure during AppInspect API:\n{summary}")
         else:
             print("AppInspect was successful!")
-
+
         return
+
+    def check_detection_metadata(self, config: inspect) -> None:
+        """
+        Using a previous build, compare the savedsearches.conf files to detect any issues w/
+        detection metadata. **NOTE**: Detection metadata validation can only be performed between
+        two builds with theappropriate metadata structure. In ESCU, this was added as of release
+        v4.39.0, so all current and previous builds for use with this feature must be this version
+        or greater.
+
+        :param config: an inspect config
+        :type config: :class:`contentctl.objects.config.inspect`
+        """
+        # TODO (#282): We should be inspect the same artifact we're passing around from the
+        # build stage ideally
+        # Unpack the savedsearch.conf of each app package
+        current_build_conf = SavedsearchesConf.init_from_package(
+            package_path=config.getPackageFilePath(include_version=False),
+            app_name=config.app.label,
+            appid=config.app.appid
+        )
+        previous_build_conf = SavedsearchesConf.init_from_package(
+            package_path=config.get_previous_package_file_path(),
+            app_name=config.app.label,
+            appid=config.app.appid
+        )
+
+        # Compare the conf files
+        validation_errors: dict[str, list[MetadataValidationError]] = {}
+        for rule_name in previous_build_conf.detection_stanzas:
+            validation_errors[rule_name] = []
+            # No detections should be removed from build to build
+            if rule_name not in current_build_conf.detection_stanzas:
+                if config.suppress_missing_content_exceptions:
+                    print(f"[SUPPRESSED] {DetectionMissingError(rule_name=rule_name).long_message}")
+                else:
+                    validation_errors[rule_name].append(DetectionMissingError(rule_name=rule_name))
+                continue
+            # Pull out the individual stanza for readability
+            previous_stanza = previous_build_conf.detection_stanzas[rule_name]
+            current_stanza = current_build_conf.detection_stanzas[rule_name]
+
+            # Detection IDs should not change
+            if current_stanza.metadata.detection_id != previous_stanza.metadata.detection_id:
+                validation_errors[rule_name].append(
+                    DetectionIDError(
+                        rule_name=rule_name,
+                        current_id=current_stanza.metadata.detection_id,
+                        previous_id=previous_stanza.metadata.detection_id
+                    )
+                )
+
+            # Versions should never decrement in successive builds
+            if current_stanza.metadata.detection_version < previous_stanza.metadata.detection_version:
+                validation_errors[rule_name].append(
+                    VersionDecrementedError(
+                        rule_name=rule_name,
+                        current_version=current_stanza.metadata.detection_version,
+                        previous_version=previous_stanza.metadata.detection_version
+                    )
+                )
+
+            # Versions need to be bumped if the stanza changes at all
+            if current_stanza.version_should_be_bumped(previous_stanza):
+                validation_errors[rule_name].append(
+                    VersionBumpingError(
+                        rule_name=rule_name,
+                        current_version=current_stanza.metadata.detection_version,
+                        previous_version=previous_stanza.metadata.detection_version
+                    )
+                )
+
+        # Convert our dict mapping to a flat list of errors for use in reporting
+        validation_error_list = [x for inner_list in validation_errors.values() for x in inner_list]
+
+        # Report failure/success
+        print("\nDetection Metadata Validation:")
+        if len(validation_error_list) > 0:
+            # Iterate over each rule and report the failures
+            for rule_name in validation_errors:
+                if len(validation_errors[rule_name]) > 0:
+                    print(f"\t❌ {rule_name}")
+                    for error in validation_errors[rule_name]:
+                        print(f"\t\t🔸 {error.short_message}")
+        else:
+            # If no errors in the list, report success
+            print("\t✅ Detection metadata looks good and all versions were bumped appropriately :)")
+
+        # Raise an ExceptionGroup for all validation issues
+        if len(validation_error_list) > 0:
+            raise ExceptionGroup(
+                "Validation errors when comparing detection stanzas in current and previous build:",
+                validation_error_list
+            )
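Note on the new check_detection_metadata step above: the comparison between the previous and current savedsearches.conf reduces to a few per-rule checks. The sketch below is a simplified illustration only, not the contentctl implementation; the Stanza/StanzaMetadata shapes and the body_hash field are hypothetical stand-ins for the real SavedsearchesConf.detection_stanzas objects and their version_should_be_bumped() check.

    from dataclasses import dataclass

    @dataclass
    class StanzaMetadata:          # simplified stand-in for the real metadata block
        detection_id: str
        detection_version: int

    @dataclass
    class Stanza:                  # simplified stand-in for a savedsearches.conf detection stanza
        metadata: StanzaMetadata
        body_hash: str             # hypothetical fingerprint of the stanza content

    def compare_stanzas(previous: dict[str, Stanza], current: dict[str, Stanza]) -> list[str]:
        """Return human-readable validation errors, mirroring the checks in check_detection_metadata."""
        errors: list[str] = []
        for rule_name, prev in previous.items():
            cur = current.get(rule_name)
            if cur is None:
                # no detection should disappear between successive builds
                errors.append(f"{rule_name}: detection removed between builds")
                continue
            if cur.metadata.detection_id != prev.metadata.detection_id:
                errors.append(f"{rule_name}: detection_id changed")
            if cur.metadata.detection_version < prev.metadata.detection_version:
                errors.append(f"{rule_name}: version decremented")
            if cur.body_hash != prev.body_hash and cur.metadata.detection_version <= prev.metadata.detection_version:
                errors.append(f"{rule_name}: stanza changed but version was not bumped")
        return errors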
contentctl/actions/new_content.py CHANGED
@@ -16,7 +16,11 @@ class NewContent:
 
     def buildDetection(self)->dict[str,Any]:
         questions = NewContentQuestions.get_questions_detection()
-        answers = questionary.prompt(questions)
+        answers: dict[str,str] = questionary.prompt(
+            questions,
+            kbi_msg="User did not answer all of the prompt questions. Exiting...")
+        if not answers:
+            raise ValueError("User didn't answer one or more questions!")
         answers.update(answers)
         answers['name'] = answers['detection_name']
         del answers['detection_name']
@@ -70,7 +74,11 @@ class NewContent:
 
     def buildStory(self)->dict[str,Any]:
         questions = NewContentQuestions.get_questions_story()
-        answers = questionary.prompt(questions)
+        answers = questionary.prompt(
+            questions,
+            kbi_msg="User did not answer all of the prompt questions. Exiting...")
+        if not answers:
+            raise ValueError("User didn't answer one or more questions!")
         answers['name'] = answers['story_name']
         del answers['story_name']
         answers['id'] = str(uuid.uuid4())
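Note on the new_content.py change above: questionary.prompt returns an empty dict when the user aborts the prompts (for example with Ctrl+C), and kbi_msg only customizes the message printed on KeyboardInterrupt, so the explicit emptiness check is what actually stops the flow. A minimal sketch of the same guard pattern, using a hypothetical question rather than one of contentctl's real prompts:

    import questionary

    questions = [
        {"type": "text", "name": "detection_name", "message": "Name of the detection?"}
    ]

    answers = questionary.prompt(
        questions,
        kbi_msg="User did not answer all of the prompt questions. Exiting...",
    )
    if not answers:
        # questionary returns {} on KeyboardInterrupt; fail loudly instead of continuing
        raise ValueError("User didn't answer one or more questions!")
    print(answers["detection_name"])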
contentctl/actions/validate.py CHANGED
@@ -5,7 +5,7 @@ from contentctl.input.director import Director, DirectorOutputDto
 from contentctl.objects.config import validate
 from contentctl.enrichments.attack_enrichment import AttackEnrichment
 from contentctl.enrichments.cve_enrichment import CveEnrichment
-from contentctl.objects.atomic import AtomicTest
+from contentctl.objects.atomic import AtomicEnrichment
 from contentctl.helper.utils import Utils
 from contentctl.objects.data_source import DataSource
 from contentctl.helper.splunk_app import SplunkApp
@@ -13,12 +13,8 @@ from contentctl.helper.splunk_app import SplunkApp
 
 class Validate:
     def execute(self, input_dto: validate) -> DirectorOutputDto:
-
         director_output_dto = DirectorOutputDto(
-            AtomicTest.getAtomicTestsFromArtRepo(
-                repo_path=input_dto.getAtomicRedTeamRepoPath(),
-                enabled=input_dto.enrichments,
-            ),
+            AtomicEnrichment.getAtomicEnrichment(input_dto),
             AttackEnrichment.getAttackEnrichment(input_dto),
             CveEnrichment.getCveEnrichment(input_dto),
             [],
@@ -30,6 +26,7 @@ class Validate:
             [],
             [],
             [],
+            []
         )
 
         director = Director(director_output_dto)
contentctl/api.py CHANGED
@@ -126,7 +126,7 @@ def update_config(config:Union[test,test_servers], **key_value_updates:dict[str,
 def content_to_dict(director:DirectorOutputDto)->dict[str,list[dict[str,Any]]]:
     output_dict:dict[str,list[dict[str,Any]]] = {}
     for contentType in ['detections','stories','baselines','investigations',
-                        'playbooks','macros','lookups','deployments','ssa_detections']:
+                        'playbooks','macros','lookups','deployments',]:
 
         output_dict[contentType] = []
         t:list[SecurityContentObject] = getattr(director,contentType)
contentctl/contentctl.py CHANGED
@@ -211,6 +211,9 @@ def main():
             test_common_func(config)
         else:
             raise Exception(f"Unknown command line type '{type(config).__name__}'")
+    except FileNotFoundError as e:
+        print(e)
+        sys.exit(1)
 
     except Exception as e:
  print("There was a serious issue where the config file could not be created.\n"