cyvest-4.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cyvest has been flagged as possibly problematic.
- cyvest/__init__.py +38 -0
- cyvest/cli.py +365 -0
- cyvest/cyvest.py +1261 -0
- cyvest/investigation.py +1644 -0
- cyvest/io_rich.py +579 -0
- cyvest/io_schema.py +35 -0
- cyvest/io_serialization.py +459 -0
- cyvest/io_visualization.py +358 -0
- cyvest/keys.py +194 -0
- cyvest/level_score_rules.py +78 -0
- cyvest/levels.py +175 -0
- cyvest/model.py +583 -0
- cyvest/model_enums.py +69 -0
- cyvest/model_schema.py +164 -0
- cyvest/proxies.py +582 -0
- cyvest/score.py +473 -0
- cyvest/shared.py +496 -0
- cyvest/stats.py +316 -0
- cyvest/ulid.py +36 -0
- cyvest-4.4.0.dist-info/METADATA +538 -0
- cyvest-4.4.0.dist-info/RECORD +23 -0
- cyvest-4.4.0.dist-info/WHEEL +4 -0
- cyvest-4.4.0.dist-info/entry_points.txt +3 -0
cyvest/cyvest.py
ADDED
@@ -0,0 +1,1261 @@
"""
Cyvest facade - high-level API for building cybersecurity investigations.

Provides a simplified interface for creating and managing investigation objects,
handling score propagation, and generating reports.

Includes JSON/Markdown export (io_save_json, io_save_markdown), import (io_load_json),
and investigation export (io_to_invest, io_to_markdown) methods.
"""

from __future__ import annotations

import threading
from collections.abc import Iterable
from decimal import Decimal
from pathlib import Path
from typing import TYPE_CHECKING, Any, Final, Literal, overload

from logurich import logger

from cyvest import keys
from cyvest.investigation import Investigation, InvestigationWhitelist
from cyvest.io_rich import display_statistics, display_summary
from cyvest.io_serialization import (
    generate_markdown_report,
    load_investigation_json,
    save_investigation_json,
    save_investigation_markdown,
    serialize_investigation,
)
from cyvest.levels import Level
from cyvest.model import Check, Container, Enrichment, Observable, Taxonomy, ThreatIntel
from cyvest.model_enums import ObservableType, PropagationMode, RelationshipDirection, RelationshipType
from cyvest.model_schema import InvestigationSchema, StatisticsSchema
from cyvest.proxies import CheckProxy, ContainerProxy, EnrichmentProxy, ObservableProxy, ThreatIntelProxy
from cyvest.score import ScoreMode

if TYPE_CHECKING:
    from cyvest.shared import SharedInvestigationContext


class Cyvest:
    """
    High-level facade for building and managing cybersecurity investigations.

    Provides methods for creating observables, checks, threat intel, enrichments,
    and containers, with automatic score propagation and statistics tracking.
    """

    OBS: Final[type[ObservableType]] = ObservableType
    REL: Final[type[RelationshipType]] = RelationshipType
    DIR: Final[type[RelationshipDirection]] = RelationshipDirection
    PROP: Final[type[PropagationMode]] = PropagationMode
    LVL: Final[type[Level]] = Level

    def __init__(
        self,
        root_data: Any = None,
        root_type: ObservableType | Literal["file", "artifact"] = ObservableType.FILE,
        score_mode_obs: ScoreMode = ScoreMode.MAX,
        investigation_name: str | None = None,
        investigation_id: str | None = None,
    ) -> None:
        """
        Initialize a new investigation.

        Args:
            root_data: The data being investigated (optional)
            root_type: Root observable type (ObservableType.FILE or ObservableType.ARTIFACT)
            score_mode_obs: Observable score calculation mode (MAX or SUM)
            investigation_name: Optional human-readable investigation name
            investigation_id: Optional deterministic investigation ID (auto-generated ULID if not provided)
        """
        self._investigation = Investigation(
            root_data,
            root_type=root_type,
            score_mode_obs=score_mode_obs,
            investigation_name=investigation_name,
            investigation_id=investigation_id,
        )

    @staticmethod
    def io_load_json(filepath: str | Path) -> Cyvest:
        """
        Load an investigation from a JSON file.

        Args:
            filepath: Path to the JSON file (relative or absolute)

        Returns:
            Reconstructed Cyvest investigation

        Raises:
            FileNotFoundError: If the file does not exist
            json.JSONDecodeError: If the file contains invalid JSON
            Exception: For other file-related errors

        Example:
            >>> cv = Cyvest.io_load_json("investigation.json")
            >>> cv = Cyvest.io_load_json("/absolute/path/to/investigation.json")
        """
        return load_investigation_json(filepath)

    def shared_context(
        self,
        *,
        lock: threading.RLock | None = None,
        max_async_workers: int | None = None,
    ) -> SharedInvestigationContext:
        """
        Create a SharedInvestigationContext tied to this Cyvest instance.

        Args:
            lock: Optional shared lock for advanced synchronization scenarios.
            max_async_workers: Optional limit for concurrent async reconciliation workers.
        """
        from cyvest.shared import SharedInvestigationContext

        return SharedInvestigationContext(self, lock=lock, max_async_workers=max_async_workers)

    # Internal helpers -------------------------------------------------

    def _observable_proxy(self, observable: Observable | None) -> ObservableProxy | None:
        if observable is None:
            return None
        return ObservableProxy(self._investigation, observable.key)

    def _check_proxy(self, check: Check | None) -> CheckProxy | None:
        if check is None:
            return None
        return CheckProxy(self._investigation, check.key)

    def _container_proxy(self, container: Container | None) -> ContainerProxy | None:
        if container is None:
            return None
        return ContainerProxy(self._investigation, container.key)

    def _threat_intel_proxy(self, ti: ThreatIntel | None) -> ThreatIntelProxy | None:
        if ti is None:
            return None
        return ThreatIntelProxy(self._investigation, ti.key)

    def _enrichment_proxy(self, enrichment: Enrichment | None) -> EnrichmentProxy | None:
        if enrichment is None:
            return None
        return EnrichmentProxy(self._investigation, enrichment.key)

    @staticmethod
    def _resolve_key(value: Observable | ObservableProxy | str) -> str:
        if isinstance(value, str):
            return value
        if isinstance(value, (Observable, ObservableProxy)):
            return value.key
        raise TypeError("Expected an observable key, ObservableProxy, or Observable instance.")

    @staticmethod
    def _resolve_threat_intel_key(value: ThreatIntel | ThreatIntelProxy | str) -> str:
        if isinstance(value, str):
            return value
        if isinstance(value, (ThreatIntel, ThreatIntelProxy)):
            return value.key
        raise TypeError("Expected a threat intel key, ThreatIntelProxy, or ThreatIntel instance.")

    def _require_observable(self, key: str) -> Observable:
        observable = self._investigation.get_observable(key)
        if observable is None:
            raise KeyError(f"observable '{key}' not found in investigation.")
        return observable

    def _require_check(self, key: str) -> Check:
        check = self._investigation.get_check(key)
        if check is None:
            raise KeyError(f"check '{key}' not found in investigation.")
        return check

    # Investigation-level helpers

    def investigation_is_whitelisted(self) -> bool:
        """
        Return whether the investigation is whitelisted/marked safe.

        Examples:
            >>> cv = Cyvest()
            >>> cv.investigation_add_whitelist("id-1", "False positive", "Sandboxed sample")
            >>> cv.investigation_is_whitelisted()
            True
        """
        return self._investigation.is_whitelisted()

    def investigation_get_name(self) -> str | None:
        """Return the human-readable investigation name (if set)."""
        return self._investigation.investigation_name

    def investigation_set_name(self, name: str | None, reason: str | None = None) -> None:
        """Set or clear the human-readable investigation name."""
        self._investigation.set_investigation_name(name, reason=reason)

    def investigation_get_audit_log(self) -> tuple:
        """Return the investigation-level audit log."""
        return tuple(self._investigation.get_audit_log())

    def investigation_add_whitelist(
        self, identifier: str, name: str, justification: str | None = None
    ) -> InvestigationWhitelist:
        """
        Add or update a whitelist entry for the investigation.

        Args:
            identifier: Unique identifier for the whitelist entry.
            name: Human-readable name.
            justification: Optional markdown justification.
        """
        return self._investigation.add_whitelist(identifier, name, justification)

    def investigation_remove_whitelist(self, identifier: str) -> bool:
        """
        Remove a whitelist entry by identifier.

        Returns:
            True if removed, False if the identifier was not present.
        """
        return self._investigation.remove_whitelist(identifier)

    def investigation_clear_whitelists(self) -> None:
        """Remove all whitelist entries."""
        self._investigation.clear_whitelists()

    def investigation_get_whitelists(self) -> tuple[InvestigationWhitelist, ...]:
        """
        Get all whitelist entries.

        Returns:
            Tuple of whitelist entries.
        """
        return tuple(self._investigation.get_whitelists())

    # Observable methods

    def observable_create(
        self,
        obs_type: ObservableType,
        value: str,
        internal: bool = False,
        whitelisted: bool = False,
        comment: str = "",
        extra: dict[str, Any] | None = None,
        score: Decimal | float | None = None,
        level: Level | None = None,
    ) -> ObservableProxy:
        """
        Create a new observable or return existing one.

        Args:
            obs_type: Type of observable
            value: Value of the observable
            internal: Whether this is an internal asset
            whitelisted: Whether this is whitelisted
            comment: Optional comment
            extra: Optional extra data
            score: Optional explicit score
            level: Optional explicit level

        Returns:
            The created or existing observable
        """
        obs_kwargs: dict[str, Any] = {
            "obs_type": obs_type,
            "value": value,
            "internal": internal,
            "whitelisted": whitelisted,
            "comment": comment,
            "extra": extra or {},
        }
        if score is not None:
            obs_kwargs["score"] = Decimal(str(score))
        if level is not None:
            obs_kwargs["level"] = level
        obs = Observable(**obs_kwargs)
        # Unwrap tuple - facade returns only Observable, discards deferred relationships
        obs_result, _ = self._investigation.add_observable(obs)
        return self._observable_proxy(obs_result)

    @overload
    def observable_get(self, key: str) -> ObservableProxy | None:
        """Get an observable by full key string."""
        ...

    @overload
    def observable_get(self, obs_type: ObservableType, value: str) -> ObservableProxy | None:
        """Get an observable by type and value."""
        ...

    def observable_get(self, *args, **kwargs) -> ObservableProxy | None:
        """
        Get an observable by key or by type and value.

        Args:
            key: Observable key (single argument)
            obs_type: Observable type (when using two arguments)
            value: Observable value (when using two arguments)

        Returns:
            Observable if found, None otherwise

        Raises:
            ValueError: If arguments are invalid or key generation fails
        """
        if kwargs:
            if not args and set(kwargs) == {"key"}:
                key = kwargs["key"]
            elif not args and set(kwargs) == {"obs_type", "value"}:
                obs_type = kwargs["obs_type"]
                value = kwargs["value"]
                try:
                    key = keys.generate_observable_key(obs_type.value, value)
                except Exception as e:
                    raise ValueError(
                        f"Failed to generate observable key for type='{obs_type}', value='{value}': {e}"
                    ) from e
            else:
                raise ValueError("observable_get() accepts either (key: str) or (obs_type: ObservableType, value: str)")
        elif len(args) == 1:
            key = args[0]
        elif len(args) == 2:
            obs_type, value = args
            try:
                key = keys.generate_observable_key(obs_type.value, value)
            except Exception as e:
                raise ValueError(
                    f"Failed to generate observable key for type='{obs_type}', value='{value}': {e}"
                ) from e
        else:
            raise ValueError("observable_get() accepts either (key: str) or (obs_type: ObservableType, value: str)")
        return self._observable_proxy(self._investigation.get_observable(key))

    def observable_get_root(self) -> ObservableProxy:
        """
        Get the root observable.

        Returns:
            Root observable
        """
        return self._observable_proxy(self._investigation.get_root())

    def observable_get_all(self) -> dict[str, ObservableProxy]:
        """Get read-only proxies for all observables."""
        return {
            key: ObservableProxy(self._investigation, key) for key in self._investigation.get_all_observables().keys()
        }

    def observable_add_relationship(
        self,
        source: Observable | ObservableProxy | str,
        target: Observable | ObservableProxy | str,
        relationship_type: RelationshipType,
        direction: RelationshipDirection | None = None,
    ) -> ObservableProxy:
        """
        Add a relationship between observables.

        Args:
            source: Source observable or its key
            target: Target observable or its key
            relationship_type: Type of relationship
            direction: Direction of the relationship (None = use semantic default for relationship type)

        Returns:
            The source observable

        Raises:
            KeyError: If the source or target observable does not exist
        """
        source_key = self._resolve_key(source)
        target_key = self._resolve_key(target)
        result = self._investigation.add_relationship(source_key, target_key, relationship_type, direction)
        return self._observable_proxy(result)

    def observable_add_threat_intel(
        self,
        observable: Observable | ObservableProxy | str,
        source: str,
        score: Decimal | float,
        comment: str = "",
        extra: dict[str, Any] | None = None,
        level: Level | None = None,
        taxonomies: list[Taxonomy | dict[str, Any]] | None = None,
    ) -> ThreatIntelProxy:
        """
        Add threat intelligence to an observable.

        Args:
            observable: Observable, ObservableProxy, or its key
            source: Threat intel source name
            score: Score from threat intel
            comment: Optional comment
            extra: Optional extra data
            level: Optional explicit level
            taxonomies: Optional taxonomies

        Returns:
            The created threat intel

        Raises:
            KeyError: If the observable does not exist
        """
        observable_key = self._resolve_key(observable)
        observable = self._require_observable(observable_key)

        ti_kwargs: dict[str, Any] = {
            "source": source,
            "observable_key": observable_key,
            "comment": comment,
            "extra": extra or {},
            "score": Decimal(str(score)),
            "taxonomies": taxonomies or [],
        }
        if level is not None:
            ti_kwargs["level"] = level
        ti = ThreatIntel(**ti_kwargs)
        result = self._investigation.add_threat_intel(ti, observable)
        return self._threat_intel_proxy(result)

    def observable_with_ti_draft(
        self,
        observable: Observable | ObservableProxy | str,
        threat_intel: ThreatIntel,
    ) -> ThreatIntelProxy:
        """
        Attach a threat intel draft to an observable.

        Args:
            observable: Observable, ObservableProxy, or its key
            threat_intel: Threat intel draft entry (unbound or matching observable)

        Returns:
            The created/merged threat intel

        Raises:
            KeyError: If the observable does not exist
        """
        if not isinstance(threat_intel, ThreatIntel):
            raise TypeError("Threat intel draft must be a ThreatIntel instance.")

        observable_key = self._resolve_key(observable)
        model_observable = self._require_observable(observable_key)

        if threat_intel.observable_key and threat_intel.observable_key != observable_key:
            raise ValueError("Threat intel is already bound to a different observable.")

        threat_intel.observable_key = observable_key
        expected_key = keys.generate_threat_intel_key(threat_intel.source, observable_key)
        if not threat_intel.key or threat_intel.key != expected_key:
            threat_intel.key = expected_key

        result = self._investigation.add_threat_intel(threat_intel, model_observable)
        return self._threat_intel_proxy(result)

    def observable_set_level(
        self,
        observable: Observable | ObservableProxy | str,
        level: Level,
        reason: str | None = None,
    ) -> ObservableProxy:
        """
        Explicitly set an observable's level via the service layer.

        Args:
            observable: Observable, ObservableProxy, or its key
            level: Level to apply

        Returns:
            Updated observable proxy

        Raises:
            KeyError: If the observable does not exist
        """
        observable_key = self._resolve_key(observable)
        model_observable = self._require_observable(observable_key)
        self._investigation.apply_level_change(
            model_observable,
            level,
            reason=reason or "Manual level update",
        )
        return self._observable_proxy(model_observable)

    # Threat intel methods

    def threat_intel_draft_create(
        self,
        source: str,
        score: Decimal | float,
        comment: str = "",
        extra: dict[str, Any] | None = None,
        level: Level | None = None,
        taxonomies: list[Taxonomy | dict[str, Any]] | None = None,
    ) -> ThreatIntel:
        """
        Create an unbound threat intel draft entry.

        Args:
            source: Threat intel source name
            score: Score from threat intel
            comment: Optional comment
            extra: Optional extra data
            level: Optional explicit level
            taxonomies: Optional taxonomies

        Returns:
            Unbound ThreatIntel instance
        """
        ti_kwargs: dict[str, Any] = {
            "source": source,
            "observable_key": "",
            "comment": comment,
            "extra": extra or {},
            "score": Decimal(str(score)),
            "taxonomies": taxonomies or [],
        }
        if level is not None:
            ti_kwargs["level"] = level
        return ThreatIntel(**ti_kwargs)

    def threat_intel_add_taxonomy(
        self,
        threat_intel: ThreatIntel | ThreatIntelProxy | str,
        *,
        level: Level,
        name: str,
        value: str,
    ) -> ThreatIntelProxy:
        """
        Add or replace a taxonomy entry by name on a threat intel.

        Args:
            threat_intel: ThreatIntel, ThreatIntelProxy, or its key
            level: Taxonomy level
            name: Taxonomy name (unique per threat intel)
            value: Taxonomy value

        Returns:
            Updated threat intel proxy

        Raises:
            KeyError: If the threat intel does not exist
        """
        ti_key = self._resolve_threat_intel_key(threat_intel)
        taxonomy = Taxonomy(level=level, name=name, value=value)
        updated = self._investigation.add_threat_intel_taxonomy(ti_key, taxonomy)
        return self._threat_intel_proxy(updated)

    def threat_intel_remove_taxonomy(
        self,
        threat_intel: ThreatIntel | ThreatIntelProxy | str,
        name: str,
    ) -> ThreatIntelProxy:
        """
        Remove a taxonomy entry by name from a threat intel.

        Args:
            threat_intel: ThreatIntel, ThreatIntelProxy, or its key
            name: Taxonomy name to remove

        Returns:
            Updated threat intel proxy

        Raises:
            KeyError: If the threat intel does not exist
        """
        ti_key = self._resolve_threat_intel_key(threat_intel)
        updated = self._investigation.remove_threat_intel_taxonomy(ti_key, name)
        return self._threat_intel_proxy(updated)

    def threat_intel_get_all(self) -> dict[str, ThreatIntelProxy]:
        """Get read-only proxies for all threat intel entries."""
        return {
            key: ThreatIntelProxy(self._investigation, key)
            for key in self._investigation.get_all_threat_intels().keys()
        }

    # Check methods

    def check_create(
        self,
        check_id: str,
        scope: str,
        description: str,
        comment: str = "",
        extra: dict[str, Any] | None = None,
        score: Decimal | float | None = None,
        level: Level | None = None,
    ) -> CheckProxy:
        """
        Create a new check.

        Args:
            check_id: Check identifier
            scope: Check scope
            description: Check description
            comment: Optional comment
            extra: Optional extra data
            score: Optional explicit score
            level: Optional explicit level

        Returns:
            The created check
        """
        check_kwargs: dict[str, Any] = {
            "check_id": check_id,
            "scope": scope,
            "description": description,
            "comment": comment,
            "extra": extra or {},
            "origin_investigation_id": self._investigation.investigation_id,
        }
        if score is not None:
            check_kwargs["score"] = Decimal(str(score))
        if level is not None:
            check_kwargs["level"] = level
        check = Check(**check_kwargs)
        return self._check_proxy(self._investigation.add_check(check))

    @overload
    def check_get(self, key: str) -> CheckProxy | None:
        """Get a check by full key string."""
        ...

    @overload
    def check_get(self, check_id: str, scope: str) -> CheckProxy | None:
        """Get a check by ID and scope."""
        ...

    def check_get(self, *args, **kwargs) -> CheckProxy | None:
        """
        Get a check by key or by check ID and scope.

        Args:
            key: Check key (single argument)
            check_id: Check identifier (when using two arguments)
            scope: Check scope (when using two arguments)

        Returns:
            Check if found, None otherwise

        Raises:
            ValueError: If arguments are invalid or key generation fails
        """
        if kwargs:
            if not args and set(kwargs) == {"key"}:
                key = kwargs["key"]
            elif not args and set(kwargs) == {"check_id", "scope"}:
                check_id = kwargs["check_id"]
                scope = kwargs["scope"]
                try:
                    key = keys.generate_check_key(check_id, scope)
                except Exception as e:
                    raise ValueError(
                        f"Failed to generate check key for check_id='{check_id}', scope='{scope}': {e}"
                    ) from e
            else:
                raise ValueError("check_get() accepts either (key: str) or (check_id: str, scope: str)")
        elif len(args) == 1:
            key = args[0]
        elif len(args) == 2:
            check_id, scope = args
            try:
                key = keys.generate_check_key(check_id, scope)
            except Exception as e:
                raise ValueError(f"Failed to generate check key for check_id='{check_id}', scope='{scope}': {e}") from e
        else:
            raise ValueError("check_get() accepts either (key: str) or (check_id: str, scope: str)")
        return self._check_proxy(self._investigation.get_check(key))

    def check_get_all(self) -> dict[str, CheckProxy]:
        """Get read-only proxies for all checks."""
        return {key: CheckProxy(self._investigation, key) for key in self._investigation.get_all_checks().keys()}

    def check_link_observable(
        self,
        check_key: str,
        observable: Observable | ObservableProxy | str,
        propagation_mode: PropagationMode = PropagationMode.LOCAL_ONLY,
    ) -> CheckProxy:
        """
        Link an observable to a check.

        Args:
            check_key: Key of the check
            observable: Observable, ObservableProxy, or its key
            propagation_mode: Propagation behavior for this link

        Returns:
            The check

        Raises:
            KeyError: If the check or observable does not exist
        """
        observable_key = self._resolve_key(observable)
        result = self._investigation.link_check_observable(check_key, observable_key, propagation_mode=propagation_mode)
        return self._check_proxy(result)

    def check_update_score(self, check_key: str, score: Decimal | float, reason: str = "") -> CheckProxy:
        """
        Update a check's score.

        Args:
            check_key: Key of the check
            score: New score
            reason: Reason for update

        Returns:
            The check

        Raises:
            KeyError: If the check does not exist
        """
        check = self._require_check(check_key)
        self._investigation.apply_score_change(check, Decimal(str(score)), reason=reason)
        return self._check_proxy(check)

    # Container methods

    def container_create(self, path: str, description: str = "") -> ContainerProxy:
        """
        Create a new container.

        Args:
            path: Container path
            description: Container description

        Returns:
            The created container
        """
        container = Container(path=path, description=description)
        return self._container_proxy(self._investigation.add_container(container))

    def container_get(self, *args, **kwargs) -> ContainerProxy | None:
        """
        Get a container by key or by path.

        Args:
            key: Container key (single argument, prefixed with ctr:)
            path: Container path (single argument without prefix)

        Returns:
            Container if found, None otherwise

        Raises:
            ValueError: If arguments are invalid or key generation fails
        """
        if kwargs:
            if not args and set(kwargs) == {"key"}:
                key = kwargs["key"]
            elif not args and set(kwargs) == {"path"}:
                path = kwargs["path"]
                try:
                    key = keys.generate_container_key(path)
                except Exception as e:
                    raise ValueError(f"Failed to generate container key for path='{path}': {e}") from e
            else:
                raise ValueError("container_get() accepts either (key: str) or (path: str)")
        elif len(args) == 1:
            key_or_path = args[0]
            if isinstance(key_or_path, str) and key_or_path.startswith("ctr:"):
                key = key_or_path
            else:
                try:
                    key = keys.generate_container_key(key_or_path)
                except Exception as e:
                    raise ValueError(f"Failed to generate container key for path='{key_or_path}': {e}") from e
        else:
            raise ValueError("container_get() accepts either (key: str) or (path: str)")
        return self._container_proxy(self._investigation.get_container(key))

    def container_get_all(self) -> dict[str, ContainerProxy]:
        """Get read-only proxies for all containers."""
        return {
            key: ContainerProxy(self._investigation, key) for key in self._investigation.get_all_containers().keys()
        }

    def container_add_check(self, container_key: str, check_key: str) -> ContainerProxy:
        """
        Add a check to a container.

        Args:
            container_key: Key of the container
            check_key: Key of the check

        Returns:
            The container

        Raises:
            KeyError: If the container or check does not exist
        """
        container = self._investigation.add_check_to_container(container_key, check_key)
        return self._container_proxy(container)

    def container_add_sub_container(self, parent_key: str, child_key: str) -> ContainerProxy:
        """
        Add a sub-container to a container.

        Args:
            parent_key: Key of the parent container
            child_key: Key of the child container

        Returns:
            The parent container

        Raises:
            KeyError: If the parent or child container does not exist
        """
        parent = self._investigation.add_sub_container(parent_key, child_key)
        return self._container_proxy(parent)

    # Enrichment methods

    def enrichment_create(self, name: str, data: dict[str, Any], context: str = "") -> EnrichmentProxy:
        """
        Create a new enrichment.

        Args:
            name: Enrichment name
            data: Enrichment data
            context: Optional context

        Returns:
            The created enrichment
        """
        enrichment = Enrichment(name=name, data=data, context=context)
        return self._enrichment_proxy(self._investigation.add_enrichment(enrichment))

    @overload
    def enrichment_get(self, key: str) -> EnrichmentProxy | None:
        """Get an enrichment by full key string."""
        ...

    @overload
    def enrichment_get(self, name: str, context: str = "") -> EnrichmentProxy | None:
        """Get an enrichment by name and optional context."""
        ...

    def enrichment_get(self, *args, **kwargs) -> EnrichmentProxy | None:
        """
        Get an enrichment by key or by name and context.

        Args:
            key: Enrichment key (single argument, prefixed with enr:)
            name: Enrichment name (when using one or two arguments)
            context: Optional context (second argument or context= kw)

        Returns:
            Enrichment if found, None otherwise

        Raises:
            ValueError: If arguments are invalid or key generation fails
        """
        if kwargs:
            if not args and set(kwargs) == {"key"}:
                key = kwargs["key"]
            elif not args and set(kwargs) == {"name"}:
                name = kwargs["name"]
                try:
                    key = keys.generate_enrichment_key(name)
                except Exception as e:
                    raise ValueError(f"Failed to generate enrichment key for name='{name}': {e}") from e
            elif not args and set(kwargs) == {"name", "context"}:
                name = kwargs["name"]
                context = kwargs["context"]
                try:
                    key = keys.generate_enrichment_key(name, context)
                except Exception as e:
                    raise ValueError(
                        f"Failed to generate enrichment key for name='{name}', context='{context}': {e}"
                    ) from e
            elif len(args) == 1 and set(kwargs) == {"context"}:
                name = args[0]
                context = kwargs["context"]
                try:
                    key = keys.generate_enrichment_key(name, context)
                except Exception as e:
                    raise ValueError(
                        f"Failed to generate enrichment key for name='{name}', context='{context}': {e}"
                    ) from e
            else:
                raise ValueError('enrichment_get() accepts either (key: str) or (name: str, context: str = "")')
        elif len(args) == 1:
            key_or_name = args[0]
            if isinstance(key_or_name, str) and key_or_name.startswith("enr:"):
                key = key_or_name
            else:
                try:
                    key = keys.generate_enrichment_key(key_or_name)
                except Exception as e:
                    raise ValueError(f"Failed to generate enrichment key for name='{key_or_name}': {e}") from e
        elif len(args) == 2:
            name, context = args
            try:
                key = keys.generate_enrichment_key(name, context)
            except Exception as e:
                raise ValueError(
                    f"Failed to generate enrichment key for name='{name}', context='{context}': {e}"
                ) from e
        else:
            raise ValueError('enrichment_get() accepts either (key: str) or (name: str, context: str = "")')
        return self._enrichment_proxy(self._investigation.get_enrichment(key))

    def enrichment_get_all(self) -> dict[str, EnrichmentProxy]:
        """Get read-only proxies for all enrichments."""
        return {
            key: EnrichmentProxy(self._investigation, key) for key in self._investigation.get_all_enrichments().keys()
        }

    # Score and statistics methods

    def get_global_score(self) -> Decimal:
        """
        Get the global investigation score.

        Returns:
            Global score
        """
        return self._investigation.get_global_score()

    def get_global_level(self) -> Level:
        """
        Get the global investigation level.

        Returns:
            Global level
        """
        return self._investigation.get_global_level()

    def get_statistics(self) -> StatisticsSchema:
        """
        Get comprehensive investigation statistics.

        Returns:
            Statistics schema with typed fields
        """
        return self._investigation.get_statistics()

    # Serialization and I/O methods

    def io_save_json(self, filepath: str | Path, *, include_audit_log: bool = True) -> str:
        """
        Save the investigation to a JSON file.

        Relative paths are converted to absolute paths before saving.

        Args:
            filepath: Path to save the JSON file (relative or absolute)
            include_audit_log: Include audit log in output (default: True).
                When False, audit_log is set to null for compact, deterministic output.

        Returns:
            Absolute path to the saved file as a string

        Raises:
            PermissionError: If the file cannot be written
            OSError: If there are file system issues

        Examples:
            >>> cv = Cyvest()
            >>> path = cv.io_save_json("investigation.json")
            >>> print(path)  # /absolute/path/to/investigation.json
            >>> # For compact, deterministic output:
            >>> path = cv.io_save_json("output.json", include_audit_log=False)
        """
        save_investigation_json(self._investigation, filepath, include_audit_log=include_audit_log)
        return str(Path(filepath).resolve())

    def io_save_markdown(
        self,
        filepath: str | Path,
        include_containers: bool = False,
        include_enrichments: bool = False,
        include_observables: bool = True,
    ) -> str:
        """
        Save the investigation as a Markdown report.

        Relative paths are converted to absolute paths before saving.

        Args:
            filepath: Path to save the Markdown file (relative or absolute)
            include_containers: Include containers section in the report (default: False)
            include_enrichments: Include enrichments section in the report (default: False)
            include_observables: Include observables section in the report (default: True)

        Returns:
            Absolute path to the saved file as a string

        Raises:
            PermissionError: If the file cannot be written
            OSError: If there are file system issues

        Examples:
            >>> cv = Cyvest()
            >>> path = cv.io_save_markdown("report.md")
            >>> print(path)  # /absolute/path/to/report.md
        """
        save_investigation_markdown(
            self._investigation, filepath, include_containers, include_enrichments, include_observables
        )
        return str(Path(filepath).resolve())

    def io_to_markdown(
        self,
        include_containers: bool = False,
        include_enrichments: bool = False,
        include_observables: bool = True,
    ) -> str:
        """
        Generate a Markdown report of the investigation.

        Args:
            include_containers: Include containers section in the report (default: False)
            include_enrichments: Include enrichments section in the report (default: False)
            include_observables: Include observables section in the report (default: True)

        Returns:
            Markdown formatted report as a string

        Examples:
            >>> cv = Cyvest()
            >>> markdown = cv.io_to_markdown()
            >>> print(markdown)
            # Cybersecurity Investigation Report
            ...
        """
        return generate_markdown_report(
            self._investigation, include_containers, include_enrichments, include_observables
        )

    def io_to_invest(self, *, include_audit_log: bool = True) -> InvestigationSchema:
        """
        Serialize the investigation to an InvestigationSchema.

        Args:
            include_audit_log: Include audit log in serialization (default: True).
                When False, audit_log is set to None for compact, deterministic output.

        Returns:
            InvestigationSchema instance (use .model_dump() for dict)

        Examples:
            >>> cv = Cyvest()
            >>> schema = cv.io_to_invest()
            >>> print(schema.score, schema.level)
            >>> dict_data = schema.model_dump(by_alias=True)
            >>> # For compact, deterministic output:
            >>> schema = cv.io_to_invest(include_audit_log=False)
            >>> assert schema.audit_log is None
        """
        return serialize_investigation(self._investigation, include_audit_log=include_audit_log)

    # Merge methods

    def merge_investigation(self, other: Cyvest) -> None:
        """
        Merge another investigation into this one.

        Args:
            other: The investigation to merge
        """
        self._investigation.merge_investigation(other._investigation)

    def finalize_relationships(self) -> None:
        """
        Finalize observable relationships by linking orphan sub-graphs to root.

        Any observable or sub-graph not connected to the root will be automatically
        linked by finding the best starting node of each disconnected component.
        """
        self._investigation.finalize_relationships()

    def display_summary(
        self,
        show_graph: bool = True,
        exclude_levels: Level | Iterable[Level] = Level.NONE,
        show_audit_log: bool = False,
    ) -> None:
        display_summary(
            self,
            lambda renderables: logger.rich("INFO", renderables),
            show_graph=show_graph,
            exclude_levels=exclude_levels,
            show_audit_log=show_audit_log,
        )

    def display_statistics(self) -> None:
        display_statistics(self, lambda renderables: logger.rich("INFO", renderables))

    def display_network(
        self,
        output_dir: str | None = None,
        open_browser: bool = True,
        min_level: Level | None = None,
        observable_types: list[ObservableType] | None = None,
        physics: bool = True,
        group_by_type: bool = False,
        max_label_length: int = 60,
        title: str = "Cyvest Investigation Network",
    ) -> str:
        """
        Generate and display an interactive network graph visualization.

        Creates an HTML file with a pyvis network graph showing observables as nodes
        (colored by level, sized by score, shaped by type) and relationships as edges
        (colored by direction, labeled by type).

        Args:
            output_dir: Directory to save HTML file (defaults to temp directory)
            open_browser: Whether to automatically open the HTML file in a browser
            min_level: Minimum security level to include (filters out lower levels)
            observable_types: List of observable types to include (filters out others)
            physics: Enable physics simulation for organic layout (default: True)
            group_by_type: Group observables by type using hierarchical layout (default: False)
            max_label_length: Maximum length for node labels before truncation (default: 60)
            title: Title displayed in the generated HTML visualization

        Returns:
            Path to the generated HTML file

        Examples:
            >>> cv = Cyvest()
            >>> # Create investigation with observables
            >>> cv.display_network()
            '/tmp/cyvest_12345/cyvest_network.html'
        """
        from cyvest.io_visualization import generate_network_graph

        return generate_network_graph(
            self,
            output_dir=output_dir,
            open_browser=open_browser,
            min_level=min_level,
            observable_types=observable_types,
            physics=physics,
            group_by_type=group_by_type,
            max_label_length=max_label_length,
            title=title,
        )

    # Fluent helper entrypoints

    def taxonomy(self, *, level: Level, name: str, value: str) -> Taxonomy:
        """
        Create a taxonomy object for threat intelligence entries.

        Args:
            level: Taxonomy level (Level enum)
            name: Taxonomy name (unique per threat intel)
            value: Taxonomy value

        Returns:
            Taxonomy instance
        """
        return Taxonomy(level=level, name=name, value=value)

    def threat_intel_draft(
        self,
        source: str,
        score: Decimal | float,
        comment: str = "",
        extra: dict[str, Any] | None = None,
        level: Level | None = None,
        taxonomies: list[Taxonomy | dict[str, Any]] | None = None,
    ) -> ThreatIntel:
        """
        Create an unbound threat intel draft entry with fluent helper methods.

        Args:
            source: Threat intel source name
            score: Score from threat intel
            comment: Optional comment
            extra: Optional extra data
            level: Optional explicit level
            taxonomies: Optional taxonomies

        Returns:
            Unbound ThreatIntel instance
        """
        return self.threat_intel_draft_create(source, score, comment, extra, level, taxonomies)

    def observable(
        self,
        obs_type: ObservableType,
        value: str,
        internal: bool = False,
        whitelisted: bool = False,
        comment: str = "",
        extra: dict[str, Any] | None = None,
        score: Decimal | float | None = None,
        level: Level | None = None,
    ) -> ObservableProxy:
        """
        Create (or fetch) an observable with fluent helper methods.

        Args:
            obs_type: Type of observable
            value: Value of the observable
            internal: Whether this is an internal asset
            whitelisted: Whether this is whitelisted
            comment: Optional comment
            extra: Optional extra data
            score: Optional explicit score
            level: Optional explicit level

        Returns:
            Observable proxy exposing mutation helpers for chaining
        """
        return self.observable_create(obs_type, value, internal, whitelisted, comment, extra, score, level)

    def check(
        self,
        check_id: str,
        scope: str,
        description: str,
        comment: str = "",
        extra: dict[str, Any] | None = None,
        score: Decimal | float | None = None,
        level: Level | None = None,
    ) -> CheckProxy:
        """
        Create a check with fluent helper methods.

        Args:
            check_id: Check identifier
            scope: Check scope
            description: Check description
            comment: Optional comment
            extra: Optional extra data
            score: Optional explicit score
            level: Optional explicit level

        Returns:
            Check proxy exposing mutation helpers for chaining
        """
        return self.check_create(check_id, scope, description, comment, extra, score, level)

    def container(self, path: str, description: str = "") -> ContainerProxy:
        """
        Create a container with fluent helper methods.

        Args:
            path: Container path
            description: Container description

        Returns:
            Container proxy exposing mutation helpers for chaining
        """
        return self.container_create(path, description)

    def root(self) -> ObservableProxy:
        """
        Get the root observable.

        Returns:
            Root observable
        """
        return self.observable_get_root()