cyvest 0.1.0__py3-none-any.whl → 5.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cyvest/__init__.py +48 -38
- cyvest/cli.py +487 -0
- cyvest/compare.py +318 -0
- cyvest/cyvest.py +1431 -0
- cyvest/investigation.py +1682 -0
- cyvest/io_rich.py +1153 -0
- cyvest/io_schema.py +35 -0
- cyvest/io_serialization.py +465 -0
- cyvest/io_visualization.py +358 -0
- cyvest/keys.py +237 -0
- cyvest/level_score_rules.py +78 -0
- cyvest/levels.py +175 -0
- cyvest/model.py +595 -0
- cyvest/model_enums.py +69 -0
- cyvest/model_schema.py +164 -0
- cyvest/proxies.py +595 -0
- cyvest/score.py +473 -0
- cyvest/shared.py +508 -0
- cyvest/stats.py +291 -0
- cyvest/ulid.py +36 -0
- cyvest-5.1.3.dist-info/METADATA +632 -0
- cyvest-5.1.3.dist-info/RECORD +24 -0
- {cyvest-0.1.0.dist-info → cyvest-5.1.3.dist-info}/WHEEL +1 -2
- cyvest-5.1.3.dist-info/entry_points.txt +3 -0
- cyvest/builder.py +0 -182
- cyvest/check_tree.py +0 -117
- cyvest/models.py +0 -785
- cyvest/observable_registry.py +0 -69
- cyvest/report_render.py +0 -306
- cyvest/report_serialization.py +0 -237
- cyvest/visitors.py +0 -332
- cyvest-0.1.0.dist-info/METADATA +0 -110
- cyvest-0.1.0.dist-info/RECORD +0 -13
- cyvest-0.1.0.dist-info/licenses/LICENSE +0 -21
- cyvest-0.1.0.dist-info/top_level.txt +0 -1
cyvest/cyvest.py
ADDED
|
@@ -0,0 +1,1431 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Cyvest facade - high-level API for building cybersecurity investigations.
|
|
3
|
+
|
|
4
|
+
Provides a simplified interface for creating and managing investigation objects,
|
|
5
|
+
handling score propagation, and generating reports.
|
|
6
|
+
|
|
7
|
+
Includes JSON/Markdown export (io_save_json, io_save_markdown), import (io_load_json, io_load_dict),
|
|
8
|
+
and investigation export (io_to_invest, io_to_dict, io_to_markdown) methods.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import threading
|
|
14
|
+
from collections.abc import Callable, Iterable
|
|
15
|
+
from decimal import Decimal
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import TYPE_CHECKING, Any, Final, Literal, overload
|
|
18
|
+
|
|
19
|
+
from logurich import logger
|
|
20
|
+
|
|
21
|
+
from cyvest import keys
|
|
22
|
+
from cyvest.compare import compare_investigations
|
|
23
|
+
from cyvest.investigation import Investigation, InvestigationWhitelist
|
|
24
|
+
from cyvest.io_rich import (
|
|
25
|
+
display_check_query,
|
|
26
|
+
display_diff,
|
|
27
|
+
display_observable_query,
|
|
28
|
+
display_statistics,
|
|
29
|
+
display_summary,
|
|
30
|
+
display_threat_intel_query,
|
|
31
|
+
)
|
|
32
|
+
from cyvest.io_serialization import (
|
|
33
|
+
generate_markdown_report,
|
|
34
|
+
load_investigation_dict,
|
|
35
|
+
load_investigation_json,
|
|
36
|
+
save_investigation_json,
|
|
37
|
+
save_investigation_markdown,
|
|
38
|
+
serialize_investigation,
|
|
39
|
+
)
|
|
40
|
+
from cyvest.io_visualization import generate_network_graph
|
|
41
|
+
from cyvest.levels import Level
|
|
42
|
+
from cyvest.model import Check, Enrichment, Observable, Tag, Taxonomy, ThreatIntel
|
|
43
|
+
from cyvest.model_enums import ObservableType, PropagationMode, RelationshipDirection, RelationshipType
|
|
44
|
+
from cyvest.model_schema import InvestigationSchema, StatisticsSchema
|
|
45
|
+
from cyvest.proxies import CheckProxy, EnrichmentProxy, ObservableProxy, TagProxy, ThreatIntelProxy
|
|
46
|
+
from cyvest.score import ScoreMode
|
|
47
|
+
|
|
48
|
+
if TYPE_CHECKING:
|
|
49
|
+
from cyvest.shared import SharedInvestigationContext
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class Cyvest:
    """
    High-level facade for building and managing cybersecurity investigations.

    Provides methods for creating observables, checks, threat intel, enrichments,
    and tags, with automatic score propagation and statistics tracking.
    """

    # Short class-level aliases so callers can write e.g. Cyvest.OBS.FILE or
    # cv.LVL.MALICIOUS without importing the enum modules themselves.
    OBS: Final[type[ObservableType]] = ObservableType
    REL: Final[type[RelationshipType]] = RelationshipType
    DIR: Final[type[RelationshipDirection]] = RelationshipDirection
    PROP: Final[type[PropagationMode]] = PropagationMode
    LVL: Final[type[Level]] = Level
|
|
65
|
+
|
|
66
|
+
def __init__(
    self,
    root_data: Any = None,
    root_type: ObservableType | Literal["file", "artifact"] = ObservableType.FILE,
    score_mode_obs: ScoreMode = ScoreMode.MAX,
    investigation_name: str | None = None,
    investigation_id: str | None = None,
) -> None:
    """Create a fresh investigation wrapped by this facade.

    Args:
        root_data: Data under investigation (optional).
        root_type: Type of the root observable (FILE or ARTIFACT).
        score_mode_obs: How observable scores are aggregated (MAX or SUM).
        investigation_name: Optional human-readable name.
        investigation_id: Optional deterministic ID (auto-generated ULID otherwise).
    """
    # All state lives on the underlying Investigation; the facade itself is
    # stateless apart from this reference.
    self._investigation = Investigation(
        root_data,
        root_type=root_type,
        score_mode_obs=score_mode_obs,
        investigation_name=investigation_name,
        investigation_id=investigation_id,
    )
|
|
91
|
+
|
|
92
|
+
# Internal helpers
|
|
93
|
+
|
|
94
|
+
def _observable_proxy(self, observable: Observable | None) -> ObservableProxy | None:
|
|
95
|
+
if observable is None:
|
|
96
|
+
return None
|
|
97
|
+
return ObservableProxy(self._investigation, observable.key)
|
|
98
|
+
|
|
99
|
+
def _check_proxy(self, check: Check | None) -> CheckProxy | None:
|
|
100
|
+
if check is None:
|
|
101
|
+
return None
|
|
102
|
+
return CheckProxy(self._investigation, check.key)
|
|
103
|
+
|
|
104
|
+
def _tag_proxy(self, tag: Tag | None) -> TagProxy | None:
|
|
105
|
+
if tag is None:
|
|
106
|
+
return None
|
|
107
|
+
return TagProxy(self._investigation, tag.key)
|
|
108
|
+
|
|
109
|
+
def _threat_intel_proxy(self, ti: ThreatIntel | None) -> ThreatIntelProxy | None:
|
|
110
|
+
if ti is None:
|
|
111
|
+
return None
|
|
112
|
+
return ThreatIntelProxy(self._investigation, ti.key)
|
|
113
|
+
|
|
114
|
+
def _enrichment_proxy(self, enrichment: Enrichment | None) -> EnrichmentProxy | None:
|
|
115
|
+
if enrichment is None:
|
|
116
|
+
return None
|
|
117
|
+
return EnrichmentProxy(self._investigation, enrichment.key)
|
|
118
|
+
|
|
119
|
+
@staticmethod
|
|
120
|
+
def _resolve_observable_key(value: Observable | ObservableProxy | str) -> str:
|
|
121
|
+
if isinstance(value, str):
|
|
122
|
+
return value
|
|
123
|
+
if isinstance(value, (Observable, ObservableProxy)):
|
|
124
|
+
return value.key
|
|
125
|
+
raise TypeError("Expected an observable key, ObservableProxy, or Observable instance.")
|
|
126
|
+
|
|
127
|
+
@staticmethod
|
|
128
|
+
def _resolve_threat_intel_key(value: ThreatIntel | ThreatIntelProxy | str) -> str:
|
|
129
|
+
if isinstance(value, str):
|
|
130
|
+
return value
|
|
131
|
+
if isinstance(value, (ThreatIntel, ThreatIntelProxy)):
|
|
132
|
+
return value.key
|
|
133
|
+
raise TypeError("Expected a threat intel key, ThreatIntelProxy, or ThreatIntel instance.")
|
|
134
|
+
|
|
135
|
+
def _require_observable(self, key: str) -> Observable:
|
|
136
|
+
observable = self._investigation.get_observable(key)
|
|
137
|
+
if observable is None:
|
|
138
|
+
raise KeyError(f"observable '{key}' not found in investigation.")
|
|
139
|
+
return observable
|
|
140
|
+
|
|
141
|
+
def _require_check(self, key: str) -> Check:
|
|
142
|
+
check = self._investigation.get_check(key)
|
|
143
|
+
if check is None:
|
|
144
|
+
raise KeyError(f"check '{key}' not found in investigation.")
|
|
145
|
+
return check
|
|
146
|
+
|
|
147
|
+
# Investigation-level helpers
|
|
148
|
+
|
|
149
|
+
def investigation_is_whitelisted(self) -> bool:
    """
    Return whether the investigation is whitelisted/marked safe.

    Examples:
        >>> cv = Cyvest()
        >>> cv.investigation_add_whitelist("id-1", "False positive", "Sandboxed sample")
        >>> cv.investigation_is_whitelisted()
        True
    """
    # Delegates entirely to the underlying Investigation.
    return self._investigation.is_whitelisted()
|
|
160
|
+
|
|
161
|
+
def investigation_get_name(self) -> str | None:
    """Return the human-readable investigation name, or None when unset."""
    return self._investigation.investigation_name
|
|
164
|
+
|
|
165
|
+
def investigation_set_name(self, name: str | None, reason: str | None = None) -> None:
    """Set (or clear, with None) the human-readable investigation name.

    Args:
        name: New name, or None to clear it.
        reason: Optional audit reason recorded with the change.
    """
    self._investigation.set_investigation_name(name, reason=reason)
|
|
168
|
+
|
|
169
|
+
def investigation_get_audit_log(self) -> tuple[Any, ...]:
    """Return a snapshot (tuple copy) of the investigation-level audit log."""
    # Copied into a tuple so callers cannot mutate internal state.
    return tuple(self._investigation.get_audit_log())
|
|
172
|
+
|
|
173
|
+
def investigation_add_whitelist(
    self, identifier: str, name: str, justification: str | None = None
) -> InvestigationWhitelist:
    """Add or update an investigation-level whitelist entry.

    Args:
        identifier: Unique identifier for the whitelist entry.
        name: Human-readable name.
        justification: Optional markdown justification.

    Returns:
        The created/updated whitelist entry.
    """
    return self._investigation.add_whitelist(identifier, name, justification)
|
|
185
|
+
|
|
186
|
+
def investigation_remove_whitelist(self, identifier: str) -> bool:
    """Remove a whitelist entry by identifier.

    Returns:
        True if an entry was removed, False if the identifier was unknown.
    """
    return self._investigation.remove_whitelist(identifier)
|
|
194
|
+
|
|
195
|
+
def investigation_clear_whitelists(self) -> None:
    """Drop every whitelist entry from the investigation."""
    self._investigation.clear_whitelists()
|
|
198
|
+
|
|
199
|
+
def investigation_get_whitelists(self) -> tuple[InvestigationWhitelist, ...]:
    """Return all whitelist entries as an immutable tuple snapshot."""
    return tuple(self._investigation.get_whitelists())
|
|
207
|
+
|
|
208
|
+
# Observable methods
|
|
209
|
+
|
|
210
|
+
def observable_create(
    self,
    obs_type: ObservableType,
    value: str,
    internal: bool = False,
    whitelisted: bool = False,
    comment: str = "",
    extra: dict[str, Any] | None = None,
    score: Decimal | float | None = None,
    level: Level | None = None,
) -> ObservableProxy:
    """Create an observable, or return the existing one with the same key.

    Args:
        obs_type: Type of observable.
        value: Value of the observable.
        internal: Whether this is an internal asset.
        whitelisted: Whether this is whitelisted.
        comment: Optional comment.
        extra: Optional extra data.
        score: Optional explicit score.
        level: Optional explicit level.

    Returns:
        Proxy for the created or pre-existing observable.
    """
    fields: dict[str, Any] = {
        "obs_type": obs_type,
        "value": value,
        "internal": internal,
        "whitelisted": whitelisted,
        "comment": comment,
        "extra": extra or {},
    }
    # Only forward score/level when explicitly given so the model keeps
    # its own defaults otherwise. Scores go through str() to avoid binary
    # float artifacts in the Decimal.
    if score is not None:
        fields["score"] = Decimal(str(score))
    if level is not None:
        fields["level"] = level
    # add_observable() returns (observable, deferred_relationships); the
    # facade exposes only the observable part.
    created, _ = self._investigation.add_observable(Observable(**fields))
    return self._observable_proxy(created)
|
|
253
|
+
|
|
254
|
+
@overload
def observable_get(self, key: str) -> ObservableProxy | None:
    """Get an observable by full key string."""
    ...

@overload
def observable_get(self, obs_type: ObservableType, value: str) -> ObservableProxy | None:
    """Get an observable by type and value."""
    ...

def observable_get(self, *args, **kwargs) -> ObservableProxy | None:
    """
    Get an observable by key or by type and value.

    Args:
        key: Observable key (single argument)
        obs_type: Observable type (when using two arguments)
        value: Observable value (when using two arguments)

    Returns:
        Observable if found, None otherwise

    Raises:
        ValueError: If arguments are invalid or key generation fails
    """

    # Shared by the keyword and positional (obs_type, value) branches —
    # previously this try/except was duplicated verbatim in both.
    def _key_from(obs_type: ObservableType, value: str) -> str:
        try:
            return keys.generate_observable_key(obs_type.value, value)
        except Exception as e:
            raise ValueError(
                f"Failed to generate observable key for type='{obs_type}', value='{value}': {e}"
            ) from e

    if kwargs:
        if not args and set(kwargs) == {"key"}:
            key = kwargs["key"]
        elif not args and set(kwargs) == {"obs_type", "value"}:
            key = _key_from(kwargs["obs_type"], kwargs["value"])
        else:
            raise ValueError("observable_get() accepts either (key: str) or (obs_type: ObservableType, value: str)")
    elif len(args) == 1:
        key = args[0]
    elif len(args) == 2:
        key = _key_from(*args)
    else:
        raise ValueError("observable_get() accepts either (key: str) or (obs_type: ObservableType, value: str)")
    return self._observable_proxy(self._investigation.get_observable(key))
|
|
306
|
+
|
|
307
|
+
def observable_get_root(self) -> ObservableProxy:
    """Return a proxy for the investigation's root observable."""
    root = self._investigation.get_root()
    return self._observable_proxy(root)
|
|
315
|
+
|
|
316
|
+
def observable_get_all(self) -> dict[str, ObservableProxy]:
    """Return a read-only proxy for every observable, keyed by observable key."""
    # Iterating the dict directly yields its keys; no .keys() needed.
    return {k: ObservableProxy(self._investigation, k) for k in self._investigation.get_all_observables()}
|
|
321
|
+
|
|
322
|
+
def observable_add_relationship(
    self,
    source: Observable | ObservableProxy | str,
    target: Observable | ObservableProxy | str,
    relationship_type: RelationshipType,
    direction: RelationshipDirection | None = None,
) -> ObservableProxy:
    """Record a relationship between two observables.

    Args:
        source: Source observable or its key.
        target: Target observable or its key.
        relationship_type: Type of relationship.
        direction: Relationship direction; None uses the semantic default
            for the relationship type.

    Returns:
        Proxy for the source observable.

    Raises:
        KeyError: If the source or target observable does not exist.
    """
    updated = self._investigation.add_relationship(
        self._resolve_observable_key(source),
        self._resolve_observable_key(target),
        relationship_type,
        direction,
    )
    return self._observable_proxy(updated)
|
|
348
|
+
|
|
349
|
+
def observable_add_threat_intel(
    self,
    observable: Observable | ObservableProxy | str,
    source: str,
    score: Decimal | float,
    comment: str = "",
    extra: dict[str, Any] | None = None,
    level: Level | None = None,
    taxonomies: list[Taxonomy | dict[str, Any]] | None = None,
) -> ThreatIntelProxy:
    """Attach a threat-intelligence entry to an existing observable.

    Args:
        observable: Observable, ObservableProxy, or its key.
        source: Threat intel source name.
        score: Score reported by the source.
        comment: Optional comment.
        extra: Optional extra data.
        level: Optional explicit level.
        taxonomies: Optional taxonomies.

    Returns:
        Proxy for the created threat intel.

    Raises:
        KeyError: If the observable does not exist.
    """
    obs_key = self._resolve_observable_key(observable)
    model_obs = self._require_observable(obs_key)

    fields: dict[str, Any] = {
        "source": source,
        "observable_key": obs_key,
        "comment": comment,
        "extra": extra or {},
        # str() round-trip avoids binary-float artifacts in the Decimal.
        "score": Decimal(str(score)),
        "taxonomies": taxonomies or [],
    }
    if level is not None:
        fields["level"] = level
    added = self._investigation.add_threat_intel(ThreatIntel(**fields), model_obs)
    return self._threat_intel_proxy(added)
|
|
393
|
+
|
|
394
|
+
def observable_with_ti_draft(
    self,
    observable: Observable | ObservableProxy | str,
    threat_intel: ThreatIntel,
) -> ThreatIntelProxy:
    """
    Attach a threat intel draft to an observable.

    Args:
        observable: Observable, ObservableProxy, or its key
        threat_intel: Threat intel draft entry (unbound or matching observable)

    Returns:
        The created/merged threat intel

    Raises:
        TypeError: If threat_intel is not a ThreatIntel instance
        ValueError: If the draft is bound to a different observable
        KeyError: If the observable does not exist
    """
    if not isinstance(threat_intel, ThreatIntel):
        raise TypeError("Threat intel draft must be a ThreatIntel instance.")

    observable_key = self._resolve_observable_key(observable)
    model_observable = self._require_observable(observable_key)

    if threat_intel.observable_key and threat_intel.observable_key != observable_key:
        raise ValueError("Threat intel is already bound to a different observable.")

    # Bind the draft and normalize its key. The key is derived from
    # (source, observable_key); a single inequality check covers both the
    # empty-key and stale-key cases (an empty key never equals a generated
    # "ti:..." key, so the previous extra "not threat_intel.key" test was
    # redundant).
    threat_intel.observable_key = observable_key
    expected_key = keys.generate_threat_intel_key(threat_intel.source, observable_key)
    if threat_intel.key != expected_key:
        threat_intel.key = expected_key

    result = self._investigation.add_threat_intel(threat_intel, model_observable)
    return self._threat_intel_proxy(result)
|
|
428
|
+
|
|
429
|
+
def observable_set_level(
    self,
    observable: Observable | ObservableProxy | str,
    level: Level,
    reason: str | None = None,
) -> ObservableProxy:
    """
    Explicitly set an observable's level via the service layer.

    Args:
        observable: Observable, ObservableProxy, or its key
        level: Level to apply
        reason: Optional audit reason; defaults to "Manual level update"

    Returns:
        Updated observable proxy

    Raises:
        KeyError: If the observable does not exist
    """
    observable_key = self._resolve_observable_key(observable)
    model_observable = self._require_observable(observable_key)
    self._investigation.apply_level_change(
        model_observable,
        level,
        reason=reason or "Manual level update",
    )
    return self._observable_proxy(model_observable)
|
|
456
|
+
|
|
457
|
+
# Threat intel methods
|
|
458
|
+
|
|
459
|
+
def threat_intel_get(self, key: str) -> ThreatIntelProxy | None:
    """Look up a threat intel entry by its key.

    Args:
        key: Threat intel key (format: ti:{source}:{observable_key}).

    Returns:
        Proxy for the entry, or None if it does not exist.
    """
    found = self._investigation.get_threat_intel(key)
    return self._threat_intel_proxy(found)
|
|
471
|
+
|
|
472
|
+
def threat_intel_draft_create(
    self,
    source: str,
    score: Decimal | float,
    comment: str = "",
    extra: dict[str, Any] | None = None,
    level: Level | None = None,
    taxonomies: list[Taxonomy | dict[str, Any]] | None = None,
) -> ThreatIntel:
    """Build an unbound threat intel draft (no observable attached yet).

    Args:
        source: Threat intel source name.
        score: Score reported by the source.
        comment: Optional comment.
        extra: Optional extra data.
        level: Optional explicit level.
        taxonomies: Optional taxonomies.

    Returns:
        An unbound ThreatIntel instance (observable_key left empty).
    """
    fields: dict[str, Any] = {
        "source": source,
        # Empty observable_key marks the draft as unbound.
        "observable_key": "",
        "comment": comment,
        "extra": extra or {},
        "score": Decimal(str(score)),
        "taxonomies": taxonomies or [],
    }
    if level is not None:
        fields["level"] = level
    return ThreatIntel(**fields)
|
|
506
|
+
|
|
507
|
+
def threat_intel_add_taxonomy(
    self,
    threat_intel: ThreatIntel | ThreatIntelProxy | str,
    *,
    level: Level,
    name: str,
    value: str,
) -> ThreatIntelProxy:
    """Add or replace (by name) a taxonomy entry on a threat intel record.

    Args:
        threat_intel: ThreatIntel, ThreatIntelProxy, or its key.
        level: Taxonomy level.
        name: Taxonomy name (unique per threat intel).
        value: Taxonomy value.

    Returns:
        Proxy for the updated threat intel.

    Raises:
        KeyError: If the threat intel does not exist.
    """
    entry = Taxonomy(level=level, name=name, value=value)
    refreshed = self._investigation.add_threat_intel_taxonomy(
        self._resolve_threat_intel_key(threat_intel), entry
    )
    return self._threat_intel_proxy(refreshed)
|
|
534
|
+
|
|
535
|
+
def threat_intel_remove_taxonomy(
    self,
    threat_intel: ThreatIntel | ThreatIntelProxy | str,
    name: str,
) -> ThreatIntelProxy:
    """Remove a taxonomy entry (matched by name) from a threat intel record.

    Args:
        threat_intel: ThreatIntel, ThreatIntelProxy, or its key.
        name: Taxonomy name to remove.

    Returns:
        Proxy for the updated threat intel.

    Raises:
        KeyError: If the threat intel does not exist.
    """
    refreshed = self._investigation.remove_threat_intel_taxonomy(
        self._resolve_threat_intel_key(threat_intel), name
    )
    return self._threat_intel_proxy(refreshed)
|
|
556
|
+
|
|
557
|
+
def threat_intel_get_all(self) -> dict[str, ThreatIntelProxy]:
    """Return a read-only proxy for every threat intel entry, keyed by its key."""
    # Iterating the dict directly yields its keys; no .keys() needed.
    return {k: ThreatIntelProxy(self._investigation, k) for k in self._investigation.get_all_threat_intels()}
|
|
563
|
+
|
|
564
|
+
# Check methods
|
|
565
|
+
|
|
566
|
+
def check_create(
    self,
    check_name: str,
    description: str,
    comment: str = "",
    extra: dict[str, Any] | None = None,
    score: Decimal | float | None = None,
    level: Level | None = None,
) -> CheckProxy:
    """Create a new check and register it on the investigation.

    Args:
        check_name: Check name.
        description: Check description.
        comment: Optional comment.
        extra: Optional extra data.
        score: Optional explicit score.
        level: Optional explicit level.

    Returns:
        Proxy for the created check.
    """
    fields: dict[str, Any] = {
        "check_name": check_name,
        "description": description,
        "comment": comment,
        "extra": extra or {},
        # Stamp the check with the investigation that created it.
        "origin_investigation_id": self._investigation.investigation_id,
    }
    if score is not None:
        fields["score"] = Decimal(str(score))
    if level is not None:
        fields["level"] = level
    return self._check_proxy(self._investigation.add_check(Check(**fields)))
|
|
602
|
+
|
|
603
|
+
def check_get(self, key: str) -> CheckProxy | None:
    """Look up a check by key.

    Args:
        key: Check key.

    Returns:
        Proxy for the check, or None if it does not exist.
    """
    found = self._investigation.get_check(key)
    return self._check_proxy(found)
|
|
614
|
+
|
|
615
|
+
def check_get_all(self) -> dict[str, CheckProxy]:
    """Return a read-only proxy for every check, keyed by check key."""
    # Iterating the dict directly yields its keys; no .keys() needed.
    return {k: CheckProxy(self._investigation, k) for k in self._investigation.get_all_checks()}
|
|
618
|
+
|
|
619
|
+
def check_link_observable(
    self,
    check_key: str,
    observable: Observable | ObservableProxy | str,
    propagation_mode: PropagationMode = PropagationMode.LOCAL_ONLY,
) -> CheckProxy:
    """Link an observable to a check.

    Args:
        check_key: Key of the check.
        observable: Observable, ObservableProxy, or its key.
        propagation_mode: Score-propagation behavior for this link.

    Returns:
        Proxy for the check.

    Raises:
        KeyError: If the check or observable does not exist.
    """
    linked = self._investigation.link_check_observable(
        check_key,
        self._resolve_observable_key(observable),
        propagation_mode=propagation_mode,
    )
    return self._check_proxy(linked)
|
|
642
|
+
|
|
643
|
+
def check_update_score(self, check_key: str, score: Decimal | float, reason: str = "") -> CheckProxy:
    """Update a check's score through the service layer.

    Args:
        check_key: Key of the check.
        score: New score.
        reason: Audit reason for the update.

    Returns:
        Proxy for the check.

    Raises:
        KeyError: If the check does not exist.
    """
    target = self._require_check(check_key)
    # str() round-trip avoids binary-float artifacts in the Decimal.
    self._investigation.apply_score_change(target, Decimal(str(score)), reason=reason)
    return self._check_proxy(target)
|
|
661
|
+
|
|
662
|
+
# Tag methods
|
|
663
|
+
|
|
664
|
+
def tag_create(self, name: str, description: str = "") -> TagProxy:
    """Create a tag; missing ancestors in a hierarchical name are auto-created.

    With ":" as the hierarchy delimiter, creating "header:auth:dkim" also
    creates "header" and "header:auth" when they do not already exist.

    Args:
        name: Tag name (":"-delimited hierarchy).
        description: Tag description.

    Returns:
        Proxy for the created tag.
    """
    return self._tag_proxy(self._investigation.add_tag(Tag(name=name, description=description)))
|
|
682
|
+
|
|
683
|
+
def tag_get(self, *args, **kwargs) -> TagProxy | None:
    """
    Get a tag by key or by name.

    Args:
        key: Tag key (single argument, prefixed with tag:)
        name: Tag name (single argument without prefix)

    Returns:
        Tag if found, None otherwise

    Raises:
        ValueError: If arguments are invalid or key generation fails
    """

    # Shared by the keyword and positional name branches — previously this
    # try/except was duplicated verbatim in both.
    def _key_for(name: str) -> str:
        try:
            return keys.generate_tag_key(name)
        except Exception as e:
            raise ValueError(f"Failed to generate tag key for name='{name}': {e}") from e

    if kwargs:
        if not args and set(kwargs) == {"key"}:
            key = kwargs["key"]
        elif not args and set(kwargs) == {"name"}:
            key = _key_for(kwargs["name"])
        else:
            raise ValueError("tag_get() accepts either (key: str) or (name: str)")
    elif len(args) == 1:
        key_or_name = args[0]
        # A "tag:" prefix marks an already-generated key; anything else is
        # treated as a plain name and converted.
        if isinstance(key_or_name, str) and key_or_name.startswith("tag:"):
            key = key_or_name
        else:
            key = _key_for(key_or_name)
    else:
        raise ValueError("tag_get() accepts either (key: str) or (name: str)")
    return self._tag_proxy(self._investigation.get_tag(key))
|
|
720
|
+
|
|
721
|
+
def tag_get_all(self) -> dict[str, TagProxy]:
    """Return a read-only proxy for every tag, keyed by tag key."""
    # Iterating the dict directly yields its keys; no .keys() needed.
    return {k: TagProxy(self._investigation, k) for k in self._investigation.get_all_tags()}
|
|
724
|
+
|
|
725
|
+
def tag_add_check(self, tag_key: str, check_key: str) -> TagProxy:
    """Associate a check with a tag.

    Args:
        tag_key: Key of the tag.
        check_key: Key of the check.

    Returns:
        Proxy for the tag.

    Raises:
        KeyError: If the tag or check does not exist.
    """
    return self._tag_proxy(self._investigation.add_check_to_tag(tag_key, check_key))
|
|
741
|
+
|
|
742
|
+
def tag_get_children(self, tag_name: str) -> list[TagProxy]:
    """Return proxies for the direct children of the given tag."""
    return [TagProxy(self._investigation, child.key) for child in self._investigation.get_tag_children(tag_name)]
|
|
746
|
+
|
|
747
|
+
def tag_get_descendants(self, tag_name: str) -> list[TagProxy]:
    """Return proxies for every descendant (transitive child) of the given tag."""
    return [TagProxy(self._investigation, d.key) for d in self._investigation.get_tag_descendants(tag_name)]
|
|
751
|
+
|
|
752
|
+
def tag_get_ancestors(self, tag_name: str) -> list[TagProxy]:
    """Return proxies for every ancestor of the given tag."""
    return [TagProxy(self._investigation, a.key) for a in self._investigation.get_tag_ancestors(tag_name)]
|
|
756
|
+
|
|
757
|
+
# Enrichment methods
|
|
758
|
+
|
|
759
|
+
def enrichment_create(self, name: str, data: dict[str, Any], context: str = "") -> EnrichmentProxy:
|
|
760
|
+
"""
|
|
761
|
+
Create a new enrichment.
|
|
762
|
+
|
|
763
|
+
Args:
|
|
764
|
+
name: Enrichment name
|
|
765
|
+
data: Enrichment data
|
|
766
|
+
context: Optional context
|
|
767
|
+
|
|
768
|
+
Returns:
|
|
769
|
+
The created enrichment
|
|
770
|
+
"""
|
|
771
|
+
enrichment = Enrichment(name=name, data=data, context=context)
|
|
772
|
+
return self._enrichment_proxy(self._investigation.add_enrichment(enrichment))
|
|
773
|
+
|
|
774
|
+
@overload
|
|
775
|
+
def enrichment_get(self, key: str) -> EnrichmentProxy | None:
|
|
776
|
+
"""Get an enrichment by full key string."""
|
|
777
|
+
...
|
|
778
|
+
|
|
779
|
+
@overload
|
|
780
|
+
def enrichment_get(self, name: str, context: str = "") -> EnrichmentProxy | None:
|
|
781
|
+
"""Get an enrichment by name and optional context."""
|
|
782
|
+
...
|
|
783
|
+
|
|
784
|
+
def enrichment_get(self, *args, **kwargs) -> EnrichmentProxy | None:
|
|
785
|
+
"""
|
|
786
|
+
Get an enrichment by key or by name and context.
|
|
787
|
+
|
|
788
|
+
Args:
|
|
789
|
+
key: Enrichment key (single argument, prefixed with enr:)
|
|
790
|
+
name: Enrichment name (when using one or two arguments)
|
|
791
|
+
context: Optional context (second argument or context= kw)
|
|
792
|
+
|
|
793
|
+
Returns:
|
|
794
|
+
Enrichment if found, None otherwise
|
|
795
|
+
|
|
796
|
+
Raises:
|
|
797
|
+
ValueError: If arguments are invalid or key generation fails
|
|
798
|
+
"""
|
|
799
|
+
if kwargs:
|
|
800
|
+
if not args and set(kwargs) == {"key"}:
|
|
801
|
+
key = kwargs["key"]
|
|
802
|
+
elif not args and set(kwargs) == {"name"}:
|
|
803
|
+
name = kwargs["name"]
|
|
804
|
+
try:
|
|
805
|
+
key = keys.generate_enrichment_key(name)
|
|
806
|
+
except Exception as e:
|
|
807
|
+
raise ValueError(f"Failed to generate enrichment key for name='{name}': {e}") from e
|
|
808
|
+
elif not args and set(kwargs) == {"name", "context"}:
|
|
809
|
+
name = kwargs["name"]
|
|
810
|
+
context = kwargs["context"]
|
|
811
|
+
try:
|
|
812
|
+
key = keys.generate_enrichment_key(name, context)
|
|
813
|
+
except Exception as e:
|
|
814
|
+
raise ValueError(
|
|
815
|
+
f"Failed to generate enrichment key for name='{name}', context='{context}': {e}"
|
|
816
|
+
) from e
|
|
817
|
+
elif len(args) == 1 and set(kwargs) == {"context"}:
|
|
818
|
+
name = args[0]
|
|
819
|
+
context = kwargs["context"]
|
|
820
|
+
try:
|
|
821
|
+
key = keys.generate_enrichment_key(name, context)
|
|
822
|
+
except Exception as e:
|
|
823
|
+
raise ValueError(
|
|
824
|
+
f"Failed to generate enrichment key for name='{name}', context='{context}': {e}"
|
|
825
|
+
) from e
|
|
826
|
+
else:
|
|
827
|
+
raise ValueError('enrichment_get() accepts either (key: str) or (name: str, context: str = "")')
|
|
828
|
+
elif len(args) == 1:
|
|
829
|
+
key_or_name = args[0]
|
|
830
|
+
if isinstance(key_or_name, str) and key_or_name.startswith("enr:"):
|
|
831
|
+
key = key_or_name
|
|
832
|
+
else:
|
|
833
|
+
try:
|
|
834
|
+
key = keys.generate_enrichment_key(key_or_name)
|
|
835
|
+
except Exception as e:
|
|
836
|
+
raise ValueError(f"Failed to generate enrichment key for name='{key_or_name}': {e}") from e
|
|
837
|
+
elif len(args) == 2:
|
|
838
|
+
name, context = args
|
|
839
|
+
try:
|
|
840
|
+
key = keys.generate_enrichment_key(name, context)
|
|
841
|
+
except Exception as e:
|
|
842
|
+
raise ValueError(
|
|
843
|
+
f"Failed to generate enrichment key for name='{name}', context='{context}': {e}"
|
|
844
|
+
) from e
|
|
845
|
+
else:
|
|
846
|
+
raise ValueError('enrichment_get() accepts either (key: str) or (name: str, context: str = "")')
|
|
847
|
+
return self._enrichment_proxy(self._investigation.get_enrichment(key))
|
|
848
|
+
|
|
849
|
+
def enrichment_get_all(self) -> dict[str, EnrichmentProxy]:
|
|
850
|
+
"""Get read-only proxies for all enrichments."""
|
|
851
|
+
return {
|
|
852
|
+
key: EnrichmentProxy(self._investigation, key) for key in self._investigation.get_all_enrichments().keys()
|
|
853
|
+
}
|
|
854
|
+
|
|
855
|
+
# Score and statistics methods
|
|
856
|
+
|
|
857
|
+
def get_global_score(self) -> Decimal:
|
|
858
|
+
"""
|
|
859
|
+
Get the global investigation score.
|
|
860
|
+
|
|
861
|
+
Returns:
|
|
862
|
+
Global score
|
|
863
|
+
"""
|
|
864
|
+
return self._investigation.get_global_score()
|
|
865
|
+
|
|
866
|
+
def get_global_level(self) -> Level:
|
|
867
|
+
"""
|
|
868
|
+
Get the global investigation level.
|
|
869
|
+
|
|
870
|
+
Returns:
|
|
871
|
+
Global level
|
|
872
|
+
"""
|
|
873
|
+
return self._investigation.get_global_level()
|
|
874
|
+
|
|
875
|
+
def get_statistics(self) -> StatisticsSchema:
|
|
876
|
+
"""
|
|
877
|
+
Get comprehensive investigation statistics.
|
|
878
|
+
|
|
879
|
+
Returns:
|
|
880
|
+
Statistics schema with typed fields
|
|
881
|
+
"""
|
|
882
|
+
return self._investigation.get_statistics()
|
|
883
|
+
|
|
884
|
+
# Serialization and I/O methods
|
|
885
|
+
|
|
886
|
+
def io_save_json(self, filepath: str | Path, *, include_audit_log: bool = True) -> str:
|
|
887
|
+
"""
|
|
888
|
+
Save the investigation to a JSON file.
|
|
889
|
+
|
|
890
|
+
Relative paths are converted to absolute paths before saving.
|
|
891
|
+
|
|
892
|
+
Args:
|
|
893
|
+
filepath: Path to save the JSON file (relative or absolute)
|
|
894
|
+
include_audit_log: Include audit log in output (default: True).
|
|
895
|
+
When False, audit_log is set to null for compact, deterministic output.
|
|
896
|
+
|
|
897
|
+
Returns:
|
|
898
|
+
Absolute path to the saved file as a string
|
|
899
|
+
|
|
900
|
+
Raises:
|
|
901
|
+
PermissionError: If the file cannot be written
|
|
902
|
+
OSError: If there are file system issues
|
|
903
|
+
|
|
904
|
+
Examples:
|
|
905
|
+
>>> cv = Cyvest()
|
|
906
|
+
>>> path = cv.io_save_json("investigation.json")
|
|
907
|
+
>>> print(path) # /absolute/path/to/investigation.json
|
|
908
|
+
>>> # For compact, deterministic output:
|
|
909
|
+
>>> path = cv.io_save_json("output.json", include_audit_log=False)
|
|
910
|
+
"""
|
|
911
|
+
save_investigation_json(self._investigation, filepath, include_audit_log=include_audit_log)
|
|
912
|
+
return str(Path(filepath).resolve())
|
|
913
|
+
|
|
914
|
+
def io_save_markdown(
|
|
915
|
+
self,
|
|
916
|
+
filepath: str | Path,
|
|
917
|
+
include_tags: bool = False,
|
|
918
|
+
include_enrichments: bool = False,
|
|
919
|
+
include_observables: bool = True,
|
|
920
|
+
) -> str:
|
|
921
|
+
"""
|
|
922
|
+
Save the investigation as a Markdown report.
|
|
923
|
+
|
|
924
|
+
Relative paths are converted to absolute paths before saving.
|
|
925
|
+
|
|
926
|
+
Args:
|
|
927
|
+
filepath: Path to save the Markdown file (relative or absolute)
|
|
928
|
+
include_tags: Include tags section in the report (default: False)
|
|
929
|
+
include_enrichments: Include enrichments section in the report (default: False)
|
|
930
|
+
include_observables: Include observables section in the report (default: True)
|
|
931
|
+
|
|
932
|
+
Returns:
|
|
933
|
+
Absolute path to the saved file as a string
|
|
934
|
+
|
|
935
|
+
Raises:
|
|
936
|
+
PermissionError: If the file cannot be written
|
|
937
|
+
OSError: If there are file system issues
|
|
938
|
+
|
|
939
|
+
Examples:
|
|
940
|
+
>>> cv = Cyvest()
|
|
941
|
+
>>> path = cv.io_save_markdown("report.md")
|
|
942
|
+
>>> print(path) # /absolute/path/to/report.md
|
|
943
|
+
"""
|
|
944
|
+
save_investigation_markdown(
|
|
945
|
+
self._investigation, filepath, include_tags, include_enrichments, include_observables
|
|
946
|
+
)
|
|
947
|
+
return str(Path(filepath).resolve())
|
|
948
|
+
|
|
949
|
+
def io_to_markdown(
|
|
950
|
+
self,
|
|
951
|
+
include_tags: bool = False,
|
|
952
|
+
include_enrichments: bool = False,
|
|
953
|
+
include_observables: bool = True,
|
|
954
|
+
exclude_levels: set[Level] | None = None,
|
|
955
|
+
) -> str:
|
|
956
|
+
"""
|
|
957
|
+
Generate a Markdown report of the investigation.
|
|
958
|
+
|
|
959
|
+
Args:
|
|
960
|
+
include_tags: Include tags section in the report (default: False)
|
|
961
|
+
include_enrichments: Include enrichments section in the report (default: False)
|
|
962
|
+
include_observables: Include observables section in the report (default: True)
|
|
963
|
+
exclude_levels: Set of levels to exclude from checks section (default: {Level.NONE})
|
|
964
|
+
|
|
965
|
+
Returns:
|
|
966
|
+
Markdown formatted report as a string
|
|
967
|
+
|
|
968
|
+
Examples:
|
|
969
|
+
>>> cv = Cyvest()
|
|
970
|
+
>>> markdown = cv.io_to_markdown()
|
|
971
|
+
>>> print(markdown)
|
|
972
|
+
# Cybersecurity Investigation Report
|
|
973
|
+
...
|
|
974
|
+
"""
|
|
975
|
+
return generate_markdown_report(
|
|
976
|
+
self._investigation, include_tags, include_enrichments, include_observables, exclude_levels
|
|
977
|
+
)
|
|
978
|
+
|
|
979
|
+
def io_to_invest(self, *, include_audit_log: bool = True) -> InvestigationSchema:
|
|
980
|
+
"""
|
|
981
|
+
Serialize the investigation to an InvestigationSchema.
|
|
982
|
+
|
|
983
|
+
Args:
|
|
984
|
+
include_audit_log: Include audit log in serialization (default: True).
|
|
985
|
+
When False, audit_log is set to None for compact, deterministic output.
|
|
986
|
+
|
|
987
|
+
Returns:
|
|
988
|
+
InvestigationSchema instance (use .model_dump() for dict)
|
|
989
|
+
|
|
990
|
+
Examples:
|
|
991
|
+
>>> cv = Cyvest()
|
|
992
|
+
>>> schema = cv.io_to_invest()
|
|
993
|
+
>>> print(schema.score, schema.level)
|
|
994
|
+
>>> dict_data = schema.model_dump() # defaults to by_alias=True
|
|
995
|
+
>>> # For compact, deterministic output:
|
|
996
|
+
>>> schema = cv.io_to_invest(include_audit_log=False)
|
|
997
|
+
>>> assert schema.audit_log is None
|
|
998
|
+
"""
|
|
999
|
+
return serialize_investigation(self._investigation, include_audit_log=include_audit_log)
|
|
1000
|
+
|
|
1001
|
+
def io_to_dict(self, *, include_audit_log: bool = True) -> dict[str, Any]:
|
|
1002
|
+
"""
|
|
1003
|
+
Convert the investigation to a Python dictionary.
|
|
1004
|
+
|
|
1005
|
+
Args:
|
|
1006
|
+
include_audit_log: Include audit log in output (default: True).
|
|
1007
|
+
When False, audit_log is set to None for compact, deterministic output.
|
|
1008
|
+
|
|
1009
|
+
Returns:
|
|
1010
|
+
Dictionary representation of the investigation
|
|
1011
|
+
|
|
1012
|
+
Examples:
|
|
1013
|
+
>>> cv = Cyvest()
|
|
1014
|
+
>>> data = cv.io_to_dict()
|
|
1015
|
+
>>> print(data["score"], data["level"])
|
|
1016
|
+
>>> # For compact, deterministic output:
|
|
1017
|
+
>>> data = cv.io_to_dict(include_audit_log=False)
|
|
1018
|
+
>>> assert data["audit_log"] is None
|
|
1019
|
+
"""
|
|
1020
|
+
return self.io_to_invest(include_audit_log=include_audit_log).model_dump(by_alias=True)
|
|
1021
|
+
|
|
1022
|
+
@staticmethod
|
|
1023
|
+
def io_load_json(filepath: str | Path) -> Cyvest:
|
|
1024
|
+
"""
|
|
1025
|
+
Load an investigation from a JSON file.
|
|
1026
|
+
|
|
1027
|
+
Args:
|
|
1028
|
+
filepath: Path to the JSON file (relative or absolute)
|
|
1029
|
+
|
|
1030
|
+
Returns:
|
|
1031
|
+
Reconstructed Cyvest investigation
|
|
1032
|
+
|
|
1033
|
+
Raises:
|
|
1034
|
+
FileNotFoundError: If the file does not exist
|
|
1035
|
+
json.JSONDecodeError: If the file contains invalid JSON
|
|
1036
|
+
Exception: For other file-related errors
|
|
1037
|
+
|
|
1038
|
+
Example:
|
|
1039
|
+
>>> cv = Cyvest.io_load_json("investigation.json")
|
|
1040
|
+
>>> cv = Cyvest.io_load_json("/absolute/path/to/investigation.json")
|
|
1041
|
+
"""
|
|
1042
|
+
return load_investigation_json(filepath)
|
|
1043
|
+
|
|
1044
|
+
@staticmethod
|
|
1045
|
+
def io_load_dict(data: dict[str, Any]) -> Cyvest:
|
|
1046
|
+
"""
|
|
1047
|
+
Load an investigation from a dictionary (parsed JSON).
|
|
1048
|
+
|
|
1049
|
+
Args:
|
|
1050
|
+
data: Dictionary containing the serialized investigation data
|
|
1051
|
+
|
|
1052
|
+
Returns:
|
|
1053
|
+
Reconstructed Cyvest investigation
|
|
1054
|
+
|
|
1055
|
+
Raises:
|
|
1056
|
+
ValueError: If required fields are missing or invalid
|
|
1057
|
+
|
|
1058
|
+
Example:
|
|
1059
|
+
>>> import json
|
|
1060
|
+
>>> with open("investigation.json") as f:
|
|
1061
|
+
... data = json.load(f)
|
|
1062
|
+
>>> cv = Cyvest.io_load_dict(data)
|
|
1063
|
+
"""
|
|
1064
|
+
return load_investigation_dict(data)
|
|
1065
|
+
|
|
1066
|
+
# Shared context, investigation merging, finalization, comparison
|
|
1067
|
+
|
|
1068
|
+
def shared_context(
|
|
1069
|
+
self,
|
|
1070
|
+
*,
|
|
1071
|
+
lock: threading.RLock | None = None,
|
|
1072
|
+
max_async_workers: int | None = None,
|
|
1073
|
+
) -> SharedInvestigationContext:
|
|
1074
|
+
"""
|
|
1075
|
+
Create a SharedInvestigationContext tied to this Cyvest instance.
|
|
1076
|
+
|
|
1077
|
+
Args:
|
|
1078
|
+
lock: Optional shared lock for advanced synchronization scenarios.
|
|
1079
|
+
max_async_workers: Optional limit for concurrent async reconciliation workers.
|
|
1080
|
+
"""
|
|
1081
|
+
from cyvest.shared import SharedInvestigationContext
|
|
1082
|
+
|
|
1083
|
+
return SharedInvestigationContext(self, lock=lock, max_async_workers=max_async_workers)
|
|
1084
|
+
|
|
1085
|
+
def merge_investigation(self, other: Cyvest) -> None:
|
|
1086
|
+
"""
|
|
1087
|
+
Merge another investigation into this one.
|
|
1088
|
+
|
|
1089
|
+
Args:
|
|
1090
|
+
other: The investigation to merge
|
|
1091
|
+
"""
|
|
1092
|
+
self._investigation.merge_investigation(other._investigation)
|
|
1093
|
+
|
|
1094
|
+
def finalize_relationships(self) -> None:
|
|
1095
|
+
"""
|
|
1096
|
+
Finalize observable relationships by linking orphan sub-graphs to root.
|
|
1097
|
+
|
|
1098
|
+
Any observable or sub-graph not connected to the root will be automatically
|
|
1099
|
+
linked by finding the best starting node of each disconnected component.
|
|
1100
|
+
"""
|
|
1101
|
+
self._investigation.finalize_relationships()
|
|
1102
|
+
|
|
1103
|
+
def compare(
|
|
1104
|
+
self,
|
|
1105
|
+
expected: Cyvest | None = None,
|
|
1106
|
+
result_expected: list | None = None,
|
|
1107
|
+
) -> list:
|
|
1108
|
+
"""
|
|
1109
|
+
Compare this investigation against expected results.
|
|
1110
|
+
|
|
1111
|
+
Args:
|
|
1112
|
+
expected: The reference investigation (expected results), optional
|
|
1113
|
+
result_expected: List of ExpectedResult tolerance rules for specific checks
|
|
1114
|
+
|
|
1115
|
+
Returns:
|
|
1116
|
+
List of DiffItem for all differences found
|
|
1117
|
+
"""
|
|
1118
|
+
return compare_investigations(actual=self, expected=expected, result_expected=result_expected)
|
|
1119
|
+
|
|
1120
|
+
# Display helpers
|
|
1121
|
+
|
|
1122
|
+
def display_summary(
|
|
1123
|
+
self,
|
|
1124
|
+
show_graph: bool = True,
|
|
1125
|
+
exclude_levels: Level | Iterable[Level] = Level.NONE,
|
|
1126
|
+
show_audit_log: bool = False,
|
|
1127
|
+
rich_print: Callable[[Any], None] | None = None,
|
|
1128
|
+
) -> None:
|
|
1129
|
+
"""
|
|
1130
|
+
Display a comprehensive summary of the investigation using Rich.
|
|
1131
|
+
|
|
1132
|
+
Args:
|
|
1133
|
+
show_graph: Whether to display the observable graph
|
|
1134
|
+
exclude_levels: Level(s) to omit from the report (default: Level.NONE)
|
|
1135
|
+
show_audit_log: Whether to display the investigation audit log
|
|
1136
|
+
rich_print: Optional callable that takes a renderable and returns None
|
|
1137
|
+
"""
|
|
1138
|
+
if rich_print is None:
|
|
1139
|
+
|
|
1140
|
+
def rich_print(renderables: Any) -> None:
|
|
1141
|
+
logger.rich("INFO", renderables)
|
|
1142
|
+
|
|
1143
|
+
display_summary(
|
|
1144
|
+
self,
|
|
1145
|
+
rich_print,
|
|
1146
|
+
show_graph=show_graph,
|
|
1147
|
+
exclude_levels=exclude_levels,
|
|
1148
|
+
show_audit_log=show_audit_log,
|
|
1149
|
+
)
|
|
1150
|
+
|
|
1151
|
+
def display_statistics(
|
|
1152
|
+
self,
|
|
1153
|
+
rich_print: Callable[[Any], None] | None = None,
|
|
1154
|
+
) -> None:
|
|
1155
|
+
"""
|
|
1156
|
+
Display investigation statistics using Rich.
|
|
1157
|
+
|
|
1158
|
+
Args:
|
|
1159
|
+
rich_print: Optional callable that takes a renderable and returns None.
|
|
1160
|
+
If not provided, uses the default logger.
|
|
1161
|
+
"""
|
|
1162
|
+
if rich_print is None:
|
|
1163
|
+
|
|
1164
|
+
def rich_print(renderables: Any) -> None:
|
|
1165
|
+
logger.rich("INFO", renderables)
|
|
1166
|
+
|
|
1167
|
+
display_statistics(self, rich_print)
|
|
1168
|
+
|
|
1169
|
+
def display_diff(
|
|
1170
|
+
self,
|
|
1171
|
+
expected: Cyvest | None = None,
|
|
1172
|
+
result_expected: list | None = None,
|
|
1173
|
+
title: str = "Diff",
|
|
1174
|
+
rich_print: Callable[[Any], None] | None = None,
|
|
1175
|
+
) -> None:
|
|
1176
|
+
"""
|
|
1177
|
+
Compare and display diff against expected results.
|
|
1178
|
+
|
|
1179
|
+
Args:
|
|
1180
|
+
expected: The reference investigation (expected results), optional
|
|
1181
|
+
result_expected: List of ExpectedResult tolerance rules for specific checks
|
|
1182
|
+
title: Title for the diff table
|
|
1183
|
+
rich_print: Optional callable that takes a renderable and returns None
|
|
1184
|
+
"""
|
|
1185
|
+
if rich_print is None:
|
|
1186
|
+
|
|
1187
|
+
def rich_print(renderables):
|
|
1188
|
+
return logger.rich("INFO", renderables, width=150)
|
|
1189
|
+
|
|
1190
|
+
diffs = compare_investigations(actual=self, expected=expected, result_expected=result_expected)
|
|
1191
|
+
display_diff(diffs, rich_print, title=title)
|
|
1192
|
+
|
|
1193
|
+
def display_check(
|
|
1194
|
+
self,
|
|
1195
|
+
check_key: str,
|
|
1196
|
+
rich_print: Callable[[Any], None] | None = None,
|
|
1197
|
+
) -> None:
|
|
1198
|
+
"""
|
|
1199
|
+
Display detailed information about a check.
|
|
1200
|
+
|
|
1201
|
+
Args:
|
|
1202
|
+
check_key: Key of the check to display (format: chk:check-name)
|
|
1203
|
+
rich_print: Optional callable that takes a renderable and returns None.
|
|
1204
|
+
If not provided, uses the default logger.
|
|
1205
|
+
|
|
1206
|
+
Raises:
|
|
1207
|
+
KeyError: If check not found
|
|
1208
|
+
"""
|
|
1209
|
+
if rich_print is None:
|
|
1210
|
+
|
|
1211
|
+
def rich_print(renderables: Any) -> None:
|
|
1212
|
+
logger.rich("INFO", renderables, width=150, prefix=False)
|
|
1213
|
+
|
|
1214
|
+
display_check_query(self, check_key, rich_print)
|
|
1215
|
+
|
|
1216
|
+
def display_observable(
|
|
1217
|
+
self,
|
|
1218
|
+
observable_key: str,
|
|
1219
|
+
depth: int = 1,
|
|
1220
|
+
rich_print: Callable[[Any], None] | None = None,
|
|
1221
|
+
) -> None:
|
|
1222
|
+
"""
|
|
1223
|
+
Display detailed information about an observable.
|
|
1224
|
+
|
|
1225
|
+
Shows observable info, score breakdown (how the score was calculated),
|
|
1226
|
+
threat intelligence, and relationships up to the specified depth.
|
|
1227
|
+
|
|
1228
|
+
Args:
|
|
1229
|
+
observable_key: Key of the observable to display (format: obs:type:value)
|
|
1230
|
+
depth: Relationship traversal depth (default 1)
|
|
1231
|
+
rich_print: Optional callable that takes a renderable and returns None.
|
|
1232
|
+
If not provided, uses the default logger.
|
|
1233
|
+
|
|
1234
|
+
Raises:
|
|
1235
|
+
KeyError: If observable not found
|
|
1236
|
+
"""
|
|
1237
|
+
if rich_print is None:
|
|
1238
|
+
|
|
1239
|
+
def rich_print(renderables: Any) -> None:
|
|
1240
|
+
logger.rich("INFO", renderables, width=150, prefix=False)
|
|
1241
|
+
|
|
1242
|
+
display_observable_query(self, observable_key, rich_print, depth=depth)
|
|
1243
|
+
|
|
1244
|
+
def display_threat_intel(
|
|
1245
|
+
self,
|
|
1246
|
+
ti_key: str,
|
|
1247
|
+
rich_print: Callable[[Any], None] | None = None,
|
|
1248
|
+
) -> None:
|
|
1249
|
+
"""
|
|
1250
|
+
Display detailed information about a threat intel entry.
|
|
1251
|
+
|
|
1252
|
+
Args:
|
|
1253
|
+
ti_key: Key of the threat intel to display (format: ti:source:obs:type:value)
|
|
1254
|
+
rich_print: Optional callable that takes a renderable and returns None.
|
|
1255
|
+
If not provided, uses the default logger.
|
|
1256
|
+
|
|
1257
|
+
Raises:
|
|
1258
|
+
KeyError: If threat intel not found
|
|
1259
|
+
"""
|
|
1260
|
+
if rich_print is None:
|
|
1261
|
+
|
|
1262
|
+
def rich_print(renderables: Any) -> None:
|
|
1263
|
+
logger.rich("INFO", renderables, width=150, prefix=False)
|
|
1264
|
+
|
|
1265
|
+
display_threat_intel_query(self, ti_key, rich_print)
|
|
1266
|
+
|
|
1267
|
+
    def display_network(
        self,
        output_dir: str | None = None,
        open_browser: bool = True,
        min_level: Level | None = None,
        observable_types: list[ObservableType] | None = None,
        physics: bool = True,
        group_by_type: bool = False,
        max_label_length: int = 60,
        title: str = "Cyvest Investigation Network",
    ) -> str:
        """
        Generate and display an interactive network graph visualization.

        Creates an HTML file with a pyvis network graph showing observables as nodes
        (colored by level, sized by score, shaped by type) and relationships as edges
        (colored by direction, labeled by type).

        Args:
            output_dir: Directory to save HTML file (defaults to temp directory)
            open_browser: Whether to automatically open the HTML file in a browser
            min_level: Minimum security level to include (filters out lower levels)
            observable_types: List of observable types to include (filters out others)
            physics: Enable physics simulation for organic layout (default: True)
            group_by_type: Group observables by type using hierarchical layout (default: False)
            max_label_length: Maximum length for node labels before truncation (default: 60)
            title: Title displayed in the generated HTML visualization

        Returns:
            Path to the generated HTML file

        Examples:
            >>> cv = Cyvest()
            >>> # Create investigation with observables
            >>> cv.display_network()
            '/tmp/cyvest_12345/cyvest_network.html'
        """
        return generate_network_graph(
            self,
            output_dir=output_dir,
            open_browser=open_browser,
            min_level=min_level,
            observable_types=observable_types,
            physics=physics,
            group_by_type=group_by_type,
            max_label_length=max_label_length,
            title=title,
        )
|
|
1315
|
+
|
|
1316
|
+
# Fluent helper entrypoints
|
|
1317
|
+
|
|
1318
|
+
def taxonomy(self, *, level: Level, name: str, value: str) -> Taxonomy:
|
|
1319
|
+
"""
|
|
1320
|
+
Create a taxonomy object for threat intelligence entries.
|
|
1321
|
+
|
|
1322
|
+
Args:
|
|
1323
|
+
level: Taxonomy level (Level enum)
|
|
1324
|
+
name: Taxonomy name (unique per threat intel)
|
|
1325
|
+
value: Taxonomy value
|
|
1326
|
+
|
|
1327
|
+
Returns:
|
|
1328
|
+
Taxonomy instance
|
|
1329
|
+
"""
|
|
1330
|
+
return Taxonomy(level=level, name=name, value=value)
|
|
1331
|
+
|
|
1332
|
+
def threat_intel_draft(
|
|
1333
|
+
self,
|
|
1334
|
+
source: str,
|
|
1335
|
+
score: Decimal | float,
|
|
1336
|
+
comment: str = "",
|
|
1337
|
+
extra: dict[str, Any] | None = None,
|
|
1338
|
+
level: Level | None = None,
|
|
1339
|
+
taxonomies: list[Taxonomy | dict[str, Any]] | None = None,
|
|
1340
|
+
) -> ThreatIntel:
|
|
1341
|
+
"""
|
|
1342
|
+
Create an unbound threat intel draft entry with fluent helper methods.
|
|
1343
|
+
|
|
1344
|
+
Args:
|
|
1345
|
+
source: Threat intel source name
|
|
1346
|
+
score: Score from threat intel
|
|
1347
|
+
comment: Optional comment
|
|
1348
|
+
extra: Optional extra data
|
|
1349
|
+
level: Optional explicit level
|
|
1350
|
+
taxonomies: Optional taxonomies
|
|
1351
|
+
|
|
1352
|
+
Returns:
|
|
1353
|
+
Unbound ThreatIntel instance
|
|
1354
|
+
"""
|
|
1355
|
+
return self.threat_intel_draft_create(source, score, comment, extra, level, taxonomies)
|
|
1356
|
+
|
|
1357
|
+
def observable(
|
|
1358
|
+
self,
|
|
1359
|
+
obs_type: ObservableType,
|
|
1360
|
+
value: str,
|
|
1361
|
+
internal: bool = False,
|
|
1362
|
+
whitelisted: bool = False,
|
|
1363
|
+
comment: str = "",
|
|
1364
|
+
extra: dict[str, Any] | None = None,
|
|
1365
|
+
score: Decimal | float | None = None,
|
|
1366
|
+
level: Level | None = None,
|
|
1367
|
+
) -> ObservableProxy:
|
|
1368
|
+
"""
|
|
1369
|
+
Create (or fetch) an observable with fluent helper methods.
|
|
1370
|
+
|
|
1371
|
+
Args:
|
|
1372
|
+
obs_type: Type of observable
|
|
1373
|
+
value: Value of the observable
|
|
1374
|
+
internal: Whether this is an internal asset
|
|
1375
|
+
whitelisted: Whether this is whitelisted
|
|
1376
|
+
comment: Optional comment
|
|
1377
|
+
extra: Optional extra data
|
|
1378
|
+
score: Optional explicit score
|
|
1379
|
+
level: Optional explicit level
|
|
1380
|
+
|
|
1381
|
+
Returns:
|
|
1382
|
+
Observable proxy exposing mutation helpers for chaining
|
|
1383
|
+
"""
|
|
1384
|
+
return self.observable_create(obs_type, value, internal, whitelisted, comment, extra, score, level)
|
|
1385
|
+
|
|
1386
|
+
def check(
|
|
1387
|
+
self,
|
|
1388
|
+
check_name: str,
|
|
1389
|
+
description: str,
|
|
1390
|
+
comment: str = "",
|
|
1391
|
+
extra: dict[str, Any] | None = None,
|
|
1392
|
+
score: Decimal | float | None = None,
|
|
1393
|
+
level: Level | None = None,
|
|
1394
|
+
) -> CheckProxy:
|
|
1395
|
+
"""
|
|
1396
|
+
Create a check with fluent helper methods.
|
|
1397
|
+
|
|
1398
|
+
Args:
|
|
1399
|
+
check_name: Check name
|
|
1400
|
+
description: Check description
|
|
1401
|
+
comment: Optional comment
|
|
1402
|
+
extra: Optional extra data
|
|
1403
|
+
score: Optional explicit score
|
|
1404
|
+
level: Optional explicit level
|
|
1405
|
+
|
|
1406
|
+
Returns:
|
|
1407
|
+
Check proxy exposing mutation helpers for chaining
|
|
1408
|
+
"""
|
|
1409
|
+
return self.check_create(check_name, description, comment, extra, score, level)
|
|
1410
|
+
|
|
1411
|
+
def tag(self, name: str, description: str = "") -> TagProxy:
|
|
1412
|
+
"""
|
|
1413
|
+
Create a tag with fluent helper methods.
|
|
1414
|
+
|
|
1415
|
+
Args:
|
|
1416
|
+
name: Tag name (use ":" as hierarchy delimiter)
|
|
1417
|
+
description: Tag description
|
|
1418
|
+
|
|
1419
|
+
Returns:
|
|
1420
|
+
Tag proxy exposing mutation helpers for chaining
|
|
1421
|
+
"""
|
|
1422
|
+
return self.tag_create(name, description)
|
|
1423
|
+
|
|
1424
|
+
def root(self) -> ObservableProxy:
|
|
1425
|
+
"""
|
|
1426
|
+
Get the root observable.
|
|
1427
|
+
|
|
1428
|
+
Returns:
|
|
1429
|
+
Root observable
|
|
1430
|
+
"""
|
|
1431
|
+
return self.observable_get_root()
|