txt2detection 1.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- txt2detection/__init__.py +2 -0
- txt2detection/__main__.py +343 -0
- txt2detection/ai_extractor/__init__.py +16 -0
- txt2detection/ai_extractor/anthropic.py +12 -0
- txt2detection/ai_extractor/base.py +72 -0
- txt2detection/ai_extractor/deepseek.py +20 -0
- txt2detection/ai_extractor/gemini.py +18 -0
- txt2detection/ai_extractor/openai.py +18 -0
- txt2detection/ai_extractor/openrouter.py +20 -0
- txt2detection/ai_extractor/prompts.py +121 -0
- txt2detection/ai_extractor/utils.py +21 -0
- txt2detection/attack_navigator.py +68 -0
- txt2detection/bundler.py +422 -0
- txt2detection/config/detection_languages.yaml +14 -0
- txt2detection/credential_checker.py +82 -0
- txt2detection/models.py +427 -0
- txt2detection/observables.py +161 -0
- txt2detection/utils.py +100 -0
- txt2detection-1.1.3.dist-info/METADATA +230 -0
- txt2detection-1.1.3.dist-info/RECORD +23 -0
- txt2detection-1.1.3.dist-info/WHEEL +4 -0
- txt2detection-1.1.3.dist-info/entry_points.txt +2 -0
- txt2detection-1.1.3.dist-info/licenses/LICENSE +202 -0
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
import typing
|
|
2
|
+
|
|
3
|
+
if typing.TYPE_CHECKING:
|
|
4
|
+
from .bundler import Bundler
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def map_technique_tactic(obj, report_tactics, rule_tactics):
    """Resolve an ATT&CK technique object to a (technique_id, tactic_id) pair.

    Prefers a tactic that co-occurs in the same rule (``rule_tactics``);
    falls back to any tactic seen anywhere in the report
    (``report_tactics``); returns ``(technique_id, None)`` if neither
    matches.

    Args:
        obj: STIX attack-pattern dict with ``external_references`` and
            ``kill_chain_phases``.
        report_tactics: mapping of tactic shortname -> x-mitre-tactic object
            collected across the whole report.
        rule_tactics: same mapping, restricted to the current rule.

    Returns:
        Tuple of (technique external_id, tactic external_id or None).
    """
    technique_name = obj["external_references"][0]["external_id"]
    tactic_name = None
    tactic_names = set()
    for phase in obj["kill_chain_phases"]:
        # only consider mitre-attack kill chains (e.g. "mitre-attack",
        # "mitre-ics-attack" would not pass the superset test for both words)
        if not set(phase["kill_chain_name"].split("-")).issuperset(["mitre", "attack"]):
            continue
        tactic_names.add(phase["phase_name"])
    tactic_obj = None
    if s := tactic_names.intersection(rule_tactics):
        tactic_obj = rule_tactics[s.pop()]
    # BUGFIX: the original `elif tactic_names.intersection(report_tactics):`
    # did not rebind `s`, so `s.pop()` below popped from the EMPTY set left
    # over from the failed `if` and raised KeyError whenever the
    # report-level fallback was taken. Rebind `s` with a walrus here too.
    elif s := tactic_names.intersection(report_tactics):
        tactic_obj = report_tactics[s.pop()]
    if tactic_obj:
        tactic_name = tactic_obj["external_references"][0]["external_id"]
    return technique_name, tactic_name
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def create_navigator_layer(report, indicator, technique_mapping, mitre_version):
    """Build an ATT&CK Navigator layer dict for a single rule.

    Args:
        report: the STIX report object (``.id`` is recorded in metadata).
        indicator: the rule's indicator dict (name/description/id are used).
        technique_mapping: mapping of technique external_id -> tactic
            external_id (or None when no tactic could be resolved).
        mitre_version: installed ATT&CK version string for the layer header.

    Returns:
        A dict in ATT&CK Navigator layer format (layer schema 4.5).
    """
    techniques = []
    for technique_id, tactic in technique_mapping.items():
        entry = {
            "techniqueID": technique_id,
            "score": 100,
            "showSubtechniques": True,
        }
        # the tactic key is only emitted when one was resolved
        if tactic:
            entry["tactic"] = tactic
        techniques.append(entry)

    layer = {
        "name": indicator["name"],
        "domain": "enterprise-attack",
        "description": indicator["description"],
        "versions": {
            "layer": "4.5",
            "attack": mitre_version,
            "navigator": "5.1.0",
        },
        "techniques": techniques,
        "gradient": {
            "colors": ["#ffffff", "#ff6666"],
            "minValue": 0,
            "maxValue": 100,
        },
        "legendItems": [],
        "metadata": [
            {"name": "report_id", "value": report.id, "rule_id": indicator["id"]}
        ],
        "links": [
            {
                "label": "Generated using txt2detection",
                "url": "https://github.com/muchdogesec/txt2detection/",
            }
        ],
        "layout": {"layout": "side"},
    }
    return layer
|
txt2detection/bundler.py
ADDED
|
@@ -0,0 +1,422 @@
|
|
|
1
|
+
import contextlib
|
|
2
|
+
import enum
|
|
3
|
+
import itertools
|
|
4
|
+
import json
|
|
5
|
+
import logging
|
|
6
|
+
import os
|
|
7
|
+
from urllib.parse import urljoin
|
|
8
|
+
import requests
|
|
9
|
+
from stix2 import (
|
|
10
|
+
Report,
|
|
11
|
+
Identity,
|
|
12
|
+
MarkingDefinition,
|
|
13
|
+
Relationship,
|
|
14
|
+
Bundle,
|
|
15
|
+
)
|
|
16
|
+
from stix2.serialization import serialize
|
|
17
|
+
import hashlib
|
|
18
|
+
|
|
19
|
+
from txt2detection import attack_navigator, observables
|
|
20
|
+
from txt2detection.models import (
|
|
21
|
+
AIDetection,
|
|
22
|
+
BaseDetection,
|
|
23
|
+
DataContainer,
|
|
24
|
+
DetectionContainer,
|
|
25
|
+
UUID_NAMESPACE,
|
|
26
|
+
SigmaRuleDetection,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
from datetime import UTC, datetime as dt
|
|
30
|
+
import uuid
|
|
31
|
+
from stix2 import parse as parse_stix
|
|
32
|
+
|
|
33
|
+
from txt2detection.models import TLP_LEVEL
|
|
34
|
+
from txt2detection.utils import (
|
|
35
|
+
STATUSES,
|
|
36
|
+
load_stix_object_from_url,
|
|
37
|
+
remove_rule_specific_tags,
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
logger = logging.getLogger("txt2detection.bundler")
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class Bundler:
    """Collects detection rules and related objects into a STIX 2.1 bundle.

    Builds one ``Report`` per run, adds one ``Indicator`` (Sigma pattern)
    per detection via :meth:`add_rule_indicator`, links each indicator to
    ATT&CK objects (fetched from CTI Butler), CVE objects (fetched from
    Vulmatch), its logsource SCO and any extracted observables, and exposes
    the result as JSON via :meth:`to_json` / :attr:`bundle_dict`.
    """

    identity = None
    # NOTE(review): mutable class attributes shared across all instances.
    # `object_marking_refs` and `id_map` appear unused by the methods visible
    # here, but if anything mutates them, the mutation leaks between
    # instances — TODO confirm intent.
    object_marking_refs = []
    uuid = None
    id_map = dict()
    data: DataContainer
    # https://raw.githubusercontent.com/muchdogesec/stix4doge/refs/heads/main/objects/identity/txt2detection.json
    default_identity = Identity(
        **{
            "type": "identity",
            "spec_version": "2.1",
            "id": "identity--a4d70b75-6f4a-5d19-9137-da863edd33d7",
            "created_by_ref": "identity--9779a2db-f98c-5f4b-8d08-8ee04e02dbb5",
            "created": "2020-01-01T00:00:00.000Z",
            "modified": "2020-01-01T00:00:00.000Z",
            "name": "txt2detection",
            "description": "https://github.com/muchdogesec/txt2detection",
            "identity_class": "system",
            "sectors": ["technology"],
            "contact_information": "https://www.dogesec.com/contact/",
            "object_marking_refs": [
                "marking-definition--94868c89-83c2-464b-929b-a1a8aa3c8487",
                "marking-definition--97ba4e8b-04f6-57e8-8f6e-3a0f0a7dc0fb",
            ],
        }
    )
    # https://raw.githubusercontent.com/muchdogesec/stix4doge/refs/heads/main/objects/marking-definition/txt2detection.json
    default_marking = MarkingDefinition(
        **{
            "type": "marking-definition",
            "spec_version": "2.1",
            "id": "marking-definition--a4d70b75-6f4a-5d19-9137-da863edd33d7",
            "created_by_ref": "identity--9779a2db-f98c-5f4b-8d08-8ee04e02dbb5",
            "created": "2020-01-01T00:00:00.000Z",
            "definition_type": "statement",
            "definition": {
                "statement": "This object was created using: https://github.com/muchdogesec/txt2detection"
            },
            "object_marking_refs": [
                "marking-definition--94868c89-83c2-464b-929b-a1a8aa3c8487",
                "marking-definition--97ba4e8b-04f6-57e8-8f6e-3a0f0a7dc0fb",
            ],
        }
    )

    # NOTE(review): these two class attributes perform network fetches at
    # import time of this module — any import of Bundler requires
    # connectivity to raw.githubusercontent.com. TODO confirm acceptable.
    sigma_extension_definition = load_stix_object_from_url(
        "https://raw.githubusercontent.com/muchdogesec/stix2extensions/refs/heads/main/extension-definitions/properties/indicator-sigma-rule.json"
    )
    data_source_extension_definition = load_stix_object_from_url(
        "https://raw.githubusercontent.com/muchdogesec/stix2extensions/refs/heads/main/extension-definitions/scos/data-source.json"
    )

    @classmethod
    def generate_report_id(cls, created_by_ref, created, name):
        """Deterministic UUIDv5 for the report, derived from creator+created+name."""
        if not created_by_ref:
            created_by_ref = cls.default_identity["id"]
        return str(uuid.uuid5(UUID_NAMESPACE, f"{created_by_ref}+{created}+{name}"))

    def __init__(
        self,
        name,
        identity,
        tlp_level,
        description,
        labels,
        created=None,
        modified=None,
        report_id=None,
        external_refs: list = None,
        reference_urls=None,
        license=None,
        **kwargs,
    ) -> None:
        """Create the report skeleton and an empty bundle.

        Args:
            name: report name (also feeds the deterministic report id).
            identity: STIX Identity of the creator; falls back to
                ``default_identity`` when falsy.
            tlp_level: key into ``TLP_LEVEL`` ("clear" when falsy).
            description: report description; its md5 is recorded as an
                external reference (removed again below when empty).
            labels: report labels; rule-specific tags are stripped.
            created/modified: timestamps; default to now(UTC)/created.
            report_id: explicit report UUID; otherwise generated.
            external_refs: extra external_references for report+indicators.
            reference_urls: URLs turned into txt2detection external refs.
            license: stored for use by rule generation.
            **kwargs: ignored (tolerates extra config keys).
        """
        self.created = created or dt.now(UTC)
        self.modified = modified or self.created
        self.identity = identity or self.default_identity
        self.tlp_level = TLP_LEVEL.get(tlp_level or "clear")
        self.uuid = report_id or self.generate_report_id(
            self.identity.id, self.created, name
        )
        self.reference_urls = reference_urls or []
        self.labels = labels or []
        self.license = license

        self.all_objects = set()  # ids already placed in the bundle (dedupe)
        self.job_id = f"report--{self.uuid}"
        self.external_refs = (external_refs or []) + [
            dict(
                source_name="txt2detection",
                url=url,
                description="txt2detection-reference",
            )
            for url in self.reference_urls
        ]
        # model_construct() skips pydantic validation — fields are filled
        # incrementally as rules are processed
        self.data = DataContainer.model_construct()
        self.tactics = {}     # detection id -> {tactic shortname: tactic obj}
        self.techniques = {}  # detection id -> [attack-pattern objs]

        self.report = Report(
            created_by_ref=self.identity.id,
            name=name,
            id=self.job_id,
            description=description,
            object_refs=[
                f"note--{self.uuid}"
            ],  # won't allow creation with empty object_refs
            created=self.created,
            modified=self.modified,
            object_marking_refs=[self.tlp_level.value.id],
            labels=remove_rule_specific_tags(self.labels),
            published=self.created,
            external_references=[
                dict(
                    source_name="description_md5_hash",
                    external_id=hashlib.md5((description or "").encode()).hexdigest(),
                )
            ]
            + self.external_refs,
        )
        # placeholder ref only existed to satisfy Report validation
        self.report.object_refs.clear()  # clear object refs
        self.set_defaults()
        if not description:
            # drop the md5-of-empty-description ref added above
            self.report.external_references.pop(0)

    def set_defaults(self):
        """Seed the bundle with the TLP marking, default marking, identity,
        report and the two extension-definition objects."""
        # self.value.extend(TLP_LEVEL.values()) # adds all tlp levels
        self.bundle = Bundle(objects=[self.tlp_level.value], id=f"bundle--{self.uuid}")

        self.bundle.objects.extend([self.default_marking, self.identity, self.report])
        # add default STIX 2.1 marking definition for txt2detection
        self.report.object_marking_refs.append(self.default_marking.id)
        self.add_ref(self.sigma_extension_definition)
        self.add_ref(self.data_source_extension_definition)

    def add_ref(self, sdo, append_report=False):
        """Add *sdo* to the bundle once; optionally also list it in the
        report's object_refs."""
        sdo_id = sdo["id"]
        if sdo_id in self.all_objects:
            return
        self.bundle.objects.append(sdo)
        if sdo_id not in self.report.object_refs and append_report:
            self.report.object_refs.append(sdo_id)
        self.all_objects.add(sdo_id)

    def add_rule_indicator(self, detection: SigmaRuleDetection):
        """Turn one detection into an Indicator plus all its linked objects.

        Converts an AIDetection to a SigmaRuleDetection first, builds the
        indicator dict (Sigma pattern + x_sigma_* extension properties),
        then attaches ATT&CK objects, CVE objects, the logsource SCO and
        extracted observables, each with a relationship back to the
        indicator.
        """
        indicator_types = getattr(detection, "indicator_types", None)
        if isinstance(detection, AIDetection):
            detection = detection.to_sigma_rule_detection(self)
        assert isinstance(
            detection, SigmaRuleDetection
        ), f"detection of type {type(detection)} not supported"
        indicator = {
            "type": "indicator",
            "id": "indicator--" + str(detection.detection_id),
            "spec_version": "2.1",
            "created_by_ref": self.report.created_by_ref,
            "created": self.report.created,
            "modified": self.report.modified,
            "indicator_types": indicator_types,
            "name": detection.title,
            "description": detection.description,
            "labels": remove_rule_specific_tags(self.labels),
            "pattern_type": "sigma",
            "pattern": detection.make_rule(self),
            "valid_from": self.report.created,
            "object_marking_refs": self.report.object_marking_refs,
            "external_references": self.external_refs,
            "extensions": {
                self.sigma_extension_definition["id"]: {
                    "extension_type": "toplevel-property-extension"
                }
            },
            "x_sigma_type": "base",
            "x_sigma_level": detection.level,
            "x_sigma_status": detection.status,
            "x_sigma_license": detection.license,
            "x_sigma_fields": detection.fields,
            "x_sigma_falsepositives": detection.falsepositives,
            "x_sigma_scope": detection.scope,
        }
        # NOTE(review): "external_references" aliases self.external_refs
        # (no copy), so this append also mutates the shared list — every
        # later indicator/rule inherits this rule's md5 ref. TODO confirm.
        indicator["external_references"].append(
            {
                "source_name": "rule_md5_hash",
                "external_id": hashlib.md5(indicator["pattern"].encode()).hexdigest(),
            }
        )
        logsource = detection.make_data_source()

        logger.debug(f"===== rule {detection.detection_id} =====")
        logger.debug("```yaml\n" + indicator["pattern"] + "\n```")
        logger.debug(f" =================== end of rule =================== ")

        # pre-seed with "Not found"; overwritten below when resolved
        self.data.attacks.update(dict.fromkeys(detection.mitre_attack_ids, "Not found"))
        tactics = self.tactics[detection.id] = {}
        techniques = self.techniques[detection.id] = []
        for obj in self.get_attack_objects(detection.mitre_attack_ids):
            self.add_ref(obj)
            self.add_relation(indicator, obj)
            self.data.attacks[obj["external_references"][0]["external_id"]] = obj["id"]
            if obj["type"] == "x-mitre-tactic":
                tactics[obj["x_mitre_shortname"]] = obj
            else:
                techniques.append(obj)

        self.data.cves.update(dict.fromkeys(detection.cve_ids, "Not found"))
        for obj in self.get_cve_objects(detection.cve_ids):
            self.add_ref(obj)
            self.add_relation(indicator, obj)
            self.data.cves[obj["name"]] = obj["id"]

        self.add_ref(parse_stix(indicator, allow_custom=True), append_report=True)
        self.add_ref(logsource, append_report=True)
        self.add_relation(
            indicator,
            logsource,
            description=f'{indicator["name"]} is created from {make_logsouce_string(logsource)}',
        )

        # NOTE(review): reassigned per rule — observables of earlier rules
        # are discarded from self.data. TODO confirm this is intended.
        self.data.observables = []
        for ob_type, ob_value in set(
            observables.find_stix_observables(detection.detection)
        ):
            self.data.observables.append(dict(type=ob_type, value=ob_value))
            try:
                obj = observables.to_stix_object(ob_type, ob_value)
                self.add_ref(obj)
                self.add_relation(indicator, obj, "related-to", target_name=ob_value)
            except Exception as e:
                # best-effort: record the failure on the observable entry
                # and keep going with the remaining observables
                self.data.observables[-1]["error"] = str(e)
                logger.exception(f"failed to process observable {ob_type}/{ob_value}")

    def add_relation(
        self,
        indicator,
        target_object,
        relationship_type="related-to",
        target_name=None,
        description=None,
    ):
        """Create a deterministic SRO from *indicator* to *target_object*
        and add it to the bundle.

        Copies the target's first external reference onto the indicator
        and the relationship (best effort — targets without external
        references are tolerated via suppress).
        """
        ext_refs = []

        with contextlib.suppress(Exception):
            indicator["external_references"].append(
                target_object["external_references"][0]
            )
            ext_refs = [target_object["external_references"][0]]

        if not description:
            target_name = (
                target_name
                or f"{target_object['external_references'][0]['external_id']} ({target_object['name']})"
            )
            description = f"{indicator['name']} {relationship_type} {target_name}"

        # id derived from source+target so re-runs produce the same SRO id
        rel = Relationship(
            id="relationship--"
            + str(
                uuid.uuid5(UUID_NAMESPACE, f"{indicator['id']}+{target_object['id']}")
            ),
            source_ref=indicator["id"],
            target_ref=target_object["id"],
            relationship_type=relationship_type,
            created_by_ref=self.report.created_by_ref,
            description=description,
            created=self.report.created,
            modified=self.report.modified,
            object_marking_refs=self.report.object_marking_refs,
            external_references=ext_refs,
            allow_custom=True,
        )
        self.add_ref(rel)

    def to_json(self):
        """Serialize the bundle to a pretty-printed JSON string."""
        return serialize(self.bundle, indent=4)

    @property
    def bundle_dict(self):
        """The bundle as a plain dict (round-tripped through JSON)."""
        return json.loads(self.to_json())

    def get_attack_objects(self, attack_ids):
        """Fetch ATT&CK objects for *attack_ids* from CTI Butler.

        Returns [] for an empty id list. Requires CTIBUTLER_BASE_URL;
        CTIBUTLER_API_KEY is optional.
        """
        if not attack_ids:
            return []
        logger.debug(f"retrieving attack objects: {attack_ids}")
        endpoint = urljoin(
            os.environ["CTIBUTLER_BASE_URL"] + "/",
            f"v1/attack-enterprise/objects/?attack_id=" + ",".join(attack_ids),
        )

        headers = {}
        if api_key := os.environ.get("CTIBUTLER_API_KEY"):
            headers["API-KEY"] = api_key

        return self._get_objects(endpoint, headers)

    @classmethod
    def get_attack_version(cls):
        """Return the latest installed ATT&CK enterprise version from CTI Butler."""
        headers = {}
        api_root = os.environ["CTIBUTLER_BASE_URL"] + "/"
        if api_key := os.environ.get("CTIBUTLER_API_KEY"):
            headers["API-KEY"] = api_key
        version_url = urljoin(api_root, f"v1/attack-enterprise/versions/installed/")
        return requests.get(version_url, headers=headers).json()["latest"]

    @classmethod
    def get_cve_objects(cls, cve_ids):
        """Fetch CVE vulnerability objects for *cve_ids* from Vulmatch.

        Returns [] for an empty id list. Requires VULMATCH_BASE_URL;
        VULMATCH_API_KEY is optional.
        """
        if not cve_ids:
            return []
        logger.debug(f"retrieving cve objects: {cve_ids}")
        endpoint = urljoin(
            os.environ["VULMATCH_BASE_URL"] + "/",
            f"v1/cve/objects/?cve_id=" + ",".join(cve_ids),
        )
        headers = {}
        if api_key := os.environ.get("VULMATCH_API_KEY"):
            headers["API-KEY"] = api_key

        return cls._get_objects(endpoint, headers)

    @classmethod
    def _get_objects(cls, endpoint, headers):
        """Page through an objects endpoint (1000 per page) and return the
        accumulated ``objects`` lists.

        Stops on a non-200 response, an empty page, or a short page.
        NOTE(review): a non-200 mid-pagination silently returns a partial
        result — TODO confirm that is acceptable.
        """
        data = []
        page = 1
        while True:
            resp = requests.get(
                endpoint, params=dict(page=page, page_size=1000), headers=headers
            )
            if resp.status_code != 200:
                break
            d = resp.json()
            if len(d["objects"]) == 0:
                break
            data.extend(d["objects"])
            page += 1
            if d["page_results_count"] < d["page_size"]:
                break
        return data

    def bundle_detections(self, container: DetectionContainer):
        """Add every detection in *container* to the bundle (no-op when the
        container is marked unsuccessful)."""
        self.data.detections = container
        if not container.success:
            return
        for d in container.detections:
            self.add_rule_indicator(d)

    def create_attack_navigator(self):
        """Build one ATT&CK Navigator layer per detection that has
        techniques, stored in ``self.data.navigator_layer``."""
        self.mitre_version = self.get_attack_version()
        # flatten per-rule tactic maps into one report-wide lookup
        all_tactics = dict(
            itertools.chain(*map(lambda x: x.items(), self.tactics.values()))
        )
        self.data.navigator_layer = {}
        for detection_id, techniques in self.techniques.items():
            if not techniques:
                continue
            tactics = self.tactics[detection_id]
            mapping = dict(
                [
                    attack_navigator.map_technique_tactic(
                        technique, all_tactics, tactics
                    )
                    for technique in techniques
                ]
            )
            # find this detection's indicator back in the bundle by id suffix
            indicator = [
                f
                for f in self.bundle.objects
                if str(f["id"]).endswith(str(detection_id)) and f["type"] == "indicator"
            ][0]
            self.data.navigator_layer[detection_id] = (
                attack_navigator.create_navigator_layer(
                    self.report, indicator, mapping, self.mitre_version
                )
            )
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
def make_logsouce_string(source: dict):
    """Render the product/service/category fields of a logsource mapping
    as a short display string, e.g. ``log-source {product=windows}``.

    Only keys present in *source* are emitted, in the mapping's own order.
    (Function name keeps the historical "logsouce" spelling — it is part
    of the public interface used by Bundler.)
    """
    wanted = ("product", "service", "category")
    parts = ", ".join(
        f"{key}={value}" for key, value in source.items() if key in wanted
    )
    return "log-source {" + parts + "}"
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# currently only supports Sigma
|
|
2
|
+
## ===== Sigma =====
|
|
3
|
+
|
|
4
|
+
sigma:
|
|
5
|
+
type: "detection_language"
|
|
6
|
+
name: "Sigma"
|
|
7
|
+
description: "https://sigmahq.io/docs/basics/rules.html"
|
|
8
|
+
products:
|
|
9
|
+
-
|
|
10
|
+
documentation: ""
|
|
11
|
+
created: 2020-01-01
|
|
12
|
+
modified: 2020-01-01
|
|
13
|
+
created_by: DOGESEC
|
|
14
|
+
version: 1.0.0
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import os
|
|
3
|
+
import random
|
|
4
|
+
from urllib.parse import urljoin
|
|
5
|
+
import requests
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def check_llms():
    """Probe each supported AI provider and report its credential status.

    Returns:
        dict mapping provider name to the value of
        ``model.check_credential()``, or "unsupported" when the model name
        cannot be parsed, or "unauthorized" when the credential check
        raises for any other reason.
    """
    # imported lazily to avoid a circular import with txt2detection.__main__
    from txt2detection.__main__ import parse_model

    auth_info = dict()
    for model_name in ["openai", "deepseek", "gemini", "openrouter", "anthropic"]:
        try:
            model = parse_model(model_name)
            auth_info[model_name] = model.check_credential()
        except argparse.ArgumentTypeError:
            auth_info[model_name] = "unsupported"
        # was a bare `except:` — that also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the "any failure means bad
        # credentials" behavior without blocking Ctrl-C
        except Exception:
            auth_info[model_name] = "unauthorized"
    return auth_info
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def check_ctibutler_vulmatch(service):
|
|
24
|
+
session = requests.Session()
|
|
25
|
+
if service == "vulmatch":
|
|
26
|
+
base_url = os.getenv("VULMATCH_BASE_URL")
|
|
27
|
+
url = urljoin(
|
|
28
|
+
base_url,
|
|
29
|
+
"v1/cve/objects/vulnerability--f552f6f4-39da-48dc-8717-323772c99588/",
|
|
30
|
+
)
|
|
31
|
+
session.headers["API-KEY"] = os.environ.get("VULMATCH_API_KEY")
|
|
32
|
+
elif service == "ctibutler":
|
|
33
|
+
base_url = os.getenv("CTIBUTLER_BASE_URL")
|
|
34
|
+
url = urljoin(base_url, "v1/location/versions/available/")
|
|
35
|
+
session.headers["API-KEY"] = os.environ.get("CTIBUTLER_API_KEY")
|
|
36
|
+
|
|
37
|
+
try:
|
|
38
|
+
resp = session.get(url)
|
|
39
|
+
match resp.status_code:
|
|
40
|
+
case 401 | 403:
|
|
41
|
+
return "unauthorized"
|
|
42
|
+
case 200:
|
|
43
|
+
return "authorized"
|
|
44
|
+
case _:
|
|
45
|
+
return "unknown"
|
|
46
|
+
except:
|
|
47
|
+
return "offline"
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def check_statuses(test_llms=False):
    """Collect backing-service statuses, optionally probing LLM credentials.

    Args:
        test_llms: when True, also run check_llms() and include the result
            under the "llms" key.

    Returns:
        dict with "ctibutler" and "vulmatch" status strings, plus "llms"
        (a nested dict) when requested.
    """
    statuses = {
        "ctibutler": check_ctibutler_vulmatch("ctibutler"),
        "vulmatch": check_ctibutler_vulmatch("vulmatch"),
    }
    if test_llms:
        statuses["llms"] = check_llms()
    return statuses
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def format_statuses(status_dict):
    """Print a human-readable summary of the dict from check_statuses().

    Top-level entries print one line each; the nested "llms" dict gets an
    indented sub-section. Returns None (output only).
    """

    def marker_for(status):
        """Map a status string to a checkmark, cross, dash, or '?'."""
        lowered = status.lower()
        if lowered == "authorized":
            return "✔"
        if lowered == "unauthorized":
            return "✖"
        if lowered in ("unknown", "offline", "unsupported"):
            return "–"
        return "?"

    print("============= Service Statuses ===============")
    for key, value in status_dict.items():
        if key == "llms" and isinstance(value, dict):
            print(f"\n {key.upper()}:")
            for llm_name, llm_status in value.items():
                print(f"  {llm_name:<12}: {llm_status:<15} {marker_for(llm_status)}")
        else:
            print(f" {key:<12}: {value:<15} {marker_for(value)}")
|