txt2detection 1.0.7__py3-none-any.whl → 1.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of txt2detection might be problematic.
- txt2detection/__main__.py +219 -68
- txt2detection/ai_extractor/base.py +41 -13
- txt2detection/ai_extractor/models.py +34 -0
- txt2detection/ai_extractor/openai.py +1 -3
- txt2detection/ai_extractor/openrouter.py +4 -4
- txt2detection/ai_extractor/prompts.py +130 -3
- txt2detection/attack_flow.py +233 -0
- txt2detection/bundler.py +174 -87
- txt2detection/credential_checker.py +11 -9
- txt2detection/models.py +86 -49
- txt2detection/observables.py +0 -1
- txt2detection/utils.py +24 -12
- {txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/METADATA +7 -8
- txt2detection-1.0.9.dist-info/RECORD +24 -0
- txt2detection-1.0.7.dist-info/RECORD +0 -22
- {txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/WHEEL +0 -0
- {txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/entry_points.txt +0 -0
- {txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/licenses/LICENSE +0 -0
txt2detection/models.py
CHANGED
@@ -8,6 +8,7 @@ from slugify import slugify
 from datetime import date as dt_date
 from typing import Any, ClassVar, List, Literal, Optional, Union
 from uuid import UUID
+from stix2extensions.data_source import DataSource
 
 import jsonschema
 from pydantic import BaseModel, Field, computed_field, field_validator
@@ -18,6 +19,8 @@ from stix2 import (
     MarkingDefinition,
 )
 
+from txt2detection.ai_extractor.models import AttackFlowList
+
 if typing.TYPE_CHECKING:
     from txt2detection.bundler import Bundler
 
@@ -124,11 +127,11 @@ class TLP_LEVEL(enum.Enum):
     ]
 
     @classmethod
-    def get(cls, level: …
+    def get(cls, level: "str|TLP_LEVEL"):
         if isinstance(level, cls):
             return level
         level = level.lower()
-        level = level.replace(…
+        level = level.replace("+", "_").replace("-", "_")
         if level not in cls.levels():
             raise Exception(f"unsupported tlp level: `{level}`")
         return cls.levels()[level]
@@ -137,6 +140,7 @@ class TLP_LEVEL(enum.Enum):
     def name(self):
         return super().name.lower()
 
+
 class Statuses(enum.StrEnum):
     stable = enum.auto()
     test = enum.auto()
@@ -144,6 +148,7 @@ class Statuses(enum.StrEnum):
     deprecated = enum.auto()
     unsupported = enum.auto()
 
+
 class Level(enum.StrEnum):
     informational = enum.auto()
     low = enum.auto()
@@ -151,6 +156,7 @@ class Level(enum.StrEnum):
     high = enum.auto()
     critical = enum.auto()
 
+
 class SigmaTag(str):
     @classmethod
     def __get_pydantic_core_schema__(
@@ -158,31 +164,35 @@ class SigmaTag(str):
         _source: type[Any],
         _handler,
     ) -> core_schema.CoreSchema:
-        return core_schema.no_info_after_validator_function(…
+        return core_schema.no_info_after_validator_function(
+            cls._validate, core_schema.str_schema()
+        )
 
     @classmethod
-    def __get_pydantic_json_schema__(
-        cls, core_schema: core_schema.CoreSchema, handler
-    ):
+    def __get_pydantic_json_schema__(cls, core_schema: core_schema.CoreSchema, handler):
         field_schema = handler(core_schema)
-        field_schema.update(…
+        field_schema.update(
+            type="string", pattern=TAG_PATTERN.pattern, format="sigma-tag"
+        )
         return field_schema
 
     @classmethod
     def _validate(cls, input_value: str, /) -> str:
         if not TAG_PATTERN.match(input_value):
             raise PydanticCustomError(
-                …
-                …
-                …
-                …
+                "value_error",
+                "value is not a valid SIGMA tag: {reason}",
+                {
+                    "reason": f"Must be in format namespace.value and match pattern {TAG_PATTERN.pattern}"
+                },
+            )
         return input_value
-
+
+
 class RelatedRule(BaseModel):
     id: UUID
-    type: Literal[…
-        …
-    ]
+    type: Literal["derived", "obsolete", "merged", "renamed", "similar"]
+
 
 class BaseDetection(BaseModel):
     title: str
@@ -195,7 +205,9 @@ class BaseDetection(BaseModel):
     level: Level
     _custom_id = None
     _extra_data: dict
-    sigma_json_schema: ClassVar = requests.get(…
+    sigma_json_schema: ClassVar = requests.get(
+        "https://github.com/SigmaHQ/sigma-specification/raw/refs/heads/main/json-schema/sigma-detection-rule-schema.json"
+    ).json()
 
     def model_post_init(self, __context):
         self.tags = self.tags or []
@@ -213,17 +225,16 @@ class BaseDetection(BaseModel):
     @property
     def tlp_level(self):
         return tlp_from_tags(self.tags)
-
+
     @tlp_level.setter
     def tlp_level(self, level):
         set_tlp_level_in_tags(self.tags, level)
-
+
     def set_labels(self, labels):
         self.tags.extend(labels)
 
     def set_extra_data_from_bundler(self, bundler: "Bundler"):
-        raise NotImplementedError(…
-            …
+        raise NotImplementedError("this class should no longer be in use")
 
     def make_rule(self, bundler: "Bundler"):
         self.set_extra_data_from_bundler(bundler)
@@ -232,19 +243,17 @@ class BaseDetection(BaseModel):
         rule = dict(
             id=self.detection_id,
             **self.model_dump(
-                exclude=["indicator_types", "id"],
-                mode="json",
-                by_alias=True
+                exclude=["indicator_types", "id"], mode="json", by_alias=True
             ),
         )
         for k, v in list(rule.items()):
             if not v:
                 rule.pop(k, None)
-
+
         self.validate_rule_with_json_schema(rule)
-        if getattr(self, …
+        if getattr(self, "date", 0):
             rule.update(date=self.date)
-        if getattr(self, …
+        if getattr(self, "modified", 0):
             rule.update(modified=self.modified)
         return yaml.dump(rule, sort_keys=False, indent=4)
 
@@ -253,13 +262,13 @@ class BaseDetection(BaseModel):
             rule,
             self.sigma_json_schema,
         )
-
+
     @property
     def external_references(self):
         refs = []
-        for attr in […
+        for attr in ["level", "status", "license"]:
             if attr_val := getattr(self, attr, None):
-                refs.append(dict(source_name=f…
+                refs.append(dict(source_name=f"sigma-{attr}", description=attr_val))
         return refs
 
     @property
@@ -280,19 +289,34 @@ class BaseDetection(BaseModel):
             retval.append(namespace.upper() + "-" + label_id)
         return retval
 
+    def make_data_source(self):
+        return DataSource(
+            category=self.logsource.get("category"),
+            product=self.logsource.get("product"),
+            service=self.logsource.get("service"),
+            definition=self.logsource.get("definition"),
+        )
+
 
 class AIDetection(BaseDetection):
     indicator_types: list[str] = Field(default_factory=list)
-
+
     def to_sigma_rule_detection(self, bundler):
         rule_dict = {
-            **self.model_dump(exclude=[…
-            **dict(…
+            **self.model_dump(exclude=["indicator_types"]),
+            **dict(
+                date=bundler.report.created.date(),
+                modified=bundler.report.modified.date(),
+                id=uuid.uuid4(),
+            ),
         }
         try:
             return SigmaRuleDetection.model_validate(rule_dict)
         except Exception as e:
-            raise ValueError(…
+            raise ValueError(
+                dict(message="validate ai output failed", error=e, content=rule_dict)
+            )
+
 
 class SigmaRuleDetection(BaseDetection):
     title: str
@@ -319,59 +343,71 @@ class SigmaRuleDetection(BaseDetection):
     @property
     def detection_id(self):
         return str(self.id)
-
+
     @property
     def indicator_types(self):
         return self._indicator_types
-
+
     @indicator_types.setter
     def indicator_types(self, types):
         self._indicator_types = types
-
+
     @detection_id.setter
     def detection_id(self, new_id):
         if self.id and str(self.id) != str(new_id):
             self.related = self.related or []
             self.related.append(RelatedRule(id=self.id, type="renamed"))
         self.id = new_id
-
-    @field_validator(…
+
+    @field_validator("tags", mode="after")
     @classmethod
     def validate_tlp(cls, tags: list[str]):
         tlps = []
         for tag in tags:
-            if tag.startswith(…
+            if tag.startswith("tlp."):
                 tlps.append(tag)
         if len(tlps) > 1:
-            raise ValueError(…
+            raise ValueError(
+                f"tag must not contain more than one tag in tlp namespace. Got {tlps}"
+            )
         return tags
-
-    @field_validator(…
+
+    @field_validator("modified", mode="after")
    @classmethod
     def validate_modified(cls, modified, info):
-        if info.data.get(…
+        if info.data.get("date") == modified:
             return None
         return modified
-
+
     def set_extra_data_from_bundler(self, bundler: "Bundler"):
         if not bundler:
             return
-
+
         if not self.date:
             from .utils import as_date
+
             self.date = as_date(bundler.created)
-
+
         self.set_labels(bundler.labels)
         self.tlp_level = bundler.tlp_level.name
         self.author = bundler.report.created_by_ref
         self.license = bundler.license
         self.references = bundler.reference_urls
 
+
 class DetectionContainer(BaseModel):
     success: bool
-    detections: list[Union[BaseDetection…
+    detections: list[Union[BaseDetection, AIDetection, SigmaRuleDetection]]
 
 
+class DataContainer(BaseModel):
+    detections: DetectionContainer
+    attack_flow: AttackFlowList = Field(default=None)
+    navigator_layer: list = Field(default=None)
+    observables: list[dict] = Field(default=None)
+    cves: dict[str, str] = Field(default=None)
+    attacks: dict[str, str] = Field(default=None)
+
 
 def tlp_from_tags(tags: list[SigmaTag]):
     for tag in tags:
@@ -382,10 +418,11 @@ def tlp_from_tags(tags: list[SigmaTag]):
         return tlp_level
     return None
 
+
 def set_tlp_level_in_tags(tags: list[SigmaTag], level):
     level = str(level)
     for i, tag in enumerate(tags):
-        if tag.startswith(…
+        if tag.startswith("tlp."):
             tags.remove(tag)
-    tags.append(…
+    tags.append("tlp." + level.replace("_", "-"))
     return tags
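The TLP handling in this file is easiest to see end to end. Below is a minimal standalone sketch of the normalisation visible in the hunks above, assuming only what the diff shows; `normalise_tlp` and `tlp_tag` are hypothetical helper names, not the package's API (the package does this inside `TLP_LEVEL.get()` and `set_tlp_level_in_tags()`).

def normalise_tlp(level: str) -> str:
    # TLP_LEVEL.get(): lower-case, then map "+" and "-" to "_", so
    # "AMBER+STRICT" and "amber-strict" both normalise to "amber_strict".
    return level.lower().replace("+", "_").replace("-", "_")

def tlp_tag(level: str) -> str:
    # set_tlp_level_in_tags(): Sigma tags use dashes in the tlp namespace,
    # e.g. the "amber_strict" level becomes the tag "tlp.amber-strict".
    return "tlp." + str(level).replace("_", "-")

assert normalise_tlp("AMBER+STRICT") == "amber_strict"
assert tlp_tag("amber_strict") == "tlp.amber-strict"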
txt2detection/observables.py
CHANGED
txt2detection/utils.py
CHANGED
@@ -17,11 +17,14 @@ from .models import UUID_NAMESPACE
 class DetectionLanguage(SimpleNamespace):
     pass
 
+
 def parse_model(value: str):
-    splits = value.split(…
+    splits = value.split(":", 1)
     provider = splits[0]
     if provider not in ALL_AI_EXTRACTORS:
-        raise NotImplementedError(…
+        raise NotImplementedError(
+            f"invalid AI provider in `{value}`, must be one of {list(ALL_AI_EXTRACTORS)}"
+        )
     provider = ALL_AI_EXTRACTORS[provider]
     try:
         if len(splits) == 2:
@@ -30,8 +33,10 @@ def parse_model(value: str):
     except Exception as e:
         raise ModelError(f"Unable to initialize model `{value}`") from e
 
+
 def make_identity(name, namespace=None, created_by_ref=None, object_marking_refs=None):
     from .bundler import Bundler
+
     if isinstance(namespace, str):
         namespace = uuid.UUID(namespace)
     namespace = namespace or UUID_NAMESPACE
@@ -41,25 +46,31 @@ def make_identity(name, namespace=None, created_by_ref=None, object_marking_refs
         created_by_ref=created_by_ref or Bundler.default_identity.id,
         created=datetime(2020, 1, 1),
         modified=datetime(2020, 1, 1),
-        object_marking_refs=object_marking_refs…
+        object_marking_refs=object_marking_refs
+        or [
             "marking-definition--94868c89-83c2-464b-929b-a1a8aa3c8487",
-            "marking-definition--a4d70b75-6f4a-5d19-9137-da863edd33d7"
+            "marking-definition--a4d70b75-6f4a-5d19-9137-da863edd33d7",
         ],
     )
 
 
 def validate_token_count(max_tokens, input, extractor: BaseAIExtractor):
-    logging.info(…
+    logging.info("INPUT_TOKEN_LIMIT = %d", max_tokens)
     token_count = extractor.count_tokens(input)
-    logging.info(…
-    if …
-        raise Exception(…
+    logging.info("TOKEN COUNT FOR %s: %d", extractor.extractor_name, token_count)
+    if token_count > max_tokens:
+        raise Exception(
+            f"{extractor.extractor_name}: input_file token count ({token_count}) exceeds INPUT_TOKEN_LIMIT ({max_tokens})"
+        )
 
 
 @lru_cache(maxsize=5)
 def get_licenses(date):
-    resp = requests.get(…
-        …
+    resp = requests.get(
+        "https://github.com/spdx/license-list-data/raw/refs/heads/main/json/licenses.json"
+    )
+    return {l["licenseId"]: l["name"] for l in resp.json()["licenses"]}
+
 
 def valid_licenses():
     return get_licenses(datetime.now().date().isoformat())
@@ -75,9 +86,10 @@ def remove_rule_specific_tags(tags):
     return labels
 
 
-def as_date(d: …
+def as_date(d: "date|datetime"):
     if isinstance(d, datetime):
         return d.date()
     return d
 
-
+
+STATUSES = ["stable", "test", "experimental", "deprecated", "unsupported"]
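For reference, the `provider:model` contract that `parse_model` enforces, as a self-contained sketch built only from what the diff shows. `split_provider_model` is a hypothetical name and the extractor registry is stubbed here; the real mapping lives in `txt2detection.ai_extractor`.

# Stub for illustration only; the real registry maps provider names to
# extractor classes.
ALL_AI_EXTRACTORS = {"openrouter": None, "openai": None, "anthropic": None,
                     "gemini": None, "deepseek": None}

def split_provider_model(value: str) -> tuple[str, str | None]:
    # Mirrors parse_model(): split once on ":", validate the provider part.
    provider, _, model = value.partition(":")
    if provider not in ALL_AI_EXTRACTORS:
        raise NotImplementedError(
            f"invalid AI provider in `{value}`, must be one of {list(ALL_AI_EXTRACTORS)}"
        )
    return provider, model or None

print(split_provider_model("openai:gpt-4o"))             # ('openai', 'gpt-4o')
print(split_provider_model("openrouter:openai/gpt-4o"))  # ('openrouter', 'openai/gpt-4o')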
{txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: txt2detection
-Version: 1.0.7
+Version: 1.0.9
 Summary: A command line tool that takes a txt file containing threat intelligence and turns it into a detection rule.
 Project-URL: Homepage, https://github.com/muchdogesec/txt2detection
 Project-URL: Issues, https://github.com/muchdogesec/txt2detection/issues
@@ -21,6 +21,7 @@ Requires-Dist: python-slugify
 Requires-Dist: pyyaml
 Requires-Dist: requests>=2.31.0; python_version >= '3.7'
 Requires-Dist: stix2
+Requires-Dist: stix2extensions
 Requires-Dist: tqdm>=4.66.4; python_version >= '3.7'
 Requires-Dist: validators>=0.34.0
 Provides-Extra: anthropic
@@ -71,12 +72,6 @@ txt2detection allows a user to enter some threat intelligence as a file to consi…
 2. Based on the user input, AI prompts structured and sent to produce an intelligence rule
 3. Rules converted into STIX objects
 
-## tl;dr
-
-[](https://www.youtube.com/watch?v=uJWXYKyu3Xg)
-
-[Watch the demo](https://www.youtube.com/watch?v=uJWXYKyu3Xg).
-
 ## Usage
 
 ### Setup
@@ -161,12 +156,14 @@ Use this mode to generate a set of rules from an input text file;
 * `--license` (optional): [License of the rule according the SPDX ID specification](https://spdx.org/licenses/). Will be added to the rule.
 * `--reference_urls` (optional): A list of URLs to be added as `references` in the Sigma Rule property and in the `external_references` property of the Indicator and Report STIX object created. e.g `"https://www.google.com/" "https://www.facebook.com/"`
 * `--external_refs` (optional): txt2detection will automatically populate the `external_references` of the report object it creates for the input. You can use this value to add additional objects to `external_references`. Note, you can only add `source_name` and `external_id` values currently. Pass as `source_name=external_id`. e.g. `--external_refs txt2stix=demo1 source=id` would create the following objects under the `external_references` property: `{"source_name":"txt2stix","external_id":"demo1"},{"source_name":"source","external_id":"id"}`
-* …
+* `--ai_provider` (required): defines the `provider:model` to be used to generate the rule. Select one option. Currently supports:
     * Provider (env var required `OPENROUTER_API_KEY`): `openrouter:`, providers/models `openai/gpt-4o`, `deepseek/deepseek-chat` ([More here](https://openrouter.ai/models))
     * Provider (env var required `OPENAI_API_KEY`): `openai:`, models e.g.: `gpt-4o`, `gpt-4o-mini`, `gpt-4-turbo`, `gpt-4` ([More here](https://platform.openai.com/docs/models))
     * Provider (env var required `ANTHROPIC_API_KEY`): `anthropic:`, models e.g.: `claude-3-5-sonnet-latest`, `claude-3-5-haiku-latest`, `claude-3-opus-latest` ([More here](https://docs.anthropic.com/en/docs/about-claude/models))
     * Provider (env var required `GOOGLE_API_KEY`): `gemini:models/`, models: `gemini-1.5-pro-latest`, `gemini-1.5-flash-latest` ([More here](https://ai.google.dev/gemini-api/docs/models/gemini))
     * Provider (env var required `DEEPSEEK_API_KEY`): `deepseek:`, models `deepseek-chat` ([More here](https://api-docs.deepseek.com/quick_start/pricing))
+* `--ai_create_attack_flow` (boolean): passing this flag will also prompt the AI model (the same entered for `--ai_provider`, default `false`) to generate an [Attack Flow](https://center-for-threat-informed-defense.github.io/attack-flow/) for the MITRE ATT&CK tags to define the logical order in which they are being described. Note, Sigma currently supports ATT&CK Enterprise only.
+* `--ai_create_attack_navigator_layer` (boolean, default `false`): passing this flag will generate a [MITRE ATT&CK Navigator layer](https://mitre-attack.github.io/attack-navigator/) for MITRE ATT&CK tags. Note, Sigma currently supports ATT&CK Enterprise only. You don't need to pass this if `--ai_create_attack_flow` is set to `true` (as this mode relies on this setting being true)
 
 Note, in this mode, the following values will be automatically assigned to the rule
 
@@ -193,6 +190,8 @@ Note, in this mode you should be aware of a few things;
 * `--external_refs` (optional): txt2detection will automatically populate the `external_references` of the report object it creates for the input. You can use this value to add additional objects to `external_references`. Note, you can only add `source_name` and `external_id` values currently. Pass as `source_name=external_id`. e.g. `--external_refs txt2stix=demo1 source=id` would create the following objects under the `external_references` property: `{"source_name":"txt2stix","external_id":"demo1"},{"source_name":"source","external_id":"id"}`
 * `status` (optional): either `stable`, `test`, `experimental`, `deprecated`, `unsupported`. If passed, will overwrite any existing `status` recorded in the rule
 * `level` (optional): either `informational`, `low`, `medium`, `high`, `critical`. If passed, will overwrite any existing `level` recorded in the rule
+* `--ai_create_attack_flow` (boolean): passing this flag will also prompt the AI model (the same entered for `--ai_provider`, default `false`) to generate an [Attack Flow](https://center-for-threat-informed-defense.github.io/attack-flow/) for the MITRE ATT&CK tags to define the logical order in which they are being described. Note, Sigma currently supports ATT&CK Enterprise only.
+* `--ai_create_attack_navigator_layer` (boolean, default `false`): passing this flag will generate a [MITRE ATT&CK Navigator layer](https://mitre-attack.github.io/attack-navigator/) for MITRE ATT&CK tags. Note, Sigma currently supports ATT&CK Enterprise only. You don't need to pass this if `--ai_create_attack_flow` is set to `true` (as this mode relies on this setting being true)
 
 ### A note on observable extraction
 
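To make the `--external_refs` behaviour documented in the README hunks above concrete, here is a small illustrative sketch of the described `source_name=external_id` mapping. This is not the package's own parser, just the documented contract; `parse_external_refs` is a hypothetical name.

def parse_external_refs(pairs: list[str]) -> list[dict]:
    # "txt2stix=demo1" -> {"source_name": "txt2stix", "external_id": "demo1"}
    refs = []
    for pair in pairs:
        source_name, _, external_id = pair.partition("=")
        refs.append({"source_name": source_name, "external_id": external_id})
    return refs

print(parse_external_refs(["txt2stix=demo1", "source=id"]))
# [{'source_name': 'txt2stix', 'external_id': 'demo1'},
#  {'source_name': 'source', 'external_id': 'id'}]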
txt2detection-1.0.9.dist-info/RECORD
ADDED
@@ -0,0 +1,24 @@
+txt2detection/__init__.py,sha256=Fc460P0q_eb2u3Xc89z-fwl-4ai3jrPqPNVwJQYNkNQ,89
+txt2detection/__main__.py,sha256=s5XcIctE59ALjys6Y8lRIqS_pQWi1mlNo2gyG8_XS5s,11622
+txt2detection/attack_flow.py,sha256=1Ns98ZEoiN8kH-iSo7d6zYtplm11QkhPQAvSZsW4WXQ,8853
+txt2detection/bundler.py,sha256=eHyr6jlnd4ZvynHkyy5Hposkp_XqEAxEwGzlViSq1xU,13319
+txt2detection/credential_checker.py,sha256=NuKk7WlDshtdpGecxY1exoi4fUHCygunPH2lZ20oEA8,2598
+txt2detection/models.py,sha256=_-sR03FEWI46OUZdL7U0tibNn909B0NU9LWNzopBtiY,12888
+txt2detection/observables.py,sha256=RxgJchvk6_Z2pBxJ6MAGsx00gj8TyRt9W2BTQTb1F9o,6762
+txt2detection/utils.py,sha256=EJ5lMhnghUgW0JbcRmeiDXYwm5GaB6XrG4cUjru-52g,2812
+txt2detection/ai_extractor/__init__.py,sha256=itcwTF0-S80mx-SuSvfrKazvcwsojR-QsBN-UvnSDwE,418
+txt2detection/ai_extractor/anthropic.py,sha256=YOi2rHUeeoRMS4CFG6mX7xUU4q4rw9qNl72R74UN6ZM,420
+txt2detection/ai_extractor/base.py,sha256=2C3d4BoH7I4fnvp6cLxbtjiFVPm4WJLFwnS_lAppHr8,3210
+txt2detection/ai_extractor/deepseek.py,sha256=2XehIYbWXG6Odq68nQX4CNtl5GdmBlAmjLP_lG2eEFo,660
+txt2detection/ai_extractor/gemini.py,sha256=hlcKkiHGzQJ0dQECfIhjx2LfdhZoquAF9POwz61RAhw,557
+txt2detection/ai_extractor/models.py,sha256=xMTvUHoxIflbBA4mkGLTjwf657DVEOxd6gqLpEUciQ4,963
+txt2detection/ai_extractor/openai.py,sha256=ggonpHtckNz9GEJIR0ADMzZWDKi6EWuicP0fsxvkP3A,616
+txt2detection/ai_extractor/openrouter.py,sha256=rL-SnzRhzrCnPJGLxbTlRyxU0NAw42RmSq3ouuo3Iag,658
+txt2detection/ai_extractor/prompts.py,sha256=xI82PelsTidnRzi5wnNbEC4lmkio92YUDd8SZu4CQiE,10961
+txt2detection/ai_extractor/utils.py,sha256=SUxyPhkGp5yDbX_H_E018i93R8IbyLsQ00PIBDecfuc,540
+txt2detection/config/detection_languages.yaml,sha256=dgQUJPxhDRJ_IiFEFOiH0yhEer3SkFSIhY4pS3BsX2c,287
+txt2detection-1.0.9.dist-info/METADATA,sha256=UHkUnaL9wEt78RNw0EmQenodg2qxZ3gsTDkmVC2W7IE,15869
+txt2detection-1.0.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+txt2detection-1.0.9.dist-info/entry_points.txt,sha256=ep_rLlS2r1-kKE7S3iKf3SVwbCU9-FZhU9zUebitw7A,62
+txt2detection-1.0.9.dist-info/licenses/LICENSE,sha256=BK8Ppqlc4pdgnNzIxnxde0taoQ1BgicdyqmBvMiNYgY,11364
+txt2detection-1.0.9.dist-info/RECORD,,
txt2detection-1.0.7.dist-info/RECORD
DELETED
@@ -1,22 +0,0 @@
-txt2detection/__init__.py,sha256=Fc460P0q_eb2u3Xc89z-fwl-4ai3jrPqPNVwJQYNkNQ,89
-txt2detection/__main__.py,sha256=R9TgWWGzA8rxF39rZr2MNOrQubhItdRAgP2nd8Tfb78,9337
-txt2detection/bundler.py,sha256=rIvVTlLEHu9SMPqy8AyLbiJ3Cg0WNq7uWvGIXGoaPsg,10822
-txt2detection/credential_checker.py,sha256=YoOe1ABjNfAJIcNE6PRAZtvznTybUKHNBB57DPQhZsU,2564
-txt2detection/models.py,sha256=AKxqHsjnMQZFX5tWPCsXE6-OQLMbDhQbdu97zWJdNb0,12064
-txt2detection/observables.py,sha256=NNnwF_gOsPmAbfgk5fj1rcluMsShZOHssAGy2VJgvmo,6763
-txt2detection/utils.py,sha256=rLBFzpSepksXkONnqWkRqiMr8R4LTp4j8OrashFVUPc,2741
-txt2detection/ai_extractor/__init__.py,sha256=itcwTF0-S80mx-SuSvfrKazvcwsojR-QsBN-UvnSDwE,418
-txt2detection/ai_extractor/anthropic.py,sha256=YOi2rHUeeoRMS4CFG6mX7xUU4q4rw9qNl72R74UN6ZM,420
-txt2detection/ai_extractor/base.py,sha256=urZe_kpYu3BwXyKJsQ0GQIEtTasUQYp4dFzuz34Hai8,2336
-txt2detection/ai_extractor/deepseek.py,sha256=2XehIYbWXG6Odq68nQX4CNtl5GdmBlAmjLP_lG2eEFo,660
-txt2detection/ai_extractor/gemini.py,sha256=hlcKkiHGzQJ0dQECfIhjx2LfdhZoquAF9POwz61RAhw,557
-txt2detection/ai_extractor/openai.py,sha256=e5Of3i-T2CvUSx1T_v7wHOuewHK2IoImxZXfXeZc3Ds,625
-txt2detection/ai_extractor/openrouter.py,sha256=-KcdcyKPpaeiGfvqJB4L7vMmcXTDhml3Mr0T6kwANZA,645
-txt2detection/ai_extractor/prompts.py,sha256=ACYFWUafdHXHBXz7fq_RSooA4PJ-mBdaBzqsOOSFpVg,5918
-txt2detection/ai_extractor/utils.py,sha256=SUxyPhkGp5yDbX_H_E018i93R8IbyLsQ00PIBDecfuc,540
-txt2detection/config/detection_languages.yaml,sha256=dgQUJPxhDRJ_IiFEFOiH0yhEer3SkFSIhY4pS3BsX2c,287
-txt2detection-1.0.7.dist-info/METADATA,sha256=q3LsITk_4Ix6C8z2OAWIDlzz0qy8l3O1NeLBWVNJzL0,14489
-txt2detection-1.0.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-txt2detection-1.0.7.dist-info/entry_points.txt,sha256=ep_rLlS2r1-kKE7S3iKf3SVwbCU9-FZhU9zUebitw7A,62
-txt2detection-1.0.7.dist-info/licenses/LICENSE,sha256=BK8Ppqlc4pdgnNzIxnxde0taoQ1BgicdyqmBvMiNYgY,11364
-txt2detection-1.0.7.dist-info/RECORD,,
{txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/WHEEL
File without changes
{txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/entry_points.txt
File without changes
{txt2detection-1.0.7.dist-info → txt2detection-1.0.9.dist-info}/licenses/LICENSE
File without changes