txt2stix 0.0.4__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages' contents as they appear in their respective public registries.
- txt2stix/ai_extractor/base.py +4 -3
- txt2stix/ai_extractor/prompts.py +7 -1
- txt2stix/ai_extractor/utils.py +1 -0
- txt2stix/bundler.py +43 -10
- txt2stix/txt2stix.py +10 -13
- txt2stix/utils.py +4 -3
- {txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/METADATA +4 -3
- {txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/RECORD +11 -11
- {txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/WHEEL +0 -0
- {txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/entry_points.txt +0 -0
- {txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/licenses/LICENSE +0 -0
txt2stix/ai_extractor/base.py
CHANGED
```diff
@@ -6,7 +6,7 @@ import textwrap
 from llama_index.core import PromptTemplate
 from llama_index.core.llms.llm import LLM
 
-from txt2stix.ai_extractor.prompts import
+from txt2stix.ai_extractor.prompts import DEFAULT_CONTENT_CHECKER_WITH_SUMMARY_TEMPL, DEFAULT_EXTRACTION_TEMPL, DEFAULT_RELATIONSHIP_TEMPL, DEFAULT_SYSTEM_PROMPT, ATTACK_FLOW_PROMPT_TEMPL
 from txt2stix.ai_extractor.utils import AttackFlowList, DescribesIncident, ExtractionList, ParserWithLogging, RelationshipList, get_extractors_str
 from llama_index.core.utils import get_tokenizer
 
@@ -19,7 +19,7 @@ class BaseAIExtractor():
 
     relationship_template = DEFAULT_RELATIONSHIP_TEMPL
 
-    content_check_template =
+    content_check_template = DEFAULT_CONTENT_CHECKER_WITH_SUMMARY_TEMPL
 
     def _get_extraction_program(self):
         return LLMTextCompletionProgram.from_defaults(
@@ -63,7 +63,8 @@ class BaseAIExtractor():
         return self._get_relationship_program()(relationship_types=relationship_types, input_file=input_text, extractions=extractions)
 
     def extract_objects(self, input_text, extractors) -> ExtractionList:
-
+        extraction_list = self._get_extraction_program()(extractors=get_extractors_str(extractors), input_file=input_text)
+        return extraction_list.model_dump().get('extractions', [])
 
     def __init__(self, *args, **kwargs) -> None:
         pass
```
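This change moves the `model_dump()` unwrapping inside the base extractor, so callers now receive a plain list of extraction dicts instead of an `ExtractionList` pydantic model (the matching caller-side removal appears in the txt2stix.py diff below). A minimal sketch of the calling convention before and after; `extractor`, `text`, and `ai_extractors` are assumed inputs, not names from the diff:

```python
# Sketch only: `extractor`, `text`, and `ai_extractors` are assumed inputs.

# 0.0.4: extract_objects returned an ExtractionList pydantic model,
# and each caller unwrapped it itself.
result = extractor.extract_objects(text, ai_extractors)
extractions = result.model_dump().get('extractions', [])

# 1.0.1: the unwrapping happens inside extract_objects,
# so callers get the list of extraction dicts directly.
extractions = extractor.extract_objects(text, ai_extractors)
```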
txt2stix/ai_extractor/prompts.py
CHANGED
```diff
@@ -86,7 +86,7 @@ DEFAULT_RELATIONSHIP_TEMPL = PromptTemplate(textwrap.dedent(
 """
 ))
 
-
+DEFAULT_CONTENT_CHECKER_WITH_SUMMARY_TEMPL = PromptTemplate("""
 <persona>
 You are a cyber security threat intelligence analyst.
 Your job is to review reports that describe a cyber security incidents and/or threat intelligence.
@@ -120,6 +120,12 @@ DEFAULT_CONTENT_CHECKER_TEMPL = PromptTemplate("""
 * `indicator_of_compromise`
 * `ttp`
 </incident_classification>
+<summary>
+Using the MARKDOWN of the report provided in <document>, provide an executive summary of it containing no more than one paragraphs.
+IMPORTANT: the output should be structured as markdown text.
+IMPORTANT: This `summary` is different from explanation.
+IMPORTANT: You are to simplify the long intelligence reports into concise summaries for other to quickly understand the contents.
+</summary>
 """)
 
 ATTACK_FLOW_PROMPT_TEMPL = ChatPromptTemplate([
```
txt2stix/ai_extractor/utils.py
CHANGED
```diff
@@ -34,6 +34,7 @@ class DescribesIncident(BaseModel):
     describes_incident: bool = Field(description="does the <document> include malware analysis, APT group reports, data breaches and vulnerabilities?")
     explanation: str = Field(description="Two or three sentence summary of the incidents it describes OR summary of what it describes instead of an incident")
     incident_classification : list[str] = Field(description="All the valid incident classifications that describe this document/report")
+    summary: str = Field(description="executive summary of the document containing no more than one paragraphs.")
 
 class AttackFlowItem(BaseModel):
     position : int = Field(description="order of object starting at 0")
```
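The new `summary` field pairs with the `<summary>` block added to the content-checker prompt above: the structured output parsed into `DescribesIncident` now carries an executive summary alongside the classification. A hedged sketch of what a parsed result might look like; `check_content` is a hypothetical stand-in for the LLM program built from the template, and the field values are illustrative:

```python
# Hypothetical: check_content stands in for the program built from
# DEFAULT_CONTENT_CHECKER_WITH_SUMMARY_TEMPL; values are illustrative.
result = check_content(document=report_markdown)  # -> DescribesIncident

result.describes_incident       # bool: does the document describe an incident?
result.incident_classification  # e.g. ["ttp", "indicator_of_compromise"]
result.summary                  # new in 1.0.1: one-paragraph markdown summary
```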
txt2stix/bundler.py
CHANGED
```diff
@@ -6,6 +6,7 @@ from stix2 import (
     MarkingDefinition,
     Relationship,
     Bundle,
+    Note,
 )
 from stix2.parsing import dict_to_stix2, parse as parse_stix
 from stix2.serialization import serialize
@@ -110,6 +111,8 @@ class TLP_LEVEL(enum.Enum):
 
     @classmethod
     def get(cls, level):
+        if isinstance(level, str):
+            level = level.replace('-', '_').replace('+', '_')
         if isinstance(level, cls):
             return level
         return cls.levels()[level]
@@ -261,7 +264,7 @@ class txt2stixBundler:
     def load_stix_object_from_url(url):
         resp = requests.get(url)
         return dict_to_stix2(resp.json())
-
+
     def add_ref(self, sdo, is_report_object=True):
         self.add_extension(sdo)
         sdo_id = sdo["id"]
@@ -277,22 +280,19 @@ class txt2stixBundler:
                 sdo_value = v
                 break
         else:
-            if refs := sdo.get(
-                sdo_value = refs[0][
+            if refs := sdo.get("external_references", []):
+                sdo_value = refs[0]["external_id"]
             else:
                 sdo_value = "{NOTEXTRACTED}"
 
-
         self.id_value_map[sdo_id] = sdo_value
 
-
     def add_indicator(self, extracted_dict, add_standard_relationship):
         extractor = self.all_extractors[extracted_dict["type"]]
         stix_mapping = extractor.stix_mapping
         extracted_value = extracted_dict["value"]
         extracted_id = extracted_dict["id"]
 
-
         indicator = self.new_indicator(extractor, stix_mapping, extracted_value)
         # set id so it doesn't need to be created in build_observables
         if extracted_dict.get("indexes"):
@@ -340,7 +340,7 @@ class txt2stixBundler:
                 },
             ],
         }
-
+
         return indicator
 
     def add_ai_relationship(self, gpt_out):
@@ -413,11 +413,44 @@ class txt2stixBundler:
         return "indicator--" + str(
             uuid.uuid5(UUID_NAMESPACE, f"txt2stix+{self.identity['id']}+{self.report_md5}+{stix_mapping}+{value}")
         )
-
+
+    def add_summary(self, summary, ai_summary_provider):
+        summary_note_obj = Note(
+            type="note",
+            spec_version="2.1",
+            id=self.report.id.replace("report", "note"),
+            created=self.report.created,
+            modified=self.report.modified,
+            created_by_ref=self.report.created_by_ref,
+            external_references=[
+                {
+                    "source_name": "txt2stix_ai_summary_provider",
+                    "external_id": ai_summary_provider,
+                },
+            ],
+            abstract=f"AI Summary: {self.report.name}",
+            content=summary,
+            object_refs=[self.report.id],
+            object_marking_refs=self.report.object_marking_refs,
+            labels=self.report.get('labels'),
+            confidence=self.report.get('confidence')
+        )
+
+        self.add_ref(summary_note_obj)
+        self.add_ref(
+            self.new_relationship(
+                summary_note_obj["id"],
+                self.report.id,
+                relationship_type="summary-of",
+                description=f"AI generated summary for {self.report.name}",
+                external_references=summary_note_obj["external_references"],
+            )
+        )
+
     @property
     def flow_objects(self):
         return self._flow_objects
-
+
     @flow_objects.setter
     def flow_objects(self, objects):
         for obj in objects:
@@ -425,4 +458,4 @@ class txt2stixBundler:
                 continue
             is_report_object = obj['type'] != "extension-definition"
             self.add_ref(obj, is_report_object=is_report_object)
-        self._flow_objects = objects
+        self._flow_objects = objects
```
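Two behavioural notes on this file. First, `TLP_LEVEL.get` now normalises hyphenated and `+`-suffixed TLP strings onto enum member names before lookup. Second, `add_summary` derives the Note's id deterministically from the report id (`report--…` becomes `note--…`), so re-running on the same report yields the same Note. A sketch of the normalisation; the member names shown are assumptions, only the replace logic is taken from the diff:

```python
# Assumed member names; the normalisation itself comes from the diff.
TLP_LEVEL.get("amber-strict")    # "amber-strict" -> "amber_strict" -> member
TLP_LEVEL.get("amber+strict")    # TLP 2.0 style "amber+strict" -> same member
TLP_LEVEL.get(TLP_LEVEL.clear)   # enum instances still pass through unchanged
```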
txt2stix/txt2stix.py
CHANGED
```diff
@@ -14,7 +14,7 @@ from txt2stix.ai_extractor.utils import DescribesIncident
 from txt2stix.attack_flow import parse_flow
 
 
-from .utils import Txt2StixData, remove_links
+from .utils import RELATIONSHIP_TYPES, Txt2StixData, remove_links
 
 from .common import UUID_NAMESPACE, FatalException
 
@@ -99,8 +99,6 @@ def parse_extractors_globbed(type, all_extractors, names):
             pattern.load_extractor(extractor)
             filtered_extractors[extractor.type] = extraction_processor
             extraction_processor[extractor_name] = extractor
-        except KeyError:
-            raise argparse.ArgumentTypeError(f"no such {type} slug `{extractor_name}`")
         except BaseException as e:
             raise argparse.ArgumentTypeError(f"{type} `{extractor_name}`: {e}")
     return filtered_extractors
@@ -137,22 +135,23 @@ def parse_args():
 
     inf_arg = parser.add_argument("--input_file", "--input-file", required=True, help="The file to be converted. Must be .txt", type=Path)
     parser.add_argument("--ai_content_check_provider", required=False, type=parse_model, help="Use an AI model to check wether the content of the file contains threat intelligence. Paticularly useful to weed out vendor marketing.")
+    parser.add_argument("--always_extract", default=True, type=parse_bool, help="Whether to always extract or not depending on output of ai_content_check_provider. Default, extracts even when content_check returns describes_incident=False")
     name_arg = parser.add_argument("--name", required=True, help="Name of the file, max 124 chars", default="stix-out")
     parser.add_argument("--created", required=False, default=datetime.now(), help="Allow user to optionally pass --created time in input, which will hardcode the time used in created times")
     parser.add_argument("--ai_settings_extractions", required=False, type=parse_model, help="(required if AI extraction enabled): passed in format provider:model e.g. openai:gpt4o. Can pass more than one value to get extractions from multiple providers.", metavar="provider[:model]", nargs='+')
     parser.add_argument("--ai_settings_relationships", required=False, type=parse_model, help="(required if AI relationship enabled): passed in format `provider:model`. Can only pass one model at this time.", metavar="provider[:model]")
     parser.add_argument("--labels", type=parse_labels)
-    parser.add_argument("--relationship_mode", choices=["ai", "standard"], required=True)
+    rmode_arg = parser.add_argument("--relationship_mode", choices=["ai", "standard"], required=True)
     parser.add_argument("--report_id", type=uuid.UUID, required=False, help="id to use instead of automatically generated `{name}+{created}`", metavar="VALID_UUID")
     parser.add_argument("--confidence", type=range_type(0,100), default=None, help="value between 0-100. Default if not passed is null.", metavar="[0-100]")
     parser.add_argument("--tlp_level", "--tlp-level", choices=TLP_LEVEL.levels().keys(), default="clear", help="TLP level, default is clear")
-    parser.add_argument("--use_extractions", "--use-extractions", default={}, type=functools.partial(parse_extractors_globbed, "extractor", all_extractors), help="Specify extraction types from the default/local extractions .yaml file", metavar="EXTRACTION1,EXTRACTION2")
+    extractions_arg = parser.add_argument("--use_extractions", "--use-extractions", default={}, type=functools.partial(parse_extractors_globbed, "extractor", all_extractors), help="Specify extraction types from the default/local extractions .yaml file", metavar="EXTRACTION1,EXTRACTION2")
     parser.add_argument("--use_identity", "--use-identity", help="Specify an identity file id (e.g., {\"type\":\"identity\",\"name\":\"demo\",\"identity_class\":\"system\"})", metavar="[stix2 identity json]", type=parse_stix)
     parser.add_argument("--external_refs", type=parse_ref, help="pass additional `external_references` entry (or entries) to the report object created. e.g --external_ref author=dogesec link=https://dkjjadhdaj.net", default=[], metavar="{source_name}={external_id}", action="extend", nargs='+')
     parser.add_argument('--ignore_image_refs', default=True, type=parse_bool)
     parser.add_argument('--ignore_link_refs', default=True, type=parse_bool)
     parser.add_argument("--ignore_extraction_boundary", default=False, type=parse_bool, help="default if not passed is `false`, but if set to `true` will ignore boundary capture logic for extractions")
-    parser.add_argument('--ai_create_attack_flow', default=False, action='store_true', help="create attack flow for attack objects in report/bundle")
+    aflow_arg = parser.add_argument('--ai_create_attack_flow', default=False, action='store_true', help="create attack flow for attack objects in report/bundle")
 
     args = parser.parse_args()
     if not args.input_file.exists():
@@ -161,13 +160,13 @@ def parse_args():
         raise argparse.ArgumentError(name_arg, "max 124 characters")
 
     if args.relationship_mode == 'ai' and not args.ai_settings_relationships:
-
+        raise argparse.ArgumentError(rmode_arg, "relationship_mode is set to AI, --ai_settings_relationships is required")
 
     if args.ai_create_attack_flow and not args.ai_settings_relationships:
-
+        raise argparse.ArgumentError(aflow_arg, "--ai_create_attack_flow requires --ai_settings_relationships")
     #### process --use-extractions
     if args.use_extractions.get('ai') and not args.ai_settings_extractions:
-
+        raise argparse.ArgumentError(extractions_arg, "ai based extractors are passed, --ai_settings_extractions is required")
 
     args.all_extractors = all_extractors
     return args
@@ -218,7 +217,6 @@ def extract_all(bundler: txt2stixBundler, extractors_map, text_content, ai_extra
         logging.info("running extractor: %s", extractor.extractor_name)
         try:
             ai_extracts = extractor.extract_objects(text_content, extractors_map["ai"].values())
-            ai_extracts = ai_extracts.model_dump().get('extractions', [])
             bundler.process_observables(ai_extracts)
             all_extracts[f"ai-{extractor.extractor_name}"] = ai_extracts
         except BaseException as e:
@@ -231,8 +229,7 @@ def extract_relationships_with_ai(bundler: txt2stixBundler, text_content, all_ex
     relationships = None
     try:
         all_extracts = list(itertools.chain(*all_extracts.values()))
-
-        relationships = ai_extractor_session.extract_relationships(text_content, all_extracts, relationship_types)
+        relationships = ai_extractor_session.extract_relationships(text_content, all_extracts, RELATIONSHIP_TYPES)
         relationships = relationships.model_dump()
         log_notes(relationships, "Relationships")
         bundler.process_relationships(relationships['relationships'])
@@ -277,6 +274,7 @@ def run_txt2stix(bundler: txt2stixBundler, preprocessed_text: str, extractors_ma
         logging.info(retval.content_check.model_dump_json())
         for classification in retval.content_check.incident_classification:
             bundler.report.labels.append(f'txt2stix:{classification}'.lower())
+        bundler.add_summary(retval.content_check.summary, model.extractor_name)
 
     if should_extract or always_extract:
         if extractors_map.get("ai"):
@@ -313,7 +311,6 @@ def main():
 
     bundler = txt2stixBundler(args.name, args.use_identity, args.tlp_level, input_text, args.confidence, args.all_extractors, args.labels, created=args.created, report_id=args.report_id, external_references=args.external_refs)
     log_notes(sys.argv, "Config")
-    convo_str = None
 
     data = run_txt2stix(
         bundler, preprocessed_text, args.use_extractions,
```
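A pattern worth noting in `parse_args`: the values kept from `add_argument` (`rmode_arg`, `extractions_arg`, `aflow_arg`) are `argparse.Action` objects, retained so that post-parse cross-argument validation can raise `argparse.ArgumentError` against the specific flag, which prefixes the message with the offending option's name. A self-contained sketch of the same technique:

```python
import argparse

parser = argparse.ArgumentParser()
# Keep the Action returned by add_argument to reference this flag later.
rmode_arg = parser.add_argument("--relationship_mode", choices=["ai", "standard"], required=True)
parser.add_argument("--ai_settings_relationships", required=False)

args = parser.parse_args()
# Cross-argument validation that argparse cannot express declaratively:
if args.relationship_mode == "ai" and not args.ai_settings_relationships:
    raise argparse.ArgumentError(
        rmode_arg, "relationship_mode is set to AI, --ai_settings_relationships is required"
    )
```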
txt2stix/utils.py
CHANGED
```diff
@@ -1,6 +1,5 @@
 import os
 import pkgutil
-import re
 from pathlib import Path
 from typing import Dict
 from pydantic import BaseModel, Field
@@ -45,7 +44,6 @@ class ImageLinkRemover(MarkdownRenderer):
             del img['src']
         return soup.decode()
 
-import tldextract
 
 
 class Txt2StixData(BaseModel):
@@ -81,6 +79,9 @@ def validate_file_mimetype(file_name):
     return FILE_EXTENSIONS.get(ext)
 
 
+
+
 TLDs = [tld.lower() for tld in read_included_file('helpers/tlds.txt').splitlines()]
 REGISTRY_PREFIXES = [key.lower() for key in read_included_file('helpers/windows_registry_key_prefix.txt').splitlines()]
-FILE_EXTENSIONS = dict(line.lower().split(',') for line in read_included_file('helpers/mimetype_filename_extension_list.csv').splitlines())
+FILE_EXTENSIONS = dict(line.lower().split(',') for line in read_included_file('helpers/mimetype_filename_extension_list.csv').splitlines())
+RELATIONSHIP_TYPES = [x for x in read_included_file('helpers/stix_relationship_types.txt').splitlines() if x and not x.startswith('#')]
```
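The new `RELATIONSHIP_TYPES` constant follows the same include-file pattern as `TLDs` and `REGISTRY_PREFIXES`: load a packaged helper file, then drop blank lines and `#` comments. With it in place, `extract_relationships_with_ai` in txt2stix.py passes this module-level list instead of its caller-supplied `relationship_types` argument. A minimal sketch of the filtering; the file contents shown are illustrative, not taken from the package:

```python
# Illustrative stand-in for helpers/stix_relationship_types.txt.
text = "# STIX relationship types\nrelated-to\n\nuses\ntargets\n"

# Same filtering as the new constant: skip empty lines and comments.
relationship_types = [x for x in text.splitlines() if x and not x.startswith('#')]
assert relationship_types == ["related-to", "uses", "targets"]
```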
{txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/METADATA
CHANGED
```diff
@@ -1,10 +1,12 @@
 Metadata-Version: 2.4
 Name: txt2stix
-Version:
+Version: 1.0.1
 Summary: txt2stix is a Python script that is designed to identify and extract IoCs and TTPs from text files, identify the relationships between them, convert them to STIX 2.1 objects, and output as a STIX 2.1 bundle.
 Project-URL: Homepage, https://github.com/muchdogesec/txt2stix
 Project-URL: Issues, https://github.com/muchdogesec/txt2stix/issues
-
+Project-URL: dogesec HQ, https://dogesec.com
+Author: dogesec
+Maintainer: dogesec
 License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
@@ -16,7 +18,6 @@ Requires-Dist: llama-index-core>=0.12.42
 Requires-Dist: llama-index-llms-anthropic>=0.7.2
 Requires-Dist: llama-index-llms-deepseek>=0.1.2
 Requires-Dist: llama-index-llms-gemini>=0.5.0
-Requires-Dist: llama-index-llms-openai-like>=0.4.0
 Requires-Dist: llama-index-llms-openai>=0.4.5
 Requires-Dist: llama-index-llms-openrouter>=0.3.2
 Requires-Dist: mistune>=3.0.2
```
{txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/RECORD
CHANGED
```diff
@@ -1,23 +1,23 @@
 txt2stix/__init__.py,sha256=Sm_VT913IFuAZ6dJEdVz3baPwC5VYtHySVfBAOUG92w,803
 txt2stix/attack_flow.py,sha256=WWlukuQYrGW1SJ1DnhfROYC5Ck4WYqNifgmtiuyDg7E,4177
-txt2stix/bundler.py,sha256=
+txt2stix/bundler.py,sha256=EVTcVgZyVMwb6XjNQ3Gyj7zm44UErXo9wbVr2JGsjQQ,16797
 txt2stix/common.py,sha256=ISnGNKqJPE1EcfhL-x_4G18mcwt1urmorkW-ru9kV-0,585
 txt2stix/extractions.py,sha256=ExynKWSeuWOC0q6i4SuU1NkeNw7uoOm6xu0YtJRVaiE,2058
 txt2stix/indicator.py,sha256=c6S0xx0K8JM-PT_Qd1PlN_ZlDXdnEwiRS8529iUp3yg,30774
 txt2stix/lookups.py,sha256=h42YVtYUkWZm6ZPv2h5hHDHDzDs3yBqrT_T7pj2MDZI,2301
 txt2stix/retriever.py,sha256=zU8L00RSh9N5J0NpAo3CM3IHsuZsNVjJGohRisXcMRs,5167
 txt2stix/stix.py,sha256=9nXD9a2dCY4uaatl-mlIA1k3srwQBhGW-tUSho3iYe0,30
-txt2stix/txt2stix.py,sha256=
-txt2stix/utils.py,sha256=
+txt2stix/txt2stix.py,sha256=Vt9CUsSEO1bw5SS7vlsVxktFz1nW8M_G4-RN6idOTA0,16444
+txt2stix/utils.py,sha256=P66yq-SphsQu2S9At6BfYpavfghXsZqh4h6W13HUEoI,3256
 txt2stix/ai_extractor/__init__.py,sha256=RcXh30ZcIA3Fva2bOPH4EtWq6ffWhGE39C_II8ElAx0,417
 txt2stix/ai_extractor/anthropic.py,sha256=mdz-8CB-BSCEqnK5l35DRZURVPUf508ef2b48XMxmuk,441
-txt2stix/ai_extractor/base.py,sha256=
+txt2stix/ai_extractor/base.py,sha256=MAtnKvWUmWZgnzwDM0i2n-WrRWq69du4KVcapNMIsEg,3523
 txt2stix/ai_extractor/deepseek.py,sha256=2XehIYbWXG6Odq68nQX4CNtl5GdmBlAmjLP_lG2eEFo,660
 txt2stix/ai_extractor/gemini.py,sha256=yJC7knYzl-TScyCBd-MTpUf-NT6znC25E7vXxNMqjLU,578
 txt2stix/ai_extractor/openai.py,sha256=DtllzeVhZw1231hj35vn1U8V2MMzm8wM7mqKLBkxazQ,489
 txt2stix/ai_extractor/openrouter.py,sha256=hAA6mTOMcpA28XYsOCvuJH7WMJqXCxfqZGJf_VrDsIk,628
-txt2stix/ai_extractor/prompts.py,sha256=
-txt2stix/ai_extractor/utils.py,sha256=
+txt2stix/ai_extractor/prompts.py,sha256=3PewwmNptHEvsG1r1Yk5rs3oaEX5Jf3BRFyGaHru6r0,8137
+txt2stix/ai_extractor/utils.py,sha256=mnGIDiDa8ecwyRqDuYcKBIOiXfeQsivKxe93CfGW660,4440
 txt2stix/pattern/__init__.py,sha256=K9ofaP2AOikvzb48VSBpJZijckdqufZxSzr_kbRypLY,491
 txt2stix/pattern/extractors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 txt2stix/pattern/extractors/base_extractor.py,sha256=ly80rp-L40g7DbhrGiCvhPWI95-ZFMtAQUEC-fH6Y-o,6130
@@ -112,8 +112,8 @@ txt2stix/includes/lookups/threat_actor.txt,sha256=QfDO9maQuqKBgW_Sdd7VGv1SHZ9Ra-
 txt2stix/includes/lookups/tld.txt,sha256=-MEgJea2NMG_KDsnc4BVvI8eRk5Dm93L-t8SGYx5wMo,8598
 txt2stix/includes/lookups/tool.txt,sha256=HGKG6JpUE26w6ezzSxOjBkp15UpSaB7N-mZ_NU_3G7A,6
 txt2stix/includes/tests/test_cases.yaml,sha256=QD1FdIunpPkOpsn6wJRqs2vil_hv8OSVaqUp4a96aZg,22247
-txt2stix-
-txt2stix-
-txt2stix-
-txt2stix-
-txt2stix-
+txt2stix-1.0.1.dist-info/METADATA,sha256=rg6DP9idqFjH6eK2FbVmHxCgk-XnQ9mNTbFg3re-dvE,12916
+txt2stix-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+txt2stix-1.0.1.dist-info/entry_points.txt,sha256=x6QPtt65hWeomw4IpJ_wQUesBl1M4WOLODbhOKyWMFg,55
+txt2stix-1.0.1.dist-info/licenses/LICENSE,sha256=BK8Ppqlc4pdgnNzIxnxde0taoQ1BgicdyqmBvMiNYgY,11364
+txt2stix-1.0.1.dist-info/RECORD,,
```
{txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/WHEEL
File without changes

{txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/entry_points.txt
File without changes

{txt2stix-0.0.4.dist-info → txt2stix-1.0.1.dist-info}/licenses/LICENSE
File without changes