txt2stix 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. txt2stix/__init__.py +33 -0
  2. txt2stix/ai_extractor/__init__.py +15 -0
  3. txt2stix/ai_extractor/anthropic.py +12 -0
  4. txt2stix/ai_extractor/base.py +87 -0
  5. txt2stix/ai_extractor/deepseek.py +19 -0
  6. txt2stix/ai_extractor/gemini.py +18 -0
  7. txt2stix/ai_extractor/openai.py +15 -0
  8. txt2stix/ai_extractor/openrouter.py +20 -0
  9. txt2stix/ai_extractor/prompts.py +164 -0
  10. txt2stix/ai_extractor/utils.py +85 -0
  11. txt2stix/attack_flow.py +101 -0
  12. txt2stix/bundler.py +428 -0
  13. txt2stix/common.py +23 -0
  14. txt2stix/extractions.py +59 -0
  15. txt2stix/includes/__init__.py +0 -0
  16. txt2stix/includes/extractions/ai/config.yaml +1023 -0
  17. txt2stix/includes/extractions/lookup/config.yaml +393 -0
  18. txt2stix/includes/extractions/pattern/config.yaml +609 -0
  19. txt2stix/includes/helpers/mimetype_filename_extension_list.csv +936 -0
  20. txt2stix/includes/helpers/stix_relationship_types.txt +41 -0
  21. txt2stix/includes/helpers/tlds.txt +1446 -0
  22. txt2stix/includes/helpers/windows_registry_key_prefix.txt +12 -0
  23. txt2stix/includes/lookups/_README.md +11 -0
  24. txt2stix/includes/lookups/_generate_lookups.py +247 -0
  25. txt2stix/includes/lookups/attack_pattern.txt +1 -0
  26. txt2stix/includes/lookups/campaign.txt +1 -0
  27. txt2stix/includes/lookups/country_iso3166_alpha2.txt +249 -0
  28. txt2stix/includes/lookups/course_of_action.txt +1 -0
  29. txt2stix/includes/lookups/disarm_id_v1_5.txt +345 -0
  30. txt2stix/includes/lookups/disarm_name_v1_5.txt +347 -0
  31. txt2stix/includes/lookups/extensions.txt +78 -0
  32. txt2stix/includes/lookups/identity.txt +1 -0
  33. txt2stix/includes/lookups/infrastructure.txt +1 -0
  34. txt2stix/includes/lookups/intrusion_set.txt +1 -0
  35. txt2stix/includes/lookups/malware.txt +2 -0
  36. txt2stix/includes/lookups/mitre_atlas_id_v4_5_2.txt +116 -0
  37. txt2stix/includes/lookups/mitre_atlas_name_v4_5_2.txt +117 -0
  38. txt2stix/includes/lookups/mitre_attack_enterprise_aliases_v16_0.txt +1502 -0
  39. txt2stix/includes/lookups/mitre_attack_enterprise_id_v16_0.txt +1656 -0
  40. txt2stix/includes/lookups/mitre_attack_enterprise_name_v16_0.txt +1765 -0
  41. txt2stix/includes/lookups/mitre_attack_ics_aliases_v16_0.txt +141 -0
  42. txt2stix/includes/lookups/mitre_attack_ics_id_v16_0.txt +254 -0
  43. txt2stix/includes/lookups/mitre_attack_ics_name_v16_0.txt +293 -0
  44. txt2stix/includes/lookups/mitre_attack_mobile_aliases_v16_0.txt +159 -0
  45. txt2stix/includes/lookups/mitre_attack_mobile_id_v16_0.txt +277 -0
  46. txt2stix/includes/lookups/mitre_attack_mobile_name_v16_0.txt +296 -0
  47. txt2stix/includes/lookups/mitre_capec_id_v3_9.txt +559 -0
  48. txt2stix/includes/lookups/mitre_capec_name_v3_9.txt +560 -0
  49. txt2stix/includes/lookups/mitre_cwe_id_v4_15.txt +939 -0
  50. txt2stix/includes/lookups/mitre_cwe_name_v4_15.txt +939 -0
  51. txt2stix/includes/lookups/threat_actor.txt +1 -0
  52. txt2stix/includes/lookups/tld.txt +1422 -0
  53. txt2stix/includes/lookups/tool.txt +1 -0
  54. txt2stix/includes/tests/test_cases.yaml +695 -0
  55. txt2stix/indicator.py +860 -0
  56. txt2stix/lookups.py +68 -0
  57. txt2stix/pattern/__init__.py +13 -0
  58. txt2stix/pattern/extractors/__init__.py +0 -0
  59. txt2stix/pattern/extractors/base_extractor.py +167 -0
  60. txt2stix/pattern/extractors/card/README.md +34 -0
  61. txt2stix/pattern/extractors/card/__init__.py +15 -0
  62. txt2stix/pattern/extractors/card/amex_card_extractor.py +52 -0
  63. txt2stix/pattern/extractors/card/diners_card_extractor.py +47 -0
  64. txt2stix/pattern/extractors/card/discover_card_extractor.py +48 -0
  65. txt2stix/pattern/extractors/card/jcb_card_extractor.py +43 -0
  66. txt2stix/pattern/extractors/card/master_card_extractor.py +63 -0
  67. txt2stix/pattern/extractors/card/union_card_extractor.py +38 -0
  68. txt2stix/pattern/extractors/card/visa_card_extractor.py +46 -0
  69. txt2stix/pattern/extractors/crypto/__init__.py +3 -0
  70. txt2stix/pattern/extractors/crypto/btc_extractor.py +38 -0
  71. txt2stix/pattern/extractors/directory/__init__.py +10 -0
  72. txt2stix/pattern/extractors/directory/unix_directory_extractor.py +40 -0
  73. txt2stix/pattern/extractors/directory/unix_file_path_extractor.py +42 -0
  74. txt2stix/pattern/extractors/directory/windows_directory_path_extractor.py +47 -0
  75. txt2stix/pattern/extractors/directory/windows_file_path_extractor.py +42 -0
  76. txt2stix/pattern/extractors/domain/__init__.py +8 -0
  77. txt2stix/pattern/extractors/domain/domain_extractor.py +39 -0
  78. txt2stix/pattern/extractors/domain/hostname_extractor.py +36 -0
  79. txt2stix/pattern/extractors/domain/sub_domain_extractor.py +49 -0
  80. txt2stix/pattern/extractors/hashes/__init__.py +16 -0
  81. txt2stix/pattern/extractors/hashes/md5_extractor.py +16 -0
  82. txt2stix/pattern/extractors/hashes/sha1_extractor.py +14 -0
  83. txt2stix/pattern/extractors/hashes/sha224_extractor.py +18 -0
  84. txt2stix/pattern/extractors/hashes/sha2_256_exactor.py +14 -0
  85. txt2stix/pattern/extractors/hashes/sha2_512_exactor.py +13 -0
  86. txt2stix/pattern/extractors/hashes/sha3_256_exactor.py +15 -0
  87. txt2stix/pattern/extractors/hashes/sha3_512_exactor.py +16 -0
  88. txt2stix/pattern/extractors/helper.py +64 -0
  89. txt2stix/pattern/extractors/ip/__init__.py +14 -0
  90. txt2stix/pattern/extractors/ip/ipv4_cidr_extractor.py +49 -0
  91. txt2stix/pattern/extractors/ip/ipv4_extractor.py +18 -0
  92. txt2stix/pattern/extractors/ip/ipv4_port_extractor.py +42 -0
  93. txt2stix/pattern/extractors/ip/ipv6_cidr_extractor.py +18 -0
  94. txt2stix/pattern/extractors/ip/ipv6_extractor.py +16 -0
  95. txt2stix/pattern/extractors/ip/ipv6_port_extractor.py +46 -0
  96. txt2stix/pattern/extractors/others/__init__.py +22 -0
  97. txt2stix/pattern/extractors/others/asn_extractor.py +14 -0
  98. txt2stix/pattern/extractors/others/cpe_extractor.py +29 -0
  99. txt2stix/pattern/extractors/others/cve_extractor.py +14 -0
  100. txt2stix/pattern/extractors/others/email_extractor.py +21 -0
  101. txt2stix/pattern/extractors/others/filename_extractor.py +17 -0
  102. txt2stix/pattern/extractors/others/iban_extractor.py +15 -0
  103. txt2stix/pattern/extractors/others/mac_address_extractor.py +13 -0
  104. txt2stix/pattern/extractors/others/phonenumber_extractor.py +41 -0
  105. txt2stix/pattern/extractors/others/user_agent_extractor.py +20 -0
  106. txt2stix/pattern/extractors/others/windows_registry_key_extractor.py +18 -0
  107. txt2stix/pattern/extractors/url/__init__.py +7 -0
  108. txt2stix/pattern/extractors/url/url_extractor.py +22 -0
  109. txt2stix/pattern/extractors/url/url_file_extractor.py +21 -0
  110. txt2stix/pattern/extractors/url/url_path_extractor.py +74 -0
  111. txt2stix/retriever.py +126 -0
  112. txt2stix/stix.py +1 -0
  113. txt2stix/txt2stix.py +336 -0
  114. txt2stix/utils.py +86 -0
  115. txt2stix-0.0.4.dist-info/METADATA +190 -0
  116. txt2stix-0.0.4.dist-info/RECORD +119 -0
  117. txt2stix-0.0.4.dist-info/WHEEL +4 -0
  118. txt2stix-0.0.4.dist-info/entry_points.txt +2 -0
  119. txt2stix-0.0.4.dist-info/licenses/LICENSE +202 -0
txt2stix/txt2stix.py ADDED
@@ -0,0 +1,336 @@
1
+ import argparse, dotenv
2
+ from datetime import datetime
3
+ import glob
4
+ import uuid
5
+ import itertools
6
+ import fnmatch
7
+ import re
8
+ from pathlib import Path
9
+ import sys, os
10
+
11
+ from pydantic import BaseModel
12
+
13
+ from txt2stix.ai_extractor.utils import DescribesIncident
14
+ from txt2stix.attack_flow import parse_flow
15
+
16
+
17
+ from .utils import Txt2StixData, remove_links
18
+
19
+ from .common import UUID_NAMESPACE, FatalException
20
+
21
+ from .bundler import txt2stixBundler, parse_stix, TLP_LEVEL
22
+ from .import extractions, lookups, pattern
23
+ from types import SimpleNamespace
24
+ import functools
25
+ from fnmatch import filter
26
+ from .ai_extractor import ALL_AI_EXTRACTORS, BaseAIExtractor, ModelError
27
+ from stix2.serialization import serialize as stix2_serialize
28
+ from stix2 import Bundle
29
+
30
+ import json, logging
31
+
32
+
33
def newLogger(name: str) -> logging.Logger:
    """Configure the root logger and return it.

    Installs a StreamHandler at INFO level (StreamHandler defaults to
    stderr) while the root logger itself is set to DEBUG, so file handlers
    added later via setLogFile() can capture more detail than the console.
    """
    stream_handler = logging.StreamHandler()  # NOTE(review): writes to stderr only, despite logs looking console-bound to both streams
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(
        level=logging.DEBUG,  # root at DEBUG; individual handlers filter by their own level
        format=f"%(asctime)s [{name}] [%(levelname)s] %(message)s",
        handlers=[stream_handler],
        datefmt='%d-%b-%y %H:%M:%S'
    )

    return logging.root
45
+
46
def setLogFile(logger, file: Path):
    """Attach a DEBUG-level file handler to `logger`, writing to `file`.

    Parent directories are created as needed and the file is truncated
    ("w" mode). A banner line is logged so runs are easy to delimit.
    """
    file.parent.mkdir(parents=True, exist_ok=True)
    logger.info(f"Saving log to `{file.absolute()}`")
    handler = logging.FileHandler(file, "w")
    # use the public setFormatter() API instead of assigning the private
    # `formatter` attribute directly
    handler.setFormatter(logging.Formatter(fmt='%(levelname)s %(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S'))
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.info("=====================txt2stix======================")
54
+
55
+
56
# Default: assume an `includes` directory sits next to the package root;
# prefer the packaged `txt2stix.includes` module when it is importable.
MODULE_PATH = Path(__file__).parent.parent
INCLUDES_PATH = MODULE_PATH/"includes"
try:
    from . import includes
    INCLUDES_PATH = Path(includes.__file__).parent
except ImportError:
    # narrowed from a bare `except:` — only a failed import should fall back
    # to the filesystem-relative default computed above
    pass
63
+
64
def split_comma(s: str) -> list[str]:
    """Split `s` on commas, discarding empty segments."""
    parts = s.split(",")
    return [part for part in parts if part]
66
+
67
def range_type(min, max):
    """Build an argparse `type=` callable accepting ints in [min, max] inclusive."""
    def check(raw):
        number = int(raw)
        if number < min or number > max:
            raise argparse.ArgumentTypeError(f'value {number} not in range [{min}-{max}]')
        return number
    return check
75
+
76
def parse_labels(labels: str) -> list[str]:
    """Validate a comma-separated label list; each label must be alphanumeric."""
    parsed = labels.split(",")
    invalid = [candidate for candidate in parsed if not re.fullmatch(r"[a-zA-Z0-9]+", candidate)]
    if invalid:
        raise argparse.ArgumentTypeError(f"invalid label: {invalid[0]}")
    return parsed
83
+
84
def parse_extractors_globbed(type, all_extractors, names):
    """Resolve a comma-separated list of extractor slugs (glob patterns allowed)
    into a mapping of {extractor_type: {slug: extractor}}.

    Each pattern must match at least one known slug. Lookup and pattern
    extractors are eagerly loaded here so configuration problems surface as
    argparse errors at parse time rather than mid-run.
    """
    globbed_names = set()
    for name in names.split(","):
        # fnmatch supports `*`/`?` wildcards, e.g. `pattern_*`
        matches = fnmatch.filter(all_extractors.keys(), name)
        if not matches:
            raise argparse.ArgumentTypeError(f'`{name}` has 0 matches')
        globbed_names.update(matches)
    filtered_extractors = {}
    for extractor_name in globbed_names:
        try:
            extractor = all_extractors[extractor_name]
            extraction_processor = filtered_extractors.get(extractor.type, {})
            if extractor.type in ["lookup"]:
                # load the lookup table now so missing files fail fast
                lookups.load_lookup(extractor)
            if extractor.type == "pattern":
                pattern.load_extractor(extractor)
            filtered_extractors[extractor.type] = extraction_processor
            extraction_processor[extractor_name] = extractor
        except KeyError:
            raise argparse.ArgumentTypeError(f"no such {type} slug `{extractor_name}`")
        except BaseException as e:
            # re-raise loader failures as argparse errors, keeping the slug context
            raise argparse.ArgumentTypeError(f"{type} `{extractor_name}`: {e}")
    return filtered_extractors
107
+
108
def parse_ref(value):
    """Parse a `key=value` CLI token into an external-reference dict."""
    match = re.match(r'(.+?)=(.+)', value)
    if match is None:
        raise argparse.ArgumentTypeError("must be in format key=value")
    source_name, external_id = match.groups()
    return dict(source_name=source_name, external_id=external_id)
113
+
114
def parse_model(value: str):
    """Instantiate an AI extractor from a `provider[:model]` CLI string.

    e.g. `openai:gpt-4o` -> ALL_AI_EXTRACTORS['openai'](model='gpt-4o').
    Raises argparse.ArgumentTypeError for an unknown provider and ModelError
    (chained to the original cause) when the provider fails to initialize.
    """
    splits = value.split(':', 1)
    provider = splits[0]
    if provider not in ALL_AI_EXTRACTORS:
        raise argparse.ArgumentTypeError(f"invalid AI provider in `{value}`, must be one of {list(ALL_AI_EXTRACTORS)}")
    provider = ALL_AI_EXTRACTORS[provider]

    try:
        if len(splits) == 2:
            # an explicit model name was supplied after the colon
            return provider(model=splits[1])
        return provider()
    except Exception as e:
        raise ModelError(f"Unable to initialize model `{value}`") from e
127
+
128
def parse_bool(value: str):
    """Interpret common truthy CLI strings (case-insensitive); all else is False."""
    return value.lower() in ("yes", "y", "true", "1")
131
+
132
def parse_args():
    """Parse and validate CLI arguments for the txt2stix entry point.

    Performs cross-argument validation (AI settings are mandatory for the AI
    relationship mode, attack-flow generation, and AI extractors) and
    attaches the full extractor registry to the namespace as
    `args.all_extractors`.
    """
    EXTRACTORS_PATH = INCLUDES_PATH/"extractions"  # NOTE(review): apparently unused — config is loaded from INCLUDES_PATH below
    all_extractors = extractions.parse_extraction_config(INCLUDES_PATH)

    parser = argparse.ArgumentParser(description="File Conversion Tool")

    # handles are kept so argparse.ArgumentError can reference the argument below
    inf_arg = parser.add_argument("--input_file", "--input-file", required=True, help="The file to be converted. Must be .txt", type=Path)
    parser.add_argument("--ai_content_check_provider", required=False, type=parse_model, help="Use an AI model to check wether the content of the file contains threat intelligence. Paticularly useful to weed out vendor marketing.")
    name_arg = parser.add_argument("--name", required=True, help="Name of the file, max 124 chars", default="stix-out")
    parser.add_argument("--created", required=False, default=datetime.now(), help="Allow user to optionally pass --created time in input, which will hardcode the time used in created times")
    parser.add_argument("--ai_settings_extractions", required=False, type=parse_model, help="(required if AI extraction enabled): passed in format provider:model e.g. openai:gpt4o. Can pass more than one value to get extractions from multiple providers.", metavar="provider[:model]", nargs='+')
    parser.add_argument("--ai_settings_relationships", required=False, type=parse_model, help="(required if AI relationship enabled): passed in format `provider:model`. Can only pass one model at this time.", metavar="provider[:model]")
    parser.add_argument("--labels", type=parse_labels)
    parser.add_argument("--relationship_mode", choices=["ai", "standard"], required=True)
    parser.add_argument("--report_id", type=uuid.UUID, required=False, help="id to use instead of automatically generated `{name}+{created}`", metavar="VALID_UUID")
    parser.add_argument("--confidence", type=range_type(0,100), default=None, help="value between 0-100. Default if not passed is null.", metavar="[0-100]")
    parser.add_argument("--tlp_level", "--tlp-level", choices=TLP_LEVEL.levels().keys(), default="clear", help="TLP level, default is clear")
    parser.add_argument("--use_extractions", "--use-extractions", default={}, type=functools.partial(parse_extractors_globbed, "extractor", all_extractors), help="Specify extraction types from the default/local extractions .yaml file", metavar="EXTRACTION1,EXTRACTION2")
    parser.add_argument("--use_identity", "--use-identity", help="Specify an identity file id (e.g., {\"type\":\"identity\",\"name\":\"demo\",\"identity_class\":\"system\"})", metavar="[stix2 identity json]", type=parse_stix)
    parser.add_argument("--external_refs", type=parse_ref, help="pass additional `external_references` entry (or entries) to the report object created. e.g --external_ref author=dogesec link=https://dkjjadhdaj.net", default=[], metavar="{source_name}={external_id}", action="extend", nargs='+')
    parser.add_argument('--ignore_image_refs', default=True, type=parse_bool)
    parser.add_argument('--ignore_link_refs', default=True, type=parse_bool)
    parser.add_argument("--ignore_extraction_boundary", default=False, type=parse_bool, help="default if not passed is `false`, but if set to `true` will ignore boundary capture logic for extractions")
    parser.add_argument('--ai_create_attack_flow', default=False, action='store_true', help="create attack flow for attack objects in report/bundle")

    args = parser.parse_args()
    # post-parse validation; ArgumentError is raised (not parser.error) so
    # main() can log it and exit without a usage dump
    if not args.input_file.exists():
        raise argparse.ArgumentError(inf_arg, "cannot open file")
    if len(args.name) > 124:
        raise argparse.ArgumentError(name_arg, "max 124 characters")

    # the relationship model is reused for both AI relationship mode and
    # attack-flow generation, so both require --ai_settings_relationships
    if args.relationship_mode == 'ai' and not args.ai_settings_relationships:
        parser.error("relationship_mode is set to AI, --ai_settings_relationships is required")

    if args.ai_create_attack_flow and not args.ai_settings_relationships:
        parser.error("--ai_create_attack_flow requires --ai_settings_relationships")
    #### process --use-extractions
    if args.use_extractions.get('ai') and not args.ai_settings_extractions:
        parser.error("ai based extractors are passed, --ai_settings_extractions is required")

    args.all_extractors = all_extractors
    return args
174
+
175
# environment variables that must be present (and non-empty) before a run
REQUIRED_ENV_VARIABLES = [
    "INPUT_TOKEN_LIMIT",
    "CTIBUTLER_BASE_URL",
    "VULMATCH_BASE_URL",
]
def load_env():
    """Fail fast with FatalException if any required env variable is unset or empty."""
    for variable in REQUIRED_ENV_VARIABLES:
        if not os.getenv(variable):
            raise FatalException(f"env variable `{variable}` required")
184
+
185
+
186
def log_notes(content, type):
    """Dump `content` as pretty-printed JSON to the debug log, framed by banners."""
    banner = " ========================= {} ========================= "
    logging.debug(banner.format(type))
    logging.debug(banner.format('+' * len(type)))
    logging.debug(json.dumps(content, sort_keys=True, indent=4))
    logging.debug(banner.format('-' * len(type)))
191
+
192
def extract_all(bundler: txt2stixBundler, extractors_map, text_content, ai_extractors: list[BaseAIExtractor]=[], **kwargs):
    """Run every enabled extractor group (lookup, pattern, ai) over the text.

    Observables are pushed into `bundler` as each group completes; returns a
    dict of {group_name: extractions} for later relationship processing.
    Failures in one group are logged and do not stop the others.
    NOTE(review): `ai_extractors=[]` is a mutable default — safe only while
    this function never mutates it; confirm before changing.
    """
    assert ai_extractors or not extractors_map.get("ai"), "There should be at least one AI extractor in ai_extractors"

    # pad with newlines so extractors can anchor on line boundaries at the edges
    text_content = "\n"+text_content+"\n"
    all_extracts = dict()
    if extractors_map.get("lookup"):
        try:
            lookup_extracts = lookups.extract_all(extractors_map["lookup"].values(), text_content)
            bundler.process_observables(lookup_extracts)
            all_extracts["lookup"] = lookup_extracts
        except BaseException as e:
            # deliberately best-effort: log and continue with the other groups
            logging.exception("lookup extraction failed", exc_info=True)

    if extractors_map.get("pattern"):
        try:
            logging.info("using pattern extractors")
            pattern_extracts = pattern.extract_all(extractors_map["pattern"].values(), text_content, ignore_extraction_boundary=kwargs.get('ignore_extraction_boundary', False))
            bundler.process_observables(pattern_extracts)
            all_extracts["pattern"] = pattern_extracts
        except BaseException as e:
            logging.exception("pattern extraction failed", exc_info=True)

    if extractors_map.get("ai"):
        logging.info("using ai extractors")

        # each provider/model runs independently and its results are keyed
        # separately as `ai-<extractor_name>`
        for extractor in ai_extractors:
            logging.info("running extractor: %s", extractor.extractor_name)
            try:
                ai_extracts = extractor.extract_objects(text_content, extractors_map["ai"].values())
                ai_extracts = ai_extracts.model_dump().get('extractions', [])
                bundler.process_observables(ai_extracts)
                all_extracts[f"ai-{extractor.extractor_name}"] = ai_extracts
            except BaseException as e:
                logging.exception("AI extraction failed for %s", extractor.extractor_name, exc_info=True)

    log_notes(all_extracts, "Extractions")
    return all_extracts
229
+
230
def extract_relationships_with_ai(bundler: txt2stixBundler, text_content, all_extracts, ai_extractor_session: BaseAIExtractor):
    """Ask the relationship model to infer SROs between all extractions.

    Returns the model's relationship dump, or None on failure — errors are
    logged rather than raised so bundle generation can continue.
    """
    relationships = None
    try:
        # flatten {group: [extractions]} into one list for the prompt
        all_extracts = list(itertools.chain(*all_extracts.values()))
        relationship_types = (INCLUDES_PATH/"helpers/stix_relationship_types.txt").read_text().splitlines()
        relationships = ai_extractor_session.extract_relationships(text_content, all_extracts, relationship_types)
        relationships = relationships.model_dump()
        log_notes(relationships, "Relationships")
        bundler.process_relationships(relationships['relationships'])
    except BaseException as e:
        logging.exception("Relationship processing failed: %s", e)
    return relationships
242
+
243
def validate_token_count(max_tokens, input, extractors: list[BaseAIExtractor]):
    """Raise FatalException when `input` exceeds `max_tokens` for any extractor.

    Token counts are memoized per (extractor, input) pair via _count_token,
    so repeated validation of the same text is cheap.
    """
    logging.info('INPUT_TOKEN_LIMIT = %d', max_tokens)
    for extractor in extractors:
        token_count = _count_token(extractor, input)
        if token_count > max_tokens:
            raise FatalException(f"{extractor.extractor_name}: input_file token count ({token_count}) exceeds INPUT_TOKEN_LIMIT ({max_tokens})")
249
+
250
+
251
@functools.lru_cache
def _count_token(extractor: BaseAIExtractor, input: str):
    # memoized so the same (extractor, text) pair is tokenized only once when
    # validate_token_count runs multiple times; bare lru_cache bounds the
    # cache at the default 128 entries
    return extractor.count_tokens(input)
254
+
255
def run_txt2stix(bundler: txt2stixBundler, preprocessed_text: str, extractors_map: dict,
    ai_content_check_provider=None,
    ai_create_attack_flow=None,
    input_token_limit=10,
    ai_settings_extractions=None,
    ai_settings_relationships=None,
    relationship_mode="standard",
    ignore_extraction_boundary=False,
    always_extract=False, # continue even if ai_content_check fails

    **kwargs
    ) -> Txt2StixData:
    """Top-level pipeline: optional AI content check, extraction, relationship
    generation, and optional attack-flow generation.

    Extra **kwargs (e.g. the whole argparse namespace from main()) are
    accepted and ignored. Returns a Txt2StixData built via model_construct
    (validation is skipped), with unused stages left as None.
    """
    should_extract = True
    retval = Txt2StixData.model_construct()
    retval.extractions = retval.attack_flow = retval.relationships = None
    if ai_content_check_provider:
        logging.info("checking content")
        model : BaseAIExtractor = ai_content_check_provider
        validate_token_count(input_token_limit, preprocessed_text, [model])
        retval.content_check = model.check_content(preprocessed_text)
        # unless always_extract is set, a negative check skips extraction entirely
        should_extract = retval.content_check.describes_incident
        logging.info("=== ai-check-content output ====")
        logging.info(retval.content_check.model_dump_json())
        # classifications become report labels, e.g. `txt2stix:<classification>`
        for classification in retval.content_check.incident_classification:
            bundler.report.labels.append(f'txt2stix:{classification}'.lower())

    if should_extract or always_extract:
        # fail before any expensive AI call if the input is too large
        if extractors_map.get("ai"):
            validate_token_count(input_token_limit, preprocessed_text, ai_settings_extractions)
        if relationship_mode == "ai":
            validate_token_count(input_token_limit, preprocessed_text, [ai_settings_relationships])

        retval.extractions = extract_all(bundler, extractors_map, preprocessed_text, ai_extractors=ai_settings_extractions, ignore_extraction_boundary=ignore_extraction_boundary)
        # only run AI relationships when at least one extraction was found
        if relationship_mode == "ai" and sum(map(lambda x: len(x), retval.extractions.values())):
            retval.relationships = extract_relationships_with_ai(bundler, preprocessed_text, retval.extractions, ai_settings_relationships)

    if ai_create_attack_flow:
        logging.info("creating attack-flow bundle")
        # reuses the relationship model (validated as required in parse_args)
        ex: BaseAIExtractor = ai_settings_relationships
        retval.attack_flow = ex.extract_attack_flow(preprocessed_text, retval.extractions, retval.relationships)
        bundler.flow_objects = parse_flow(bundler.report, retval.attack_flow)

    return retval
298
+
299
def main():
    """CLI entry point: parse args, run the pipeline, write bundle + data files."""
    dotenv.load_dotenv()
    logger = newLogger("txt2stix")
    try:
        args = parse_args()
        job_id = args.report_id or str(uuid.uuid4())
        setLogFile(logger, Path(f"logs/logs-{job_id}.log"))
        logger.info(f"Arguments: {json.dumps(sys.argv[1:])}")


        input_text = args.input_file.read_text()
        # strip image/link URLs before extraction so they don't pollute results
        preprocessed_text = remove_links(input_text, args.ignore_image_refs, args.ignore_link_refs)
        load_env()


        bundler = txt2stixBundler(args.name, args.use_identity, args.tlp_level, input_text, args.confidence, args.all_extractors, args.labels, created=args.created, report_id=args.report_id, external_references=args.external_refs)
        log_notes(sys.argv, "Config")
        convo_str = None  # NOTE(review): appears unused — confirm before removing

        # args.__dict__ forwards every CLI option; run_txt2stix accepts and
        # ignores the extras via **kwargs
        data = run_txt2stix(
            bundler, preprocessed_text, args.use_extractions,
            input_token_limit=int(os.environ['INPUT_TOKEN_LIMIT']),
            **args.__dict__,
        )

        ## write outputs
        out = bundler.to_json()
        output_path = Path("./output")/f"{bundler.bundle.id}.json"
        output_path.parent.mkdir(exist_ok=True)
        output_path.write_text(out)
        logger.info(f"Wrote bundle output to `{output_path}`")
        # the data file shares the bundle's UUID: bundle--<id>.json -> data--<id>.json
        data_path = Path(str(output_path).replace('bundle--', 'data--'))
        data_path.write_text(data.model_dump_json(indent=4))
        logger.info(f"Wrote data output to `{data_path}`")
    except argparse.ArgumentError as e:
        # post-parse validation errors (input_file/name) are logged, not re-raised
        logger.exception(e, exc_info=True)
    except:
        # everything else propagates after the log file has captured it
        raise
txt2stix/utils.py ADDED
@@ -0,0 +1,86 @@
1
+ import os
2
+ import pkgutil
3
+ import re
4
+ from pathlib import Path
5
+ from typing import Dict
6
+ from pydantic import BaseModel, Field
7
+ import bs4
8
+ import mistune
9
+ from mistune.renderers.markdown import MarkdownRenderer
10
+ from mistune.util import unescape
11
+
12
+ from txt2stix.ai_extractor.utils import AttackFlowList, DescribesIncident
13
class ImageLinkRemover(MarkdownRenderer):
    """Markdown renderer that re-emits markdown with link and/or image URLs
    blanked out, so noisy hrefs do not pollute downstream extraction."""

    def __init__(self, remove_links: bool=False, remove_images: bool=False):
        # flags consulted by the token handlers below
        self.remove_links = remove_links
        self.remove_images = remove_images
        super().__init__()

    def image(self, token: dict[str, dict], state: mistune.BlockState) -> str:
        # keep the alt text, drop the URL
        if self.remove_images:
            token['attrs']['url'] = ''
        return super().image(token, state)

    def link(self, token: dict[str, dict], state: mistune.BlockState) -> str:
        # skip image tokens here so the two removal flags stay independent
        if self.remove_links and token.get('type') != 'image':
            token['attrs']['url'] = ''
        return super().link(token, state)

    def codespan(self, token: dict[str, dict], state: mistune.BlockState) -> str:
        # undo mistune's entity escaping so code spans round-trip verbatim
        token['raw'] = unescape(token['raw'])
        return super().codespan(token, state)


    def block_html(self, token: Dict[str, dict], state: mistune.BlockState) -> str:
        # render block-level raw HTML like inline HTML, re-adding paragraph spacing
        return self.inline_html(token, state) + '\n\n'

    def inline_html(self, token: Dict[str, dict], state: mistune.BlockState) -> str:
        # raw HTML embedded in markdown: strip <a href> / <img src> attributes
        raw = token['raw']
        soup = bs4.BeautifulSoup(raw, 'html.parser')
        if self.remove_links:
            for a in soup.find_all('a'):
                del a['href']
        if self.remove_images:
            for img in soup.find_all('img'):
                del img['src']
        return soup.decode()
47
+
48
+ import tldextract
49
+
50
+
51
class Txt2StixData(BaseModel):
    """Aggregated results of a txt2stix run (serialized to the `data--` output file)."""
    # result of the optional ai_content_check step; None when the check is skipped
    content_check: DescribesIncident = Field(default=None)
    # mapping of extractor-group name -> list of extraction dicts
    extractions: dict = Field(default=None)
    # relationship dump from the AI relationship model (run_txt2stix may set None)
    relationships: list[dict] = Field(default_factory=list)
    # AI-generated attack flow; only populated with --ai_create_attack_flow
    attack_flow: AttackFlowList = Field(default=None)
56
+
57
+
58
def remove_links(input_text: str, remove_images: bool, remove_anchors: bool):
    """Re-render markdown `input_text` with image and/or anchor URLs blanked.

    Returns the transformed markdown string; textual content is preserved.
    """
    modify_links = mistune.create_markdown(escape=False, renderer=ImageLinkRemover(remove_links=remove_anchors, remove_images=remove_images))
    return modify_links(input_text)
61
+
62
def read_included_file(path):
    """Read a bundled data file from the `txt2stix.includes` package.

    Falls back to a filesystem-relative `includes/` directory when the
    package data cannot be loaded (e.g. running from a source checkout).
    """
    try:
        return pkgutil.get_data("txt2stix.includes", path).decode()
    except Exception:
        # narrowed from a bare `except:` — still covers ImportError/OSError
        # from pkgutil and the AttributeError raised when get_data returns
        # None, but no longer swallows KeyboardInterrupt/SystemExit
        return (Path("includes")/path).read_text()
67
+
68
def validate_tld(domain: str):
    """Return True when the final dot-separated label of `domain` is a known TLD."""
    suffix = domain.lower().rpartition('.')[2]
    return suffix in TLDs
71
+
72
def validate_reg_key(reg_key: str):
    """Return True when `reg_key` (case-insensitive) starts with a known hive prefix."""
    lowered = reg_key.lower()
    return any(lowered.startswith(prefix) for prefix in REGISTRY_PREFIXES)
78
+
79
def validate_file_mimetype(file_name):
    """Return the mimetype for `file_name`'s extension, or None when unknown."""
    extension = os.path.splitext(file_name)[1]
    return FILE_EXTENSIONS.get(extension)
82
+
83
+
84
# Known TLDs (lower-cased), one per line in the bundled helper file; used by validate_tld
TLDs = [tld.lower() for tld in read_included_file('helpers/tlds.txt').splitlines()]
# Windows registry hive prefixes (lower-cased); used by validate_reg_key
REGISTRY_PREFIXES = [key.lower() for key in read_included_file('helpers/windows_registry_key_prefix.txt').splitlines()]
# extension -> mimetype mapping built from the bundled CSV; used by validate_file_mimetype
# NOTE(review): assumes each CSV row is `extension,mimetype` with the leading dot
# included (dict() keys on the first element) — confirm against the bundled file
FILE_EXTENSIONS = dict(line.lower().split(',') for line in read_included_file('helpers/mimetype_filename_extension_list.csv').splitlines())
@@ -0,0 +1,190 @@
1
+ Metadata-Version: 2.4
2
+ Name: txt2stix
3
+ Version: 0.0.4
4
+ Summary: txt2stix is a Python script that is designed to identify and extract IoCs and TTPs from text files, identify the relationships between them, convert them to STIX 2.1 objects, and output as a STIX 2.1 bundle.
5
+ Project-URL: Homepage, https://github.com/muchdogesec/txt2stix
6
+ Project-URL: Issues, https://github.com/muchdogesec/txt2stix/issues
7
+ Author-email: DOGESEC <support@dogesec.com>
8
+ License-File: LICENSE
9
+ Classifier: License :: OSI Approved :: Apache Software License
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: Programming Language :: Python :: 3
12
+ Requires-Python: >=3.9
13
+ Requires-Dist: base58>=2.1.1
14
+ Requires-Dist: beautifulsoup4>=4.12.3
15
+ Requires-Dist: llama-index-core>=0.12.42
16
+ Requires-Dist: llama-index-llms-anthropic>=0.7.2
17
+ Requires-Dist: llama-index-llms-deepseek>=0.1.2
18
+ Requires-Dist: llama-index-llms-gemini>=0.5.0
19
+ Requires-Dist: llama-index-llms-openai-like>=0.4.0
20
+ Requires-Dist: llama-index-llms-openai>=0.4.5
21
+ Requires-Dist: llama-index-llms-openrouter>=0.3.2
22
+ Requires-Dist: mistune>=3.0.2
23
+ Requires-Dist: pathvalidate>=3.2.0
24
+ Requires-Dist: phonenumbers>=8.13.39
25
+ Requires-Dist: python-dotenv>=1.0.1
26
+ Requires-Dist: requests>=2.32.4
27
+ Requires-Dist: schwifty>=2024.6.1
28
+ Requires-Dist: stix2extensions
29
+ Requires-Dist: tld>=0.13
30
+ Requires-Dist: tldextract>=5.1.2
31
+ Requires-Dist: validators>=0.28.3
32
+ Provides-Extra: tests
33
+ Requires-Dist: pytest; extra == 'tests'
34
+ Requires-Dist: pytest-cov; extra == 'tests'
35
+ Requires-Dist: pytest-subtests; extra == 'tests'
36
+ Requires-Dist: requests; extra == 'tests'
37
+ Description-Content-Type: text/markdown
38
+
39
+ # txt2stix
40
+
41
+ [![codecov](https://codecov.io/gh/muchdogesec/txt2stix/graph/badge.svg?token=KTHAIYBH1I)](https://codecov.io/gh/muchdogesec/txt2stix)
42
+
43
+ ## Before you begin...
44
+
45
+ We have built two products on top of txt2stix that provide a more user-friendly experience:
46
+
47
+ * [Stixify: Extract machine readable cyber threat intelligence from unstructured data](https://github.com/muchdogesec/stixify)
48
+ * [Obstracts: Turn any blog into structured threat intelligence](https://github.com/muchdogesec/obstracts)
49
+
50
+ ## Overview
51
+
52
+ ![txt2stix](docs/txt2stix.png)
53
+
54
+ txt2stix is a Python script that is designed to identify and extract IoCs and TTPs from text files, identify the relationships between them, convert them to STIX 2.1 objects, and output as a STIX 2.1 bundle.
55
+
56
+ The general design goal of txt2stix was to keep it flexible, but simple, so that new extractions could be added or modified over time.
57
+
58
+ In short txt2stix;
59
+
60
+ 1. takes a txt file input
61
+ 2. extracts observables for enabled extractions (ai, pattern, or lookup)
62
+ 3. converts extracted observables to STIX 2.1 objects
63
+ 4. generates the relationships between extracted observables (ai, standard)
64
+ 5. converts extracted relationships to STIX 2.1 SRO objects
65
+ 6. outputs a STIX 2.1 bundle
66
+
67
+ ## tl;dr
68
+
69
+ [![txt2stix](https://img.youtube.com/vi/TWVGCou9oGk/0.jpg)](https://www.youtube.com/watch?v=TWVGCou9oGk)
70
+
71
+ [Watch the demo](https://www.youtube.com/watch?v=TWVGCou9oGk).
72
+
73
+ ## Usage
74
+
75
+ ### Setup
76
+
77
+ Install the required dependencies using:
78
+
79
+ ```shell
80
+ # clone the latest code
81
+ git clone https://github.com/muchdogesec/txt2stix
82
+ cd txt2stix
83
+ # create a venv
84
+ python3 -m venv txt2stix-venv
85
+ source txt2stix-venv/bin/activate
86
+ # install requirements
87
+ pip3 install .
88
+ ```
89
+
90
+ ### Set variables
91
+
92
+ txt2stix has various settings that are defined in an `.env` file.
93
+
94
+ To create a template for the file:
95
+
96
+ ```shell
97
+ cp .env.example .env
98
+ ```
99
+
100
+ To see more information about how to set the variables, and what they do, read the `.env.markdown` file.
101
+
102
+ ### Usage
103
+
104
+ ```shell
105
+ python3 txt2stix.py \
106
+ --relationship_mode MODE \
107
+ --input_file FILE.txt \
108
+ ...
109
+ ```
110
+
111
+ The following arguments are available:
112
+
113
+ #### Input settings
114
+
115
+ * `--input_file` (REQUIRED): the file to be converted. Must be `.txt`
116
+
117
+ #### STIX Report generation settings
118
+
119
+
120
+ * `--name` (REQUIRED): name of file, max 124 chars. Will be used in the STIX Report Object created.
121
+ * `--report_id` (OPTIONAL): Sometimes it is required to control the id of the `report` object generated. You can therefore pass a valid UUIDv4 in this field to be assigned to the report. e.g. passing `26119650-930e-43db-8b95-30a1e119d7e2` would create a STIX object id `report--26119650-930e-43db-8b95-30a1e119d7e2`. If this argument is not passed, the UUID will be randomly generated.
122
+ * `--tlp_level` (OPTIONAL): Options are `clear`, `green`, `amber`, `amber_strict`, `red`. Default if not passed, is `clear`.
123
+ * `--confidence` (OPTIONAL): value between 0-100. Default if not passed is null.
124
+ * `--labels` (OPTIONAL): comma separated list of labels. Case-insensitive (will all be converted to lower-case). Allowed `a-z`, `0-9`. e.g.`label1,label2` would create 2 labels.
125
+ * `--created` (OPTIONAL): by default all object `created` times will take the time the script was run. If you want to explicitly set these times you can do so using this flag. Pass the value in the format `YYYY-MM-DDTHH:MM:SS.sssZ` e.g. `2020-01-01T00:00:00.000Z`
126
+ * `--use_identity` (OPTIONAL): can pass a full STIX 2.1 identity object (make sure to properly escape). Will be validated by the STIX2 library.
127
+ * `--external_refs` (OPTIONAL): txt2stix will automatically populate the `external_references` of the report object it creates for the input. You can use this value to add additional objects to `external_references`. Note, you can only add `source_name` and `external_id` values currently. Pass as `source_name=external_id`. e.g. `--external_refs txt2stix=demo1 source=id` would create the following objects under the `external_references` property: `{"source_name":"txt2stix","external_id":"demo1"},{"source_name":"source","external_id":"id"}`
128
+
129
+ #### Output settings
130
+
131
+ How the extractions are performed
132
+
133
+ * `--use_extractions` (REQUIRED): if you only want to use certain extraction types, you can pass their slug found in either `includes/ai/config.yaml`, `includes/lookup/config.yaml` `includes/pattern/config.yaml` (e.g. `pattern_ipv4_address_only`). Default if not passed, no extractions applied. You can also pass a catch all wildcard `*` which will match all extraction paths (e.g. `'pattern_*'` would run all extractions starting with `pattern_` -- make sure to use quotes when using a wildcard)
134
+ * Important: if using any AI extractions (`ai_*`), you must set an AI API key in your `.env` file
135
+ * Important: if you are using any MITRE ATT&CK, CAPEC, CWE, ATLAS or Location extractions you must set `CTIBUTLER` or NVD CPE or CVE extractions you must set `VULMATCH` settings in your `.env` file
136
+ * `--relationship_mode` (REQUIRED): either:
137
+ * `ai`: AI provider must be enabled. extractions performed by either regex or AI for extractions user selected. Rich relationships created from AI provider from extractions.
138
+ * `standard`: extractions performed by either regex or AI (AI provider must be enabled) for extractions user selected. Basic relationships created from extractions back to master Report object generated.
139
+ * `--ignore_extraction_boundary` (OPTIONAL, default `false`, not compatible with AI extractions): in some cases the same string will create multiple extractions depending on extractions set (e.g. `https://www.google.com/file.txt` could create a url, url with file, domain, subdomain, and file). The default behaviour is for txt2stix to take the longest extraction and ignore everything else (e.g. only extract url with file, and ignore url, file, domain, subdomain, and file). If you want to override this behaviour and get all extractions in the output, set this flag to `true`.
140
+ * `--ignore_image_refs` (default `true`): images references in documents don't usually need extracting. e.g. `<img src="https://example.com/image.png" alt="something">` you would not want domain or file extractions extracting `example.com` and `image.png`. Hence these are ignored by default (they are removed from text sent to extraction). Note, only the `img src` is ignored, all other values e.g. `alt` are considered. If you want extractions to consider this data, set it to `false`
141
+ * `--ignore_link_refs` (default `true`): link references in documents don't usually need extracting e.g. `<a href="https://example.com/link.html" title="something">Bad Actor</a>` you would only want `Bad actor` to be considered for extraction. Hence these parts of the link are ignored by default (they are removed from text sent to extraction). Note, only the `a href` is ignored, all other values e.g. `title` are considered. Setting this to `false` will also include everything inside the link tag (e.g. `example.com` would extract as a domain)
142
+
143
+ #### AI settings
144
+
145
+ If any AI extractions, or AI relationship mode is set, you must set the following accordingly
146
+
147
+ * `--ai_settings_extractions`:
148
+ * defines the `provider:model` to be used for extractions. You can supply more than one provider. Separate with a space (e.g. `openrouter:openai/gpt-4o` `openrouter:deepseek/deepseek-chat`) If more than one provider passed, txt2stix will take extractions from all models, de-duplicate them, and then package them in the output. Currently supports:
149
+ * Provider (env var required `OPENROUTER_API_KEY`): `openrouter:`, providers/models `openai/gpt-4o`, `deepseek/deepseek-chat` ([More here](https://openrouter.ai/models))
150
+ * Provider (env var required `OPENAI_API_KEY`): `openai:`, models e.g.: `gpt-4o`, `gpt-4o-mini`, `gpt-4-turbo`, `gpt-4` ([More here](https://platform.openai.com/docs/models))
151
+ * Provider (env var required `ANTHROPIC_API_KEY`): `anthropic:`, models e.g.: `claude-3-5-sonnet-latest`, `claude-3-5-haiku-latest`, `claude-3-opus-latest` ([More here](https://docs.anthropic.com/en/docs/about-claude/models))
152
+ * Provider (env var required `GOOGLE_API_KEY`): `gemini:models/`, models: `gemini-1.5-pro-latest`, `gemini-1.5-flash-latest` ([More here](https://ai.google.dev/gemini-api/docs/models/gemini))
153
+ * Provider (env var required `DEEPSEEK_API_KEY`): `deepseek:`, models `deepseek-chat` ([More here](https://api-docs.deepseek.com/quick_start/pricing))
154
+ * See `tests/manual-tests/cases-ai-extraction-type.md` for some examples
155
+ * `--ai_settings_relationships`:
156
+ * similar to `ai_settings_extractions` but defines the model used to generate relationships. Only one model can be provided. Passed in same format as `ai_settings_extractions`
157
+ * See `tests/manual-tests/cases-ai-relationships.md` for some examples
158
+ * `--ai_content_check_provider`: Passing this flag will get the AI to try and classify the text in the input to 1) determine if it is talking about threat intelligence, and 2) what type of threat intelligence it is talking about. For context, we use this to filter out non-threat intel posts in Obstracts and Stixify. You pass `provider:model` with this flag to determine the AI model you wish to use to perform the check.
159
+ * `--ai_create_attack_flow`: passing this flag will also prompt the AI model (the same entered for `--ai_settings_relationships`) to generate an [Attack Flow](https://center-for-threat-informed-defense.github.io/attack-flow/) for the MITRE ATT&CK extractions to define the logical order in which they are being described. You must pass `--ai_settings_relationships` for this to work.
160
+
161
+ ## Adding new extractions
162
+
163
+ It is very likely you'll want to extend txt2stix to include new extractions to:
164
+
165
+ * Add a new lookup extraction: add your lookup to `includes/lookups` as a `.txt` file. Lookups should be a list of items separated by new lines to be searched for in documents. Once this is added, update `includes/extractions/lookup/config.yaml` with a new record pointing to your lookup. You can now use this lookup type at script run-time.
166
+ * Add a new AI extraction: Edit `includes/extractions/ai/config.yaml` with a new record for your extraction. You can craft the prompt used in the config to control how the LLM performs the extraction.
167
+
168
+ Currently it is not possible to easily add any other types of extractions (without modifying the logic at a code level).
169
+
170
+ ## Detailed documentation
171
+
172
+ If you would like to understand how txt2stix works in more detail, please refer to the documentation in `/docs/README.md`.
173
+
174
+ This documentation is particularly helpful to read for those of you wanting to add your own custom extractions.
175
+
176
+ ## Useful supporting tools
177
+
178
+ * [A Quick Start Guide to txt2stix](https://www.dogesec.com/blog/txt2stix_quickstart_guide/)
179
+ * [An example of how to use txt2stix with Attack Flows](https://www.dogesec.com/blog/understading_structure_attack_flows/)
180
+ * [STIX2 Python Library](https://pypi.org/project/stix2/): APIs for serializing and de-serializing STIX2 JSON content
181
+ * [STIX 2 Pattern Validator](https://pypi.org/project/stix2-patterns/): a tool for checking the syntax of the Cyber Threat Intelligence (CTI) STIX Pattern expressions
182
+ * [STIX Viewer](https://github.com/traut/stixview): Quickly load bundles produced from your report
183
+
184
+ ## Support
185
+
186
+ [Minimal support provided via the DOGESEC community](https://community.dogesec.com/).
187
+
188
+ ## License
189
+
190
+ [Apache 2.0](/LICENSE).