secator-0.22.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +453 -0
  4. secator/celery_signals.py +138 -0
  5. secator/celery_utils.py +320 -0
  6. secator/cli.py +2035 -0
  7. secator/cli_helper.py +395 -0
  8. secator/click.py +87 -0
  9. secator/config.py +670 -0
  10. secator/configs/__init__.py +0 -0
  11. secator/configs/profiles/__init__.py +0 -0
  12. secator/configs/profiles/aggressive.yaml +8 -0
  13. secator/configs/profiles/all_ports.yaml +7 -0
  14. secator/configs/profiles/full.yaml +31 -0
  15. secator/configs/profiles/http_headless.yaml +7 -0
  16. secator/configs/profiles/http_record.yaml +8 -0
  17. secator/configs/profiles/insane.yaml +8 -0
  18. secator/configs/profiles/paranoid.yaml +8 -0
  19. secator/configs/profiles/passive.yaml +11 -0
  20. secator/configs/profiles/polite.yaml +8 -0
  21. secator/configs/profiles/sneaky.yaml +8 -0
  22. secator/configs/profiles/tor.yaml +5 -0
  23. secator/configs/scans/__init__.py +0 -0
  24. secator/configs/scans/domain.yaml +31 -0
  25. secator/configs/scans/host.yaml +23 -0
  26. secator/configs/scans/network.yaml +30 -0
  27. secator/configs/scans/subdomain.yaml +27 -0
  28. secator/configs/scans/url.yaml +19 -0
  29. secator/configs/workflows/__init__.py +0 -0
  30. secator/configs/workflows/cidr_recon.yaml +48 -0
  31. secator/configs/workflows/code_scan.yaml +29 -0
  32. secator/configs/workflows/domain_recon.yaml +46 -0
  33. secator/configs/workflows/host_recon.yaml +95 -0
  34. secator/configs/workflows/subdomain_recon.yaml +120 -0
  35. secator/configs/workflows/url_bypass.yaml +15 -0
  36. secator/configs/workflows/url_crawl.yaml +98 -0
  37. secator/configs/workflows/url_dirsearch.yaml +62 -0
  38. secator/configs/workflows/url_fuzz.yaml +68 -0
  39. secator/configs/workflows/url_params_fuzz.yaml +66 -0
  40. secator/configs/workflows/url_secrets_hunt.yaml +23 -0
  41. secator/configs/workflows/url_vuln.yaml +91 -0
  42. secator/configs/workflows/user_hunt.yaml +29 -0
  43. secator/configs/workflows/wordpress.yaml +38 -0
  44. secator/cve.py +718 -0
  45. secator/decorators.py +7 -0
  46. secator/definitions.py +168 -0
  47. secator/exporters/__init__.py +14 -0
  48. secator/exporters/_base.py +3 -0
  49. secator/exporters/console.py +10 -0
  50. secator/exporters/csv.py +37 -0
  51. secator/exporters/gdrive.py +123 -0
  52. secator/exporters/json.py +16 -0
  53. secator/exporters/table.py +36 -0
  54. secator/exporters/txt.py +28 -0
  55. secator/hooks/__init__.py +0 -0
  56. secator/hooks/gcs.py +80 -0
  57. secator/hooks/mongodb.py +281 -0
  58. secator/installer.py +694 -0
  59. secator/loader.py +128 -0
  60. secator/output_types/__init__.py +49 -0
  61. secator/output_types/_base.py +108 -0
  62. secator/output_types/certificate.py +78 -0
  63. secator/output_types/domain.py +50 -0
  64. secator/output_types/error.py +42 -0
  65. secator/output_types/exploit.py +58 -0
  66. secator/output_types/info.py +24 -0
  67. secator/output_types/ip.py +47 -0
  68. secator/output_types/port.py +55 -0
  69. secator/output_types/progress.py +36 -0
  70. secator/output_types/record.py +36 -0
  71. secator/output_types/stat.py +41 -0
  72. secator/output_types/state.py +29 -0
  73. secator/output_types/subdomain.py +45 -0
  74. secator/output_types/tag.py +69 -0
  75. secator/output_types/target.py +38 -0
  76. secator/output_types/url.py +112 -0
  77. secator/output_types/user_account.py +41 -0
  78. secator/output_types/vulnerability.py +101 -0
  79. secator/output_types/warning.py +30 -0
  80. secator/report.py +140 -0
  81. secator/rich.py +130 -0
  82. secator/runners/__init__.py +14 -0
  83. secator/runners/_base.py +1240 -0
  84. secator/runners/_helpers.py +218 -0
  85. secator/runners/celery.py +18 -0
  86. secator/runners/command.py +1178 -0
  87. secator/runners/python.py +126 -0
  88. secator/runners/scan.py +87 -0
  89. secator/runners/task.py +81 -0
  90. secator/runners/workflow.py +168 -0
  91. secator/scans/__init__.py +29 -0
  92. secator/serializers/__init__.py +8 -0
  93. secator/serializers/dataclass.py +39 -0
  94. secator/serializers/json.py +45 -0
  95. secator/serializers/regex.py +25 -0
  96. secator/tasks/__init__.py +8 -0
  97. secator/tasks/_categories.py +487 -0
  98. secator/tasks/arjun.py +113 -0
  99. secator/tasks/arp.py +53 -0
  100. secator/tasks/arpscan.py +70 -0
  101. secator/tasks/bbot.py +372 -0
  102. secator/tasks/bup.py +118 -0
  103. secator/tasks/cariddi.py +193 -0
  104. secator/tasks/dalfox.py +87 -0
  105. secator/tasks/dirsearch.py +84 -0
  106. secator/tasks/dnsx.py +186 -0
  107. secator/tasks/feroxbuster.py +93 -0
  108. secator/tasks/ffuf.py +135 -0
  109. secator/tasks/fping.py +85 -0
  110. secator/tasks/gau.py +102 -0
  111. secator/tasks/getasn.py +60 -0
  112. secator/tasks/gf.py +36 -0
  113. secator/tasks/gitleaks.py +96 -0
  114. secator/tasks/gospider.py +84 -0
  115. secator/tasks/grype.py +109 -0
  116. secator/tasks/h8mail.py +75 -0
  117. secator/tasks/httpx.py +167 -0
  118. secator/tasks/jswhois.py +36 -0
  119. secator/tasks/katana.py +203 -0
  120. secator/tasks/maigret.py +87 -0
  121. secator/tasks/mapcidr.py +42 -0
  122. secator/tasks/msfconsole.py +179 -0
  123. secator/tasks/naabu.py +85 -0
  124. secator/tasks/nmap.py +487 -0
  125. secator/tasks/nuclei.py +151 -0
  126. secator/tasks/search_vulns.py +225 -0
  127. secator/tasks/searchsploit.py +109 -0
  128. secator/tasks/sshaudit.py +299 -0
  129. secator/tasks/subfinder.py +48 -0
  130. secator/tasks/testssl.py +283 -0
  131. secator/tasks/trivy.py +130 -0
  132. secator/tasks/trufflehog.py +240 -0
  133. secator/tasks/urlfinder.py +100 -0
  134. secator/tasks/wafw00f.py +106 -0
  135. secator/tasks/whois.py +34 -0
  136. secator/tasks/wpprobe.py +116 -0
  137. secator/tasks/wpscan.py +202 -0
  138. secator/tasks/x8.py +94 -0
  139. secator/tasks/xurlfind3r.py +83 -0
  140. secator/template.py +294 -0
  141. secator/thread.py +24 -0
  142. secator/tree.py +196 -0
  143. secator/utils.py +922 -0
  144. secator/utils_test.py +297 -0
  145. secator/workflows/__init__.py +29 -0
  146. secator-0.22.0.dist-info/METADATA +447 -0
  147. secator-0.22.0.dist-info/RECORD +150 -0
  148. secator-0.22.0.dist-info/WHEEL +4 -0
  149. secator-0.22.0.dist-info/entry_points.txt +2 -0
  150. secator-0.22.0.dist-info/licenses/LICENSE +60 -0
secator/runners/command.py
@@ -0,0 +1,1178 @@
1
+ import copy
2
+ import getpass
3
+ import logging
4
+ import os
5
+ import queue
6
+ import re
7
+ import shlex
8
+ import shutil
9
+ import signal
10
+ import subprocess
11
+ import sys
12
+ import threading
13
+
14
+ from time import time
15
+
16
+ import psutil
17
+ from fp.fp import FreeProxy
18
+
19
+ from secator.definitions import OPT_NOT_SUPPORTED, OPT_PIPE_INPUT, OPT_SPACE_SEPARATED
20
+ from secator.config import CONFIG
21
+ from secator.output_types import Info, Warning, Error, Stat
22
+ from secator.runners import Runner
23
+ from secator.template import TemplateLoader
24
+ from secator.utils import debug, rich_escape as _s, signal_to_name
25
+
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+
30
+ class Command(Runner):
31
+ """Base class to execute an external command."""
32
+ # Base cmd
33
+ cmd = None
34
+
35
+ # Tags
36
+ tags = []
37
+ # Meta options
38
+ meta_opts = {}
39
+
40
+ # Additional command options
41
+ opts = {}
42
+
43
+ # Option prefix char
44
+ opt_prefix = '-'
45
+
46
+ # Option key map to transform option names
47
+ opt_key_map = {}
48
+
49
+ # Option value map to transform option values
50
+ opt_value_map = {}
51
+
52
+ # Output map to transform JSON output keys
53
+ output_map = {}
54
+
55
+ # Run in shell if True (not recommended)
56
+ shell = False
57
+
58
+ # Current working directory
59
+ cwd = None
60
+
61
+ # Output encoding
62
+ encoding = 'utf-8'
63
+
64
+ # Flag to take the input
65
+ input_flag = None
66
+
67
+ # Input path (if a file is constructed)
68
+ input_path = None
69
+
70
+ # Input chunk size
71
+ input_chunk_size = CONFIG.runners.input_chunk_size
72
+
73
+ # Flag to take a file as input
74
+ file_flag = None
75
+ file_eof_newline = False
76
+ file_copy_sudo = False
77
+
78
+ # Flag to enable output JSON
79
+ json_flag = None
80
+
81
+ # Flag to show version
82
+ version_flag = None
83
+
84
+ # Install
85
+ install_pre = None
86
+ install_post = None
87
+ install_cmd_pre = None
88
+ install_cmd = None
89
+ install_github_bin = True
90
+ github_handle = None
91
+ install_github_version_prefix = ''
92
+ install_ignore_bin = []
93
+ install_version = None
94
+
95
+ # Serializer
96
+ item_loader = None
97
+ item_loaders = []
98
+
99
+ # Hooks
100
+ hooks = [
101
+ 'on_cmd',
102
+ 'on_cmd_opts',
103
+ 'on_cmd_done',
104
+ 'on_line'
105
+ ]
106
+
107
+ # Ignore return code
108
+ ignore_return_code = False
109
+
110
+ # Sudo
111
+ requires_sudo = False
112
+
113
+ # Return code
114
+ return_code = -1
115
+
116
+ # Exit ok
117
+ exit_ok = False
118
+
119
+ # Output
120
+ output = ''
121
+
122
+ # Proxy options
123
+ proxychains = False
124
+ proxy_socks5 = False
125
+ proxy_http = False
126
+
127
+ # Profile
128
+ profile = 'io'
129
+
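The class attributes above form the whole integration surface for wrapping a new tool. A hypothetical minimal subclass might look like the sketch below (tool name, flags and options are illustrative, not an actual secator task; the import path is assumed from the package layout):

    from secator.runners import Command  # assumed export

    class mytool(Command):
        """Hypothetical wrapper for a tool that prints JSON lines."""
        cmd = 'mytool'
        json_flag = '-json'      # appended to the command by _build_cmd
        input_flag = '-u'        # single input passed as '-u <value>'
        file_flag = '-l'         # multiple inputs written to a file and passed as '-l <path>'
        opt_prefix = '-'
        opts = {'rate_limit': {'type': int, 'help': 'Max requests per second'}}
        opt_key_map = {'rate_limit': 'rl'}   # user option 'rate_limit' is rendered as '-rl'
        install_cmd = 'pipx install mytool'  # illustrative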
130
+ def __init__(self, inputs=[], **run_opts):
131
+
132
+ # Build runner config on-the-fly
133
+ config = TemplateLoader(input={
134
+ 'name': self.__class__.__name__,
135
+ 'type': 'task',
136
+ 'input_types': self.input_types,
137
+ 'description': run_opts.get('description', None)
138
+ })
139
+
140
+ # Extract run opts
141
+ hooks = run_opts.pop('hooks', {})
142
+ caller = run_opts.get('caller', None)
143
+ results = run_opts.pop('results', [])
144
+ context = run_opts.pop('context', {})
145
+ node_id = context.get('node_id', None)
146
+ node_name = context.get('node_name', None)
147
+ if node_id:
148
+ config.node_id = node_id
149
+ if node_name:
150
+ config.node_name = context.get('node_name')
151
+ self.skip_if_no_inputs = run_opts.pop('skip_if_no_inputs', False)
152
+ self.enable_validators = run_opts.pop('enable_validators', True)
153
+
154
+ # Prepare validators
155
+ input_validators = []
156
+ if not self.skip_if_no_inputs:
157
+ input_validators.append(self._validate_input_nonempty)
158
+ if not caller:
159
+ input_validators.append(self._validate_chunked_input)
160
+ validators = {'validate_input': input_validators}
161
+
162
+ # Call super().__init__
163
+ super().__init__(
164
+ config=config,
165
+ inputs=inputs,
166
+ results=results,
167
+ run_opts=run_opts,
168
+ hooks=hooks,
169
+ validators=validators,
170
+ context=context)
171
+
172
+ # Cmd name
173
+ self.cmd_name = self.__class__.cmd.split(' ')[0]
174
+
175
+ # Inputs path
176
+ self.inputs_path = None
177
+
178
+ # Current working directory for cmd
179
+ self.cwd = self.run_opts.get('cwd', None)
180
+
181
+ # Print cmd
182
+ self.print_cmd = self.run_opts.get('print_cmd', False)
183
+
184
+ # Stat update
185
+ self.last_updated_stat = None
186
+
187
+ # Process
188
+ self.process = None
189
+
190
+ # Monitor thread (lazy initialization)
191
+ self.monitor_thread = None
192
+ self.monitor_stop_event = None
193
+ self.monitor_queue = None
194
+ self.process_start_time = None
195
+ # self.retry_count = 0 # TODO: remove this
196
+
197
+ # Proxy config (global)
198
+ self.proxy = self.run_opts.pop('proxy', False)
199
+ self.configure_proxy()
200
+
201
+ # Build command input
202
+ self._build_cmd_input()
203
+
204
+ # Build command
205
+ self._build_cmd()
206
+
207
+ # Run on_cmd hook
208
+ self.run_hooks('on_cmd', sub='init')
209
+
210
+ # Add sudo to command if it is required
211
+ if self.requires_sudo:
212
+ self.cmd = f'sudo {self.cmd}'
213
+
214
+ # Build item loaders
215
+ instance_func = getattr(self, 'item_loader', None)
216
+ item_loaders = self.item_loaders.copy()
217
+ if instance_func:
218
+ item_loaders.append(instance_func)
219
+ self.item_loaders = item_loaders
220
+
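Putting __init__ together: instantiating a subclass builds the full command line up front (inputs, options, proxy, sudo, item loaders). A minimal local-run sketch, reusing the hypothetical mytool subclass from above (run() comes from the Runner base class, which is not shown in this file):

    task = mytool(['example.com'], rate_limit=10, print_cmd=True)
    task.run()  # roughly 'mytool -u example.com -json -rl 10' (illustrative)
    print(task.cmd, task.return_code)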
221
+ def _init_monitor_objects(self):
222
+ """Initialize monitor thread objects when needed (lazy initialization)."""
223
+ if self.monitor_stop_event is None:
224
+ self.monitor_stop_event = threading.Event()
225
+ if self.monitor_queue is None:
226
+ self.monitor_queue = queue.Queue()
227
+
228
+ def toDict(self):
229
+ res = super().toDict()
230
+ res.update({
231
+ 'cmd': self.cmd,
232
+ 'cwd': self.cwd,
233
+ 'return_code': self.return_code
234
+ })
235
+ return res
236
+
237
+ def needs_chunking(self, sync):
238
+ many_targets = len(self.inputs) > 1
239
+ targets_over_chunk_size = self.input_chunk_size and len(self.inputs) > self.input_chunk_size
240
+ has_file_flag = self.file_flag is not None
241
+ is_chunk = self.chunk
242
+ chunk_it = (sync and many_targets and not has_file_flag and not is_chunk) or (not sync and many_targets and targets_over_chunk_size and not is_chunk) # noqa: E501
243
+ return chunk_it
244
+
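In plain terms, the rule above reads as follows (a hedged paraphrase of the boolean expression):

    # sync run, >1 input, no file_flag, not already a chunk        -> needs chunking
    # sync run, >1 input, file_flag set                            -> no chunking (inputs go to a file)
    # async run, >1 input, inputs <= input_chunk_size              -> no chunking
    # async run, >1 input, inputs > input_chunk_size, not a chunk  -> needs chunking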
245
+ @classmethod
246
+ def delay(cls, *args, **kwargs):
247
+ # TODO: Move this to TaskBase
248
+ from secator.celery import run_command
249
+ results = kwargs.get('results', [])
250
+ kwargs['sync'] = False
251
+ name = cls.__name__
252
+ profile = cls.profile(kwargs) if callable(cls.profile) else cls.profile
253
+ return run_command.apply_async(args=[results, name] + list(args), kwargs={'opts': kwargs}, queue=profile)
254
+
255
+ @classmethod
256
+ def s(cls, *args, **kwargs):
257
+ # TODO: Move this to TaskBase
258
+ from secator.celery import run_command
259
+ profile = cls.profile(kwargs) if callable(cls.profile) else cls.profile
260
+ return run_command.s(cls.__name__, *args, opts=kwargs).set(queue=profile)
261
+
262
+ @classmethod
263
+ def si(cls, *args, results=None, **kwargs):
264
+ # TODO: Move this to TaskBase
265
+ from secator.celery import run_command
266
+ profile = cls.profile(kwargs) if callable(cls.profile) else cls.profile
267
+ return run_command.si(results or [], cls.__name__, *args, opts=kwargs).set(queue=profile)
268
+
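These helpers defer execution to the run_command Celery task and route it to the queue named after the command's profile. A minimal dispatch sketch, assuming a worker is consuming that queue and reusing the hypothetical mytool subclass (Celery's standard AsyncResult API applies):

    result = mytool.delay('example.com', rate_limit=10)  # returns a Celery AsyncResult
    output = result.get()                                # block until the worker finishes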
269
+ def get_opt_value(self, opt_name, preprocess=False, process=False):
270
+ """Get option value as inputed by the user.
271
+
272
+ Args:
273
+ opt_name (str): Option name.
274
+ preprocess (bool): Preprocess the value with the option preprocessor function if it exists.
275
+ process (bool): Process the value with the option processor function if it exists.
276
+
277
+ Returns:
278
+ Any: Option value.
279
+ """
280
+ return Command._get_opt_value(
281
+ self.run_opts,
282
+ opt_name,
283
+ dict(self.opts, **self.meta_opts),
284
+ opt_aliases=self.opt_aliases,
285
+ preprocess=preprocess,
286
+ process=process)
287
+
288
+ @classmethod
289
+ def get_version_flag(cls):
290
+ if cls.version_flag == OPT_NOT_SUPPORTED:
291
+ return None
292
+ return cls.version_flag or f'{cls.opt_prefix}version'
293
+
294
+ @classmethod
295
+ def get_version_info(cls, bleeding=False):
296
+ from secator.installer import get_version_info
297
+ return get_version_info(
298
+ cls.cmd.split(' ')[0],
299
+ cls.get_version_flag(),
300
+ cls.github_handle,
301
+ cls.install_github_version_prefix,
302
+ cls.install_cmd,
303
+ cls.install_version,
304
+ bleeding=bleeding
305
+ )
306
+
307
+ @classmethod
308
+ def get_supported_opts(cls):
309
+ # TODO: Replace this with get_command_options called on the command class
310
+ def convert(d):
311
+ for k, v in d.items():
312
+ if hasattr(v, '__name__') and v.__name__ in ['str', 'int', 'float']:
313
+ d[k] = v.__name__
314
+ return d
315
+
316
+ cls_opts = copy.deepcopy(cls.opts)
317
+ opts = {k: convert(v) for k, v in cls_opts.items()}
318
+ for k, v in opts.items():
319
+ v['meta'] = cls.__name__
320
+ v['supported'] = True
321
+
322
+ cls_meta_opts = copy.deepcopy(cls.meta_opts)
323
+ meta_opts = {k: convert(v) for k, v in cls_meta_opts.items() if cls.opt_key_map.get(k) is not OPT_NOT_SUPPORTED}
324
+ for k, v in meta_opts.items():
325
+ v['meta'] = 'meta'
326
+ if cls.opt_key_map.get(k) is OPT_NOT_SUPPORTED:
327
+ v['supported'] = False
328
+ else:
329
+ v['supported'] = True
330
+ opts = dict(opts)
331
+ opts.update(meta_opts)
332
+ return opts
333
+
334
+ #---------------#
335
+ # Class methods #
336
+ #---------------#
337
+
338
+ @classmethod
339
+ def execute(cls, cmd, name=None, cls_attributes={}, run=True, **kwargs):
340
+ """Execute an ad-hoc command.
341
+
342
+ Can be used without defining an inherited class to run a command, while still enjoying all the good stuff in
343
+ this class.
344
+
345
+ Args:
346
+ cls (object): Class.
347
+ cmd (str): Command.
348
+ name (str): Printed name.
349
+ cls_attributes (dict): Class attributes.
350
+ kwargs (dict): Options.
351
+
352
+ Returns:
353
+ secator.runners.Command: instance of the Command.
354
+ """
355
+ name = name or cmd.split(' ')[0]
356
+ kwargs['print_cmd'] = not kwargs.get('quiet', False)
357
+ kwargs['print_line'] = True
358
+ kwargs['process'] = kwargs.get('process', False)
359
+ kwargs['enable_validators'] = False
360
+ cmd_instance = type(name, (Command,), {'cmd': cmd})(**kwargs)
361
+ for k, v in cls_attributes.items():
362
+ setattr(cmd_instance, k, v)
363
+ if run:
364
+ cmd_instance.run()
365
+ return cmd_instance
366
+
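Command.execute builds a throwaway Command subclass on the fly, which is handy for glue commands such as installers or health checks. A minimal sketch (import path assumed from the package layout):

    from secator.runners import Command

    ls = Command.execute('ls -la /tmp', name='ls')  # runs immediately since run=True by default
    print(ls.return_code, ls.output)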
367
+ def configure_proxy(self):
368
+ """Configure proxy. Start with global settings like 'proxychains' or 'random', or fallback to tool-specific
369
+ proxy settings.
370
+
371
+ TODO: Move this to a subclass of Command, or to a configurable attribute to pass to derived classes as it's not
372
+ related to core functionality.
373
+ """
374
+ opt_key_map = self.opt_key_map
375
+ proxy_opt = opt_key_map.get('proxy', False)
376
+ support_proxy_opt = proxy_opt and proxy_opt != OPT_NOT_SUPPORTED
377
+ proxychains_flavor = getattr(self, 'proxychains_flavor', CONFIG.http.proxychains_command)
378
+ proxy = False
379
+
380
+ if self.proxy in ['auto', 'proxychains'] and self.proxychains:
381
+ self.cmd = f'{proxychains_flavor} {self.cmd}'
382
+ proxy = 'proxychains'
383
+
384
+ elif self.proxy and support_proxy_opt:
385
+ if self.proxy in ['auto', 'socks5'] and self.proxy_socks5 and CONFIG.http.socks5_proxy:
386
+ proxy = CONFIG.http.socks5_proxy
387
+ elif self.proxy in ['auto', 'http'] and self.proxy_http and CONFIG.http.http_proxy:
388
+ proxy = CONFIG.http.http_proxy
389
+ elif self.proxy == 'random' and self.proxy_http:
390
+ proxy = FreeProxy(timeout=CONFIG.http.freeproxy_timeout, rand=True, anonym=True).get()
391
+ elif self.proxy.startswith(('http://', 'socks5://')):
392
+ proxy = self.proxy
393
+
394
+ if proxy != 'proxychains':
395
+ self.run_opts['proxy'] = proxy
396
+
397
+ if proxy != 'proxychains' and self.proxy and not proxy:
398
+ warning = Warning(message=rf'Ignoring proxy "{self.proxy}" (reason: not supported) \[[bold yellow3]{self.unique_name}[/]]') # noqa: E501
399
+ self._print(repr(warning))
400
+
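Proxy support is opt-in per tool: a subclass declares which proxy modes it accepts and maps the generic 'proxy' option to the tool's own flag. A hedged sketch (the flag name is illustrative):

    class mytool(Command):
        cmd = 'mytool'
        proxychains = True                     # allow wrapping as 'proxychains mytool ...'
        proxy_socks5 = True                    # accepts a socks5:// proxy URL
        proxy_http = True                      # accepts an http:// proxy URL
        opt_key_map = {'proxy': 'http-proxy'}  # hypothetical: rendered as '-http-proxy <url>'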
401
+ #----------#
402
+ # Internal #
403
+ #----------#
404
+ def yielder(self):
405
+ """Run command and yields its output in real-time. Also saves the command line, return code and output to the
406
+ database.
407
+
408
+ Args:
409
+ cmd (str): Command to run.
410
+ cwd (str, Optional): Working directory to run from.
411
+ shell (bool, Optional): Run command in a shell.
412
+ history_file (str): History file path.
413
+ mapper_func (Callable, Optional): Function to map output before yielding.
414
+ encoding (str, Optional): Output encoding.
415
+ ctx (dict, Optional): Scan context.
416
+
417
+ Yields:
418
+ str: Command stdout / stderr.
419
+ dict: Serialized object.
420
+ """
421
+ try:
422
+
423
+ # Abort if it has children tasks
424
+ if self.has_children:
425
+ return
426
+
427
+ # Abort if dry run
428
+ if self.dry_run:
429
+ self.print_description()
430
+ self.print_command()
431
+ yield Info(message=self.cmd)
432
+ return
433
+
434
+ # Abort if no inputs
435
+ if len(self.inputs) == 0 and self.skip_if_no_inputs and not self.default_inputs:
436
+ self.print_description()
437
+ self.print_command()
438
+ self.add_result(Warning(message=f'{self.unique_name} skipped (no inputs)'), print=False)
439
+ for item in self.warnings:
440
+ self._print_item(item)
441
+ for item in self.errors:
442
+ self._print_item(item)
443
+ self.skipped = True
444
+ return
445
+
446
+ # Print command
447
+ self.print_description()
448
+ self.print_command()
449
+
450
+ # Check for sudo requirements and prepare the password if needed
451
+ sudo_required = re.search(r'\bsudo\b', self.cmd)
452
+ sudo_password = None
453
+ if sudo_required:
454
+ sudo_password, error = self._prompt_sudo(self.cmd)
455
+ if error:
456
+ yield Error(message=error)
457
+ return
458
+
459
+ # Prepare cmds
460
+ command = self.cmd if self.shell else shlex.split(self.cmd)
461
+
462
+ # Check command is installed and auto-install
463
+ if not self.no_process and not self.is_installed():
464
+ if CONFIG.security.auto_install_commands:
465
+ from secator.installer import ToolInstaller
466
+ yield Info(message=f'Command {self.name} is missing but auto-installing since security.auto_install_commands is set') # noqa: E501
467
+ status = ToolInstaller.install(self.__class__)
468
+ if not status.is_ok():
469
+ yield Error(message=f'Failed installing {self.cmd_name}')
470
+ return
471
+
472
+ # Output and results
473
+ self.return_code = 0
474
+ self.killed = False
475
+ self.memory_limit_mb = CONFIG.celery.task_memory_limit_mb
476
+
477
+ # Run the command using subprocess
478
+ env = os.environ
479
+ self.process = subprocess.Popen(
480
+ command,
481
+ stdin=subprocess.PIPE if sudo_password else None,
482
+ stdout=subprocess.PIPE,
483
+ stderr=subprocess.STDOUT,
484
+ universal_newlines=True,
485
+ preexec_fn=os.setsid if not sudo_required else None,
486
+ shell=self.shell,
487
+ env=env,
488
+ cwd=self.cwd)
489
+
490
+ # Initialize monitor objects and start monitor thread
491
+ self._init_monitor_objects()
492
+ self.process_start_time = time()
493
+ self.monitor_stop_event.clear()
494
+ self.monitor_thread = threading.Thread(target=self._monitor_process, daemon=True)
495
+ self.monitor_thread.start()
496
+
497
+ # If sudo password is provided, send it to stdin
498
+ if sudo_password:
499
+ self.process.stdin.write(f"{sudo_password}\n")
500
+ self.process.stdin.flush()
501
+
502
+ # Process the output in real-time
503
+ for line in iter(lambda: self.process.stdout.readline(), ''):
504
+ # sleep(0) # for async to give up control
505
+ if not line:
506
+ break
507
+ yield from self.process_line(line)
508
+ yield from self.process_monitor_queue()
509
+
510
+ # Run hooks after cmd has completed successfully
511
+ result = self.run_hooks('on_cmd_done', sub='end')
512
+ if result:
513
+ yield from result
514
+
515
+ except FileNotFoundError as e:
516
+ yield from self.handle_file_not_found(e)
517
+
518
+ except BaseException as e:
519
+ self.debug(f'{self.unique_name}: {type(e).__name__}.', sub='end')
520
+ self.stop_process()
521
+ yield Error.from_exception(e)
522
+
523
+ finally:
524
+ yield from self._wait_for_end()
525
+
526
+ def is_installed(self):
527
+ """Check if a command is installed by using `which`.
528
+
529
+ Args:
530
+ command (str): The command to check.
531
+
532
+ Returns:
533
+ bool: True if the command is installed, False otherwise.
534
+ """
535
+ cmd = ["which", self.cmd_name]
536
+ if self.requires_sudo:
537
+ cmd = ["sudo"] + cmd
538
+ result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
539
+ result.communicate()
540
+ return result.returncode == 0
541
+
542
+ def process_line(self, line):
543
+ """Process a single line of output emitted on stdout / stderr and yield results."""
544
+
545
+ # Strip line endings
546
+ line = line.rstrip()
547
+
548
+ # Some commands output ANSI text, so we need to remove those ANSI chars
549
+ if self.encoding == 'ansi':
550
+ ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
551
+ line = ansi_escape.sub('', line)
552
+ line = line.replace('\\x0d\\x0a', '\n')
553
+
554
+ # Run on_line hooks
555
+ line = self.run_hooks('on_line', line, sub='line.process')
556
+ if line is None:
557
+ return
558
+
559
+ # Yield the raw line
560
+ yield line
561
+
562
+ # Run item_loader to try parsing as dict
563
+ for item in self.run_item_loaders(line):
564
+ yield item
565
+
566
+ # Skip rest of iteration (no process mode)
567
+ if self.no_process:
568
+ return
569
+
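The on_line hook is the place to clean or drop raw lines before the item loaders see them. secator tasks conventionally declare hooks as staticmethods that receive the runner as an explicit first argument (hook dispatch lives in the Runner base class, not in this file); a hypothetical line filter:

    class mytool(Command):
        cmd = 'mytool'

        @staticmethod
        def on_line(self, line):
            # Returning None discards the line (see process_line above); the prefix is illustrative.
            if line.startswith('[INF]'):
                return None
            return line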
570
+ def process_monitor_queue(self):
571
+ """Process and yield any queued items from monitor thread."""
572
+ if self.monitor_queue is None:
573
+ return
574
+ while not self.monitor_queue.empty():
575
+ try:
576
+ monitor_item = self.monitor_queue.get_nowait()
577
+ yield monitor_item
578
+ except queue.Empty:
579
+ break
580
+
581
+ def print_description(self):
582
+ """Print description"""
583
+ if self.sync and not self.has_children and self.caller and self.description and self.print_cmd:
584
+ self._print(f'\n[bold gold3]:wrench: {self.description} [dim cyan]({self.config.name})[/][/] ...', rich=True)
585
+
586
+ def print_command(self):
587
+ """Print command."""
588
+ if self.print_cmd:
589
+ cmd_str = f':zap: {_s(self.cmd)}'
590
+ if self.sync and self.chunk and self.chunk_count:
591
+ cmd_str += f' [dim gray11]({self.chunk}/{self.chunk_count})[/]'
592
+ self._print(cmd_str, color='bold green', rich=True)
593
+ self.debug('command', obj={'cmd': self.cmd}, sub='start')
594
+ self.debug('options', obj=self.cmd_options, sub='start')
595
+
596
+ def handle_file_not_found(self, exc):
597
+ """Handle case where binary is not found.
598
+
599
+ Args:
600
+ exc (FileNotFoundError): the exception.
601
+
602
+ Yields:
603
+ secator.output_types.Error: the error.
604
+ """
605
+ self.debug('command not found', sub='end')
606
+ self.return_code = 127
607
+ if self.config.name in str(exc):
608
+ message = 'Executable not found.'
609
+ if self.install_cmd:
610
+ message += f' Install it with "secator install tools {self.config.name}".'
611
+ error = Error(message=message)
612
+ else:
613
+ error = Error.from_exception(exc)
614
+ yield error
615
+
616
+ def stop_process(self, exit_ok=False, sig=signal.SIGINT):
617
+ """Sends SIGINT to running process, if any."""
618
+ if not self.process:
619
+ return
620
+ self.debug(f'Sending signal {signal_to_name(sig)} to process {self.process.pid}.', sub='error')
621
+ if self.process and self.process.pid:
622
+ os.killpg(os.getpgid(self.process.pid), sig)
623
+ if exit_ok:
624
+ self.exit_ok = True
625
+
626
+ def _stop_monitor_thread(self):
627
+ """Stop monitor thread."""
628
+ if self.monitor_thread and self.monitor_thread.is_alive() and self.monitor_stop_event:
629
+ self.monitor_stop_event.set()
630
+ self.monitor_thread.join(timeout=2.0)
631
+
632
+ def _monitor_process(self):
633
+ """Monitor thread that checks process health and kills if necessary."""
634
+ last_stats_time = 0
635
+
636
+ while not self.monitor_stop_event.is_set():
637
+ if not self.process or not self.process.pid:
638
+ break
639
+
640
+ try:
641
+ current_time = time()
642
+ self.debug('Collecting monitor items', sub='monitor')
643
+
644
+ # Collect and queue stats at regular intervals
645
+ if (current_time - last_stats_time) >= CONFIG.runners.stat_update_frequency:
646
+ stats_items = list(self._collect_stats())
647
+ for stat_item in stats_items:
648
+ if self.monitor_queue is not None:
649
+ self.monitor_queue.put(stat_item)
650
+ last_stats_time = current_time
651
+
652
+ # Check memory usage from collected stats
653
+ if self.memory_limit_mb and self.memory_limit_mb != -1:
654
+ total_mem = sum(stat_item.extra_data.get('memory_info', {}).get('rss', 0) / 1024 / 1024 for stat_item in stats_items) # noqa: E501
655
+ if total_mem > self.memory_limit_mb:
656
+ warning = Warning(message=f'Memory limit {self.memory_limit_mb}MB exceeded (actual: {total_mem:.2f}MB)')
657
+ if self.monitor_queue is not None:
658
+ self.monitor_queue.put(warning)
659
+ self.stop_process(exit_ok=True, sig=signal.SIGTERM)
660
+ break
661
+
662
+ # Check execution time
663
+ if self.process_start_time and CONFIG.celery.task_max_timeout != -1:
664
+ elapsed_time = current_time - self.process_start_time
665
+ if elapsed_time > CONFIG.celery.task_max_timeout:
666
+ warning = Warning(message=f'Task timeout {CONFIG.celery.task_max_timeout}s exceeded')
667
+ if self.monitor_queue is not None:
668
+ self.monitor_queue.put(warning)
669
+ self.stop_process(exit_ok=True, sig=signal.SIGTERM)
670
+ break
671
+
672
+ # Check retry count
673
+ # TODO: remove this
674
+ # if CONFIG.celery.task_max_retries and self.retry_count >= CONFIG.celery.task_max_retries:
675
+ # warning = Warning(message=f'Max retries {CONFIG.celery.task_max_retries} exceeded (actual: {self.retry_count})')
676
+ # self.monitor_queue.put(warning)
677
+ # self.stop_process(exit_ok=False, sig=signal.SIGTERM)
678
+ # break
679
+
680
+ except Exception as e:
681
+ self.debug(f'Monitor thread error: {e}', sub='monitor')
682
+ warning = Warning(message=f'Monitor thread error: {e}')
683
+ if self.monitor_queue is not None:
684
+ self.monitor_queue.put(warning)
685
+ break
686
+
687
+ # Sleep for a short interval before next check (stat update frequency)
688
+ self.monitor_stop_event.wait(CONFIG.runners.stat_update_frequency)
689
+
690
+ def _collect_stats(self):
691
+ """Collect stats about the current running process, if any."""
692
+ if not self.process or not self.process.pid:
693
+ return
694
+ proc = psutil.Process(self.process.pid)
695
+ stats = Command.get_process_info(proc, children=True)
696
+ total_mem = 0
697
+ for info in stats:
698
+ name = info['name']
699
+ pid = info['pid']
700
+ cpu_percent = info['cpu_percent']
701
+ # mem_percent = info['memory_percent']
702
+ mem_rss = round(info['memory_info']['rss'] / 1024 / 1024, 2)
703
+ total_mem += mem_rss
704
+ self.debug(f'{name} {pid} {mem_rss}MB', sub='monitor')
705
+ net_conns = info.get('net_connections') or []
706
+ extra_data = {k: v for k, v in info.items() if k not in ['cpu_percent', 'memory_percent', 'net_connections']}
707
+ yield Stat(
708
+ name=name,
709
+ pid=pid,
710
+ cpu=cpu_percent,
711
+ memory=mem_rss,
712
+ memory_limit=self.memory_limit_mb,
713
+ net_conns=len(net_conns),
714
+ extra_data=extra_data
715
+ )
716
+ # self.debug(f'Total mem: {total_mem}MB, memory limit: {self.memory_limit_mb}', sub='monitor')
717
+ # if self.memory_limit_mb and self.memory_limit_mb != -1 and total_mem > self.memory_limit_mb:
718
+ # raise MemoryError(f'Memory limit {self.memory_limit_mb}MB reached for {self.unique_name}')
719
+
720
+ def stats(self, memory_limit_mb=None):
721
+ """Gather stats about the current running process, if any."""
722
+ if not self.process or not self.process.pid:
723
+ return
724
+ proc = psutil.Process(self.process.pid)
725
+ stats = Command.get_process_info(proc, children=True)
726
+ total_mem = 0
727
+ for info in stats:
728
+ name = info['name']
729
+ pid = info['pid']
730
+ cpu_percent = info['cpu_percent']
731
+ mem_percent = info['memory_percent']
732
+ mem_rss = round(info['memory_info']['rss'] / 1024 / 1024, 2)
733
+ total_mem += mem_rss
734
+ self.debug(f'process: {name} pid: {pid} memory: {mem_rss}MB', sub='stats')
735
+ net_conns = info.get('net_connections') or []
736
+ extra_data = {k: v for k, v in info.items() if k not in ['cpu_percent', 'memory_percent', 'net_connections']}
737
+ yield Stat(
738
+ name=name,
739
+ pid=pid,
740
+ cpu=cpu_percent,
741
+ memory=mem_percent,
742
+ net_conns=len(net_conns),
743
+ extra_data=extra_data
744
+ )
745
+ self.debug(f'Total mem: {total_mem}MB, memory limit: {memory_limit_mb}', sub='stats')
746
+ if memory_limit_mb and memory_limit_mb != -1 and total_mem > memory_limit_mb:
747
+ raise MemoryError(f'Memory limit {memory_limit_mb}MB reached for {self.unique_name}')
748
+
749
+ @staticmethod
750
+ def get_process_info(process, children=False):
751
+ """Get process information from psutil.
752
+
753
+ Args:
754
+ process (psutil.Process): Process.
755
+ children (bool): Whether to gather stats about children processes too.
756
+ """
757
+ try:
758
+ data = {
759
+ k: v._asdict() if hasattr(v, '_asdict') else v
760
+ for k, v in process.as_dict().items()
761
+ if k not in ['memory_maps', 'open_files', 'environ']
762
+ }
763
+ yield data
764
+ except (psutil.Error, FileNotFoundError):
765
+ return
766
+ if children:
767
+ for subproc in process.children(recursive=True):
768
+ yield from Command.get_process_info(subproc, children=False)
769
+
770
+ def run_item_loaders(self, line):
771
+ """Run item loaders against an output line.
772
+
773
+ Args:
774
+ line (str): Output line.
775
+ """
776
+ if self.no_process:
777
+ return
778
+ for item_loader in self.item_loaders:
779
+ if (callable(item_loader)):
780
+ yield from item_loader(self, line)
781
+ elif item_loader:
782
+ name = item_loader.__class__.__name__.replace('Serializer', '').lower()
783
+ default_callback = lambda self, x: [(yield x)] # noqa: E731
784
+ callback = getattr(self, f'on_{name}_loaded', None) or default_callback
785
+ for item in item_loader.run(line):
786
+ yield from callback(self, item)
787
+
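Serializer-based loaders trigger an 'on_<serializer>_loaded' callback derived from the serializer class name, as shown above. A hedged sketch, assuming secator.serializers exports a JSON serializer class as the file list suggests (class and option names may differ):

    from secator.serializers import JSONSerializer  # assumed export, see secator/serializers/json.py above

    class mytool(Command):
        cmd = 'mytool'
        json_flag = '-json'
        item_loaders = [JSONSerializer()]

        @staticmethod
        def on_json_loaded(self, item):
            # Called once per parsed JSON item; the normalization below is illustrative.
            item['source'] = 'mytool'
            yield item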
788
+ def _prompt_sudo(self, command):
789
+ """
790
+ Checks if the command requires sudo and prompts for the password if necessary.
791
+
792
+ Args:
793
+ command (str): The initial command to be executed.
794
+
795
+ Returns:
796
+ tuple: (sudo password, error).
797
+ """
798
+ sudo_password = None
799
+
800
+ # Check if sudo is required by the command
801
+ if not re.search(r'\bsudo\b', command):
802
+ return None, None
803
+
804
+ # Check if sudo can be executed without a password
805
+ try:
806
+ if subprocess.run(['sudo', '-n', 'true'], capture_output=False).returncode == 0:
807
+ return None, None
808
+ except ValueError:
809
+ self._print('[bold orange3]Could not run sudo check test.[/] [bold green]Passing.[/]')
810
+
811
+ # Check if we have a tty
812
+ if not sys.stdin.isatty():
813
+ error = "No TTY detected. Sudo password prompt requires a TTY to proceed."
814
+ return -1, error
815
+
816
+ # If not, prompt the user for a password
817
+ self._print('[bold red]Please enter sudo password to continue.[/]', rich=True)
818
+ for _ in range(3):
819
+ user = getpass.getuser()
820
+ self._print(rf'\[sudo] password for {user}: ▌', rich=True)
821
+ sudo_password = getpass.getpass()
822
+ result = subprocess.run(
823
+ ['sudo', '-S', '-p', '', 'true'],
824
+ input=sudo_password + "\n",
825
+ text=True,
826
+ capture_output=True,
827
+ )
828
+ if result.returncode == 0:
829
+ return sudo_password, None # Password is correct
830
+ self._print("Sorry, try again.")
831
+ error = "Sudo password verification failed after 3 attempts."
832
+ return -1, error
833
+
834
+ def _wait_for_end(self):
835
+ """Wait for process to finish and process output and return code."""
836
+ self._stop_monitor_thread()
837
+ yield from self.process_monitor_queue()
838
+ if not self.process:
839
+ return
840
+ for line in self.process.stdout.readlines():
841
+ yield from self.process_line(line)
842
+ self.process.wait()
843
+ self.return_code = 0 if self.exit_ok else self.process.returncode
844
+ self.process.stdout.close()
845
+ self.return_code = 0 if self.ignore_return_code else self.return_code
846
+ self.output = self.output.strip()
847
+ self.killed = self.return_code == -2 or self.killed
848
+ self.debug(f'return code: {self.return_code}', sub='end')
849
+
850
+ if self.killed:
851
+ error = 'Process was killed manually (CTRL+C / CTRL+X)'
852
+ yield Error(message=error)
853
+
854
+ elif self.return_code != 0:
855
+ error = f'Command failed with return code {self.return_code}'
856
+ last_lines = self.output.split('\n')
857
+ last_lines = last_lines[max(0, len(last_lines) - 2):]
858
+ last_lines = [line for line in last_lines if line != '']
859
+ yield Error(message=error, traceback='\n'.join(last_lines), traceback_title='Last stdout lines')
860
+
861
+ @staticmethod
862
+ def _process_opts(
863
+ opts,
864
+ opts_conf,
865
+ opt_key_map={},
866
+ opt_value_map={},
867
+ opt_prefix='-',
868
+ opt_aliases=None,
869
+ preprocess=False,
870
+ process=True):
871
+ """Process a dict of options using a config, option key map / value map and option character like '-' or '--'.
872
+
873
+ Args:
874
+ opts (dict): Command options as input on the CLI.
875
+ opts_conf (dict): Options config (Click options definition).
876
+ opt_key_map (dict[str, str | Callable]): A dict to map option keys to the tool's option names.
877
+ opt_value_map (dict[str, str | Callable]): A dict to map option values to the tool's option values.
878
+ opt_prefix (str, default: '-'): Option prefix.
879
+ opt_aliases (list | None, default: None): Aliases to try.
880
+ preprocess (bool, default: False): Preprocess the value with the option preprocessor function if it exists.
881
+ process (bool, default: True): Process the value with the option processor function if it exists.
882
+
883
+ Returns:
884
+ dict: Processed options dict.
885
+ """
886
+ opts_dict = {}
887
+ for opt_name, opt_conf in opts_conf.items():
888
+ debug('before get_opt_value', obj={'name': opt_name, 'conf': opt_conf}, obj_after=False, sub='init.options', verbose=True) # noqa: E501
889
+
890
+ # Save original opt name
891
+ original_opt_name = opt_name
892
+
893
+ # Copy opt conf
894
+ conf = opt_conf.copy()
895
+
896
+ # Get opt value
897
+ default_val = conf.get('default')
898
+ opt_val = Command._get_opt_value(
899
+ opts,
900
+ opt_name,
901
+ opts_conf,
902
+ opt_aliases=opt_aliases,
903
+ default=default_val,
904
+ preprocess=preprocess,
905
+ process=process)
906
+
907
+ debug('after get_opt_value', obj={'name': opt_name, 'value': opt_val, 'conf': conf}, obj_after=False, sub='init.options', verbose=True) # noqa: E501
908
+
909
+ # Skip option if value is falsy
910
+ if opt_val in [None, False, []]:
911
+ debug('skipped (falsy)', obj={'name': opt_name, 'value': opt_val}, obj_after=False, sub='init.options', verbose=True) # noqa: E501
912
+ continue
913
+
914
+ # Convert opt value to expected command opt value
915
+ mapped_opt_val = opt_value_map.get(opt_name)
916
+ if mapped_opt_val:
917
+ conf.pop('pre_process', None)
918
+ conf.pop('process', None)
919
+ if callable(mapped_opt_val):
920
+ opt_val = mapped_opt_val(opt_val)
921
+ else:
922
+ opt_val = mapped_opt_val
923
+ elif 'pre_process' in conf:
924
+ opt_val = conf['pre_process'](opt_val)
925
+
926
+ # Convert opt name to expected command opt name
927
+ mapped_opt_name = opt_key_map.get(opt_name)
928
+ if mapped_opt_name is not None:
929
+ if mapped_opt_name == OPT_NOT_SUPPORTED:
930
+ debug('skipped (unsupported)', obj={'name': opt_name, 'value': opt_val}, sub='init.options', verbose=True) # noqa: E501
931
+ continue
932
+ else:
933
+ opt_name = mapped_opt_name
934
+ debug('mapped key / value', obj={'name': opt_name, 'value': opt_val}, obj_after=False, sub='init.options', verbose=True) # noqa: E501
935
+
936
+ # Avoid shell injections and detect opt prefix
937
+ opt_name = str(opt_name).split(' ')[0] # avoid cmd injection
938
+
939
+ # Replace '_' with '-'
940
+ opt_name = opt_name.replace('_', '-')
941
+
942
+ # Add opt prefix if not already there
943
+ if len(opt_name) > 0 and opt_name[0] not in ['-', '--']:
944
+ opt_name = f'{opt_prefix}{opt_name}'
945
+
946
+ # Append opt name + opt value to option string.
947
+ # Note: does not append opt value if value is True (flag)
948
+ opts_dict[original_opt_name] = {'name': opt_name, 'value': opt_val, 'conf': conf}
949
+ debug('final', obj={'name': original_opt_name, 'value': opt_val}, sub='init.options', obj_after=False, verbose=True) # noqa: E501
950
+
951
+ return opts_dict
952
+
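A quick worked example of the option pipeline (all names and values are illustrative):

    opts_conf = {'rate_limit': {'type': int, 'help': 'Max requests per second'}}
    opts_dict = Command._process_opts(
        {'rate_limit': 100},               # options as entered by the user
        opts_conf,
        opt_key_map={'rate_limit': 'rl'},
        opt_prefix='-')
    # -> {'rate_limit': {'name': '-rl', 'value': 100, 'conf': {...}}}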
953
+ @staticmethod
954
+ def _validate_chunked_input(self, inputs):
955
+ """Command does not support multiple inputs in non-worker mode. Consider running with a remote worker instead."""
956
+ if len(inputs) > 1 and self.sync and self.file_flag is None:
957
+ return False
958
+ return True
959
+
960
+ @staticmethod
961
+ def _validate_input_nonempty(self, inputs):
962
+ """Input is empty."""
963
+ if self.default_inputs is not None:
964
+ return True
965
+ if not inputs or len(inputs) == 0:
966
+ return False
967
+ return True
968
+
969
+ # @staticmethod
970
+ # def _validate_input_types_valid(self, input):
971
+ # pass
972
+
973
+ @staticmethod
974
+ def _get_opt_default(opt_name, opts_conf):
975
+ """Get the default value of an option.
976
+
977
+ Args:
978
+ opt_name (str): The name of the option to get the default value of (no aliases allowed).
979
+ opts_conf (dict): The options configuration, indexed by option name.
980
+
981
+ Returns:
982
+ any: The default value of the option.
983
+ """
984
+ for k, v in opts_conf.items():
985
+ if k == opt_name:
986
+ return v.get('default', None)
987
+ return None
988
+
989
+ @staticmethod
990
+ def _get_opt_value(opts, opt_name, opts_conf={}, opt_aliases=None, default=None, preprocess=False, process=False):
991
+ """Get the value of an option.
992
+
993
+ Args:
994
+ opts (dict): The options dict to search (input opts).
995
+ opt_name (str): The name of the option to get the value of.
996
+ opts_conf (dict): The options configuration, indexed by option name.
997
+ opt_aliases (list): The aliases to try.
998
+ default (any): The default value to return if the option is not found.
999
+ preprocess (bool): Whether to preprocess the value using the option preprocessor function.
1000
+ process (bool): Whether to process the value using the option processor function.
1001
+
1002
+ Returns:
1003
+ any: The value of the option.
1004
+
1005
+ Example:
1006
+ opts = {'target': 'example.com'}
1007
+ opts_conf = {'target': {'type': 'str', 'short': 't', 'default': 'example.com', 'pre_process': lambda x: x.upper()}} # noqa: E501
1008
+ opt_aliases = ['prefix_target', 'target']
1009
+
1010
+ # Example 1:
1011
+ opt_name = 'target'
1012
+ opt_value = Command._get_opt_value(opts, opt_name, opts_conf, opt_aliases, preprocess=True) # noqa: E501
1013
+ print(opt_value)
1014
+ # Output: EXAMPLE.COM
1015
+
1016
+ # Example 2:
1017
+ opt_name = 'prefix_target'
1018
+ opt_value = Command._get_opt_value(opts, opt_name, opts_conf, opt_aliases)
1019
+ print(opt_value)
1020
+ # Output: example.com
1021
+ """
1022
+ default = default or Command._get_opt_default(opt_name, opts_conf)
1023
+ opt_aliases = opt_aliases or []
1024
+ opt_names = []
1025
+ for prefix in opt_aliases:
1026
+ opt_names.extend([f'{prefix}.{opt_name}', f'{prefix}_{opt_name}'])
1027
+ opt_names.append(opt_name)
1028
+ opt_names = list(dict.fromkeys(opt_names))
1029
+ opt_values = [opts.get(o) for o in opt_names]
1030
+ opt_conf = [conf for _, conf in opts_conf.items() if _ == opt_name]
1031
+ if opt_conf:
1032
+ opt_conf = opt_conf[0]
1033
+ alias = opt_conf.get('short')
1034
+ if alias:
1035
+ opt_values.append(opts.get(alias))
1036
+ if OPT_NOT_SUPPORTED in opt_values:
1037
+ debug('skipped (unsupported)', obj={'name': opt_name}, obj_after=False, sub='init.options', verbose=True)
1038
+ return None
1039
+ value = next((v for v in opt_values if v is not None), default)
1040
+ if opt_conf:
1041
+ preprocessor = opt_conf.get('pre_process')
1042
+ processor = opt_conf.get('process')
1043
+ if preprocess and preprocessor:
1044
+ value = preprocessor(value)
1045
+ if process and processor:
1046
+ value = processor(value)
1047
+ debug('got opt value', obj={'name': opt_name, 'value': value, 'aliases': opt_names, 'values': opt_values}, obj_after=False, sub='init.options', verbose=True) # noqa: E501
1048
+ return value
1049
+
1050
+ def _build_cmd(self):
1051
+ """Build command string."""
1052
+
1053
+ # Add JSON flag to cmd
1054
+ if self.json_flag:
1055
+ parts = self.json_flag.split(' ')
1056
+ for part in parts:
1057
+ self.cmd += f' {shlex.quote(part)}'
1058
+
1059
+ # Opts str
1060
+ opts_str = ''
1061
+ opts = {}
1062
+
1063
+ # Add options to cmd
1064
+ opts_dict = Command._process_opts(
1065
+ self.run_opts,
1066
+ self.opts,
1067
+ self.opt_key_map,
1068
+ self.opt_value_map,
1069
+ self.opt_prefix,
1070
+ opt_aliases=self.opt_aliases,
1071
+ preprocess=False,
1072
+ process=False)
1073
+
1074
+ # Add meta options to cmd
1075
+ meta_opts_dict = Command._process_opts(
1076
+ self.run_opts,
1077
+ self.meta_opts,
1078
+ self.opt_key_map,
1079
+ self.opt_value_map,
1080
+ self.opt_prefix,
1081
+ opt_aliases=self.opt_aliases,
1082
+ preprocess=False,
1083
+ process=False)
1084
+
1085
+ if opts_dict:
1086
+ opts.update(opts_dict)
1087
+ if meta_opts_dict:
1088
+ opts.update(meta_opts_dict)
1089
+
1090
+ opts = self.run_hooks('on_cmd_opts', opts, sub='init')
1091
+
1092
+ if opts:
1093
+ for opt_conf in opts.values():
1094
+ conf = opt_conf['conf']
1095
+ process = conf.get('process')
1096
+ if process:
1097
+ opt_conf['value'] = process(opt_conf['value'])
1098
+ internal = conf.get('internal', False)
1099
+ if internal:
1100
+ continue
1101
+ if conf.get('requires_sudo', False):
1102
+ self.requires_sudo = True
1103
+ opts_str += ' ' + Command._build_opt_str(opt_conf)
1104
+ if '{target}' in opts_str:
1105
+ opts_str = opts_str.replace('{target}', self.inputs[0])
1106
+ self.cmd_options = opts
1107
+ self.cmd += opts_str
1108
+
1109
+ @staticmethod
1110
+ def _build_opt_str(opt):
1111
+ """Build option string."""
1112
+ conf = opt['conf']
1113
+ shlex_quote = conf.get('shlex', True)
1114
+ value = opt['value']
1115
+ opt_name = opt['name']
1116
+ opts_str = ''
1117
+ value = [value] if not isinstance(value, list) else value
1118
+ for val in value:
1119
+ if val is True:
1120
+ opts_str += f'{opt_name}'
1121
+ else:
1122
+ if shlex_quote:
1123
+ val = shlex.quote(str(val))
1124
+ opts_str += f'{opt_name} {val} '
1125
+ return opts_str.strip()
1126
+
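Two quick examples of _build_opt_str (values are illustrative; list values repeat the flag, True renders the flag alone):

    Command._build_opt_str({'name': '-H', 'value': ['X-A: 1', 'X-B: 2'], 'conf': {}})
    # -> "-H 'X-A: 1' -H 'X-B: 2'"
    Command._build_opt_str({'name': '-silent', 'value': True, 'conf': {}})
    # -> "-silent"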
1127
+ def _build_cmd_input(self):
1128
+ """Many commands take as input a string or a list. This function facilitate this based on whether we pass a
1129
+ string or a list to the cmd.
1130
+ """
1131
+ cmd = self.cmd
1132
+ inputs = self.inputs
1133
+
1134
+ # If inputs is empty, return the previous command
1135
+ if not inputs:
1136
+ return
1137
+
1138
+ # If inputs has a single element and the tool expects piped input, echo-pipe the input.
1139
+ # If the tool's input flag is set to None, assume it is a positional argument at the end of the command.
1140
+ # Otherwise use the input flag to pass the input.
1141
+ if len(inputs) == 1:
1142
+ input = shlex.quote(inputs[0])
1143
+ if self.input_flag == OPT_PIPE_INPUT:
1144
+ cmd = f'echo {input} | {cmd}'
1145
+ elif not self.input_flag:
1146
+ cmd += f' {input}'
1147
+ else:
1148
+ cmd += f' {self.input_flag} {input}'
1149
+
1150
+ # If inputs has multiple elements and the tool has file_flag set to OPT_PIPE_INPUT, cat-pipe the input file.
1151
+ # Otherwise pass the file path to the tool.
1152
+ else:
1153
+ fpath = f'{self.reports_folder}/.inputs/{self.unique_name}.txt'
1154
+
1155
+ # Write the input to a file
1156
+ with open(fpath, 'w') as f:
1157
+ f.write('\n'.join(inputs))
1158
+ if self.file_eof_newline:
1159
+ f.write('\n')
1160
+
1161
+ if self.file_copy_sudo:
1162
+ sudo_fpath = f'/tmp/{self.unique_name}.txt'
1163
+ shutil.copy(fpath, sudo_fpath)
1164
+ fpath = sudo_fpath
1165
+
1166
+ if self.file_flag == OPT_PIPE_INPUT:
1167
+ cmd = f'cat {fpath} | {cmd}'
1168
+ elif self.file_flag == OPT_SPACE_SEPARATED:
1169
+ cmd += ' ' + ' '.join(inputs)
1170
+ elif self.file_flag:
1171
+ cmd += f' {self.file_flag} {fpath}'
1172
+ else:
1173
+ cmd += f' {fpath}'
1174
+
1175
+ self.inputs_path = fpath
1176
+
1177
+ self.cmd = cmd
1178
+ self.shell = ' | ' in self.cmd
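For illustration, with the hypothetical mytool flags used earlier (input_flag='-u', file_flag='-l'):

    # inputs=['example.com']          -> "mytool -u example.com"
    # inputs=['a.com', 'b.com', ...]  -> inputs written to <reports_folder>/.inputs/<unique_name>.txt,
    #                                    then "mytool -l <that file>"
    # file_flag=OPT_PIPE_INPUT        -> "cat <that file> | mytool" (the pipe also enables shell mode)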