secator 0.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- secator/.gitignore +162 -0
- secator/__init__.py +0 -0
- secator/celery.py +453 -0
- secator/celery_signals.py +138 -0
- secator/celery_utils.py +320 -0
- secator/cli.py +2035 -0
- secator/cli_helper.py +395 -0
- secator/click.py +87 -0
- secator/config.py +670 -0
- secator/configs/__init__.py +0 -0
- secator/configs/profiles/__init__.py +0 -0
- secator/configs/profiles/aggressive.yaml +8 -0
- secator/configs/profiles/all_ports.yaml +7 -0
- secator/configs/profiles/full.yaml +31 -0
- secator/configs/profiles/http_headless.yaml +7 -0
- secator/configs/profiles/http_record.yaml +8 -0
- secator/configs/profiles/insane.yaml +8 -0
- secator/configs/profiles/paranoid.yaml +8 -0
- secator/configs/profiles/passive.yaml +11 -0
- secator/configs/profiles/polite.yaml +8 -0
- secator/configs/profiles/sneaky.yaml +8 -0
- secator/configs/profiles/tor.yaml +5 -0
- secator/configs/scans/__init__.py +0 -0
- secator/configs/scans/domain.yaml +31 -0
- secator/configs/scans/host.yaml +23 -0
- secator/configs/scans/network.yaml +30 -0
- secator/configs/scans/subdomain.yaml +27 -0
- secator/configs/scans/url.yaml +19 -0
- secator/configs/workflows/__init__.py +0 -0
- secator/configs/workflows/cidr_recon.yaml +48 -0
- secator/configs/workflows/code_scan.yaml +29 -0
- secator/configs/workflows/domain_recon.yaml +46 -0
- secator/configs/workflows/host_recon.yaml +95 -0
- secator/configs/workflows/subdomain_recon.yaml +120 -0
- secator/configs/workflows/url_bypass.yaml +15 -0
- secator/configs/workflows/url_crawl.yaml +98 -0
- secator/configs/workflows/url_dirsearch.yaml +62 -0
- secator/configs/workflows/url_fuzz.yaml +68 -0
- secator/configs/workflows/url_params_fuzz.yaml +66 -0
- secator/configs/workflows/url_secrets_hunt.yaml +23 -0
- secator/configs/workflows/url_vuln.yaml +91 -0
- secator/configs/workflows/user_hunt.yaml +29 -0
- secator/configs/workflows/wordpress.yaml +38 -0
- secator/cve.py +718 -0
- secator/decorators.py +7 -0
- secator/definitions.py +168 -0
- secator/exporters/__init__.py +14 -0
- secator/exporters/_base.py +3 -0
- secator/exporters/console.py +10 -0
- secator/exporters/csv.py +37 -0
- secator/exporters/gdrive.py +123 -0
- secator/exporters/json.py +16 -0
- secator/exporters/table.py +36 -0
- secator/exporters/txt.py +28 -0
- secator/hooks/__init__.py +0 -0
- secator/hooks/gcs.py +80 -0
- secator/hooks/mongodb.py +281 -0
- secator/installer.py +694 -0
- secator/loader.py +128 -0
- secator/output_types/__init__.py +49 -0
- secator/output_types/_base.py +108 -0
- secator/output_types/certificate.py +78 -0
- secator/output_types/domain.py +50 -0
- secator/output_types/error.py +42 -0
- secator/output_types/exploit.py +58 -0
- secator/output_types/info.py +24 -0
- secator/output_types/ip.py +47 -0
- secator/output_types/port.py +55 -0
- secator/output_types/progress.py +36 -0
- secator/output_types/record.py +36 -0
- secator/output_types/stat.py +41 -0
- secator/output_types/state.py +29 -0
- secator/output_types/subdomain.py +45 -0
- secator/output_types/tag.py +69 -0
- secator/output_types/target.py +38 -0
- secator/output_types/url.py +112 -0
- secator/output_types/user_account.py +41 -0
- secator/output_types/vulnerability.py +101 -0
- secator/output_types/warning.py +30 -0
- secator/report.py +140 -0
- secator/rich.py +130 -0
- secator/runners/__init__.py +14 -0
- secator/runners/_base.py +1240 -0
- secator/runners/_helpers.py +218 -0
- secator/runners/celery.py +18 -0
- secator/runners/command.py +1178 -0
- secator/runners/python.py +126 -0
- secator/runners/scan.py +87 -0
- secator/runners/task.py +81 -0
- secator/runners/workflow.py +168 -0
- secator/scans/__init__.py +29 -0
- secator/serializers/__init__.py +8 -0
- secator/serializers/dataclass.py +39 -0
- secator/serializers/json.py +45 -0
- secator/serializers/regex.py +25 -0
- secator/tasks/__init__.py +8 -0
- secator/tasks/_categories.py +487 -0
- secator/tasks/arjun.py +113 -0
- secator/tasks/arp.py +53 -0
- secator/tasks/arpscan.py +70 -0
- secator/tasks/bbot.py +372 -0
- secator/tasks/bup.py +118 -0
- secator/tasks/cariddi.py +193 -0
- secator/tasks/dalfox.py +87 -0
- secator/tasks/dirsearch.py +84 -0
- secator/tasks/dnsx.py +186 -0
- secator/tasks/feroxbuster.py +93 -0
- secator/tasks/ffuf.py +135 -0
- secator/tasks/fping.py +85 -0
- secator/tasks/gau.py +102 -0
- secator/tasks/getasn.py +60 -0
- secator/tasks/gf.py +36 -0
- secator/tasks/gitleaks.py +96 -0
- secator/tasks/gospider.py +84 -0
- secator/tasks/grype.py +109 -0
- secator/tasks/h8mail.py +75 -0
- secator/tasks/httpx.py +167 -0
- secator/tasks/jswhois.py +36 -0
- secator/tasks/katana.py +203 -0
- secator/tasks/maigret.py +87 -0
- secator/tasks/mapcidr.py +42 -0
- secator/tasks/msfconsole.py +179 -0
- secator/tasks/naabu.py +85 -0
- secator/tasks/nmap.py +487 -0
- secator/tasks/nuclei.py +151 -0
- secator/tasks/search_vulns.py +225 -0
- secator/tasks/searchsploit.py +109 -0
- secator/tasks/sshaudit.py +299 -0
- secator/tasks/subfinder.py +48 -0
- secator/tasks/testssl.py +283 -0
- secator/tasks/trivy.py +130 -0
- secator/tasks/trufflehog.py +240 -0
- secator/tasks/urlfinder.py +100 -0
- secator/tasks/wafw00f.py +106 -0
- secator/tasks/whois.py +34 -0
- secator/tasks/wpprobe.py +116 -0
- secator/tasks/wpscan.py +202 -0
- secator/tasks/x8.py +94 -0
- secator/tasks/xurlfind3r.py +83 -0
- secator/template.py +294 -0
- secator/thread.py +24 -0
- secator/tree.py +196 -0
- secator/utils.py +922 -0
- secator/utils_test.py +297 -0
- secator/workflows/__init__.py +29 -0
- secator-0.22.0.dist-info/METADATA +447 -0
- secator-0.22.0.dist-info/RECORD +150 -0
- secator-0.22.0.dist-info/WHEEL +4 -0
- secator-0.22.0.dist-info/entry_points.txt +2 -0
- secator-0.22.0.dist-info/licenses/LICENSE +60 -0
secator/runners/_base.py
ADDED
|
@@ -0,0 +1,1240 @@
|
|
|
1
|
+
import gc
|
|
2
|
+
import json
|
|
3
|
+
import logging
|
|
4
|
+
import sys
|
|
5
|
+
import textwrap
|
|
6
|
+
import uuid
|
|
7
|
+
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from time import time
|
|
11
|
+
|
|
12
|
+
from dotmap import DotMap
|
|
13
|
+
import humanize
|
|
14
|
+
|
|
15
|
+
from secator.definitions import ADDONS_ENABLED, STATE_COLORS
|
|
16
|
+
from secator.celery_utils import CeleryData
|
|
17
|
+
from secator.config import CONFIG
|
|
18
|
+
from secator.output_types import FINDING_TYPES, OUTPUT_TYPES, OutputType, Progress, Info, Warning, Error, Target, State
|
|
19
|
+
from secator.report import Report
|
|
20
|
+
from secator.rich import console, console_stdout
|
|
21
|
+
from secator.runners._helpers import (get_task_folder_id, run_extractors)
|
|
22
|
+
from secator.utils import (debug, import_dynamic, rich_to_ansi, should_update, autodetect_type)
|
|
23
|
+
from secator.tree import build_runner_tree
|
|
24
|
+
from secator.loader import get_configs_by_type
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
# Lifecycle hook names that every runner resolves; user hooks registered under
# these names are invoked at the corresponding point of the runner lifecycle.
HOOKS = [
    'before_init',
    'on_init',
    'on_start',
    'on_end',
    'on_item_pre_convert',
    'on_item',
    'on_duplicate',
    'on_interval',
]

# Validator names that every runner resolves; used to vet runner inputs and
# individual produced items.
VALIDATORS = [
    'validate_input',
    'validate_item'
]
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def format_runner_name(runner):
    """Format a runner's unique name with rich color markup based on its type.

    Args:
        runner (Runner): Runner instance (reads `config.type` and `unique_name`).

    Returns:
        str: Rich-formatted runner name, e.g. '[bold gold3]nmap[/]'.
    """
    colors = {
        'task': 'bold gold3',
        'workflow': 'bold dark_orange3',
        'scan': 'bold red',
    }
    # Fall back to plain 'bold' for unknown runner types instead of raising KeyError
    color = colors.get(runner.config.type, 'bold')
    return f'[{color}]{runner.unique_name}[/]'
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class Runner:
    """Runner class.

    Base class for all runner types (task, workflow, scan). Subclasses provide a
    `yielder()` producing raw items which are converted, deduplicated and printed.

    Args:
        config (secator.config.TemplateLoader): Runner config.
        inputs (List[str]): List of inputs to run task on.
        results (List[OutputType]): List of results to re-use.
        run_opts (dict[str]): Run options.
        hooks (dict[str, List[Callable]]): User hooks to register.
        validators (dict): User validators to register.
        context (dict): Runner context.

    Yields:
        OutputType: Output types.
    """

    # Input field (mostly for tests and CLI)
    input_types = []

    # Output types produced by this runner (first entry is used for piped raw output)
    output_types = []

    # Default inputs
    default_inputs = None

    # Default exporters (used when no 'output' run opt is passed)
    default_exporters = []

    # Profiles
    profiles = []

    # Run hooks
    enable_hooks = True

    # Run validators
    enable_validators = True
|
|
92
|
+
|
|
93
|
+
def __init__(self, config, inputs=[], results=[], run_opts={}, hooks={}, validators={}, context={}):
    """Initialize the runner: state, options, hooks, validators, inputs and exporters.

    NOTE(review): mutable default arguments are shared across calls; safe only as
    long as they are never mutated here — confirm before changing.
    """
    # Runner config
    self.config = DotMap(config.toDict())
    self.name = run_opts.get('name', config.name)
    self.description = run_opts.get('description', config.description or '')
    self.workspace_name = context.get('workspace_name', 'default')
    self.run_opts = run_opts.copy()
    self.sync = run_opts.get('sync', True)
    self.context = context

    # Runner state
    self.uuids = set()
    self.results = []
    self.results_count = 0
    self.threads = []
    self.output = ''
    self.started = False
    self.done = False
    self.start_time = datetime.fromtimestamp(time())
    self.end_time = None
    self.last_updated_db = None
    self.last_updated_celery = None
    self.last_updated_progress = None
    self.progress = 0
    self.celery_result = None
    self.celery_ids = []
    self.celery_ids_map = {}
    self.revoked = False
    self.skipped = False
    self.results_buffer = []
    self._hooks = hooks

    # Runner process options
    self.no_poll = self.run_opts.get('no_poll', False)
    self.no_live_updates = self.run_opts.get('no_live_updates', False)
    self.no_process = not self.run_opts.get('process', True)
    self.piped_input = self.run_opts.get('piped_input', False)
    self.piped_output = self.run_opts.get('piped_output', False)
    self.dry_run = self.run_opts.get('dry_run', False)
    self.has_parent = self.run_opts.get('has_parent', False)
    self.has_children = self.run_opts.get('has_children', False)
    self.caller = self.run_opts.get('caller', None)
    self.quiet = self.run_opts.get('quiet', False)
    self._reports_folder = self.run_opts.get('reports_folder', None)
    self.raise_on_error = self.run_opts.get('raise_on_error', False)

    # Runner toggles (reports default to enabled only for async runs)
    self.enable_duplicate_check = self.run_opts.get('enable_duplicate_check', True)
    self.enable_profiles = self.run_opts.get('enable_profiles', True)
    self.enable_reports = self.run_opts.get('enable_reports', not self.sync) and not self.dry_run and not self.no_process and not self.no_poll  # noqa: E501
    self.enable_hooks = self.run_opts.get('enable_hooks', True) and not self.dry_run and not self.no_process  # noqa: E501

    # Runner print opts
    self.print_item = self.run_opts.get('print_item', False) and not self.dry_run
    self.print_line = self.run_opts.get('print_line', False) and not self.quiet
    self.print_remote_info = self.run_opts.get('print_remote_info', False) and not self.piped_input and not self.piped_output  # noqa: E501
    self.print_start = self.run_opts.get('print_start', False) and not self.dry_run  # noqa: E501
    self.print_end = self.run_opts.get('print_end', False) and not self.dry_run  # noqa: E501
    self.print_target = self.run_opts.get('print_target', False) and not self.dry_run and not self.has_parent
    self.print_json = self.run_opts.get('print_json', False)
    self.print_raw = self.run_opts.get('print_raw', False) or (self.piped_output and not self.print_json)
    self.print_fmt = self.run_opts.get('fmt', '')
    self.print_stat = self.run_opts.get('print_stat', False)
    self.print_profiles = self.run_opts.get('print_profiles', False)

    # Chunks (chunked tasks get a '<name>_<chunk>' unique name)
    self.chunk = self.run_opts.get('chunk', None)
    self.chunk_count = self.run_opts.get('chunk_count', None)
    self.unique_name = self.name.replace('/', '_')
    self.unique_name = f'{self.unique_name}_{self.chunk}' if self.chunk else self.unique_name

    # Opt aliases (alternate prefixes under which runner-scoped opts may be passed)
    self.opt_aliases = []
    if self.config.node_id:
        self.opt_aliases.append(self.config.node_id.replace('.', '_'))
    if self.config.node_name:
        self.opt_aliases.append(self.config.node_name)
    self.opt_aliases.append(self.unique_name)

    # Begin initialization
    self.debug(f'begin initialization of {self.unique_name}', sub='init')

    # Hooks
    self.resolved_hooks = {name: [] for name in HOOKS + getattr(self, 'hooks', [])}
    self.debug('registering hooks', obj=list(self.resolved_hooks.keys()), sub='init')
    self.register_hooks(hooks)

    # Validators (the built-in input validator always runs first)
    self.resolved_validators = {name: [] for name in VALIDATORS + getattr(self, 'validators', [])}
    self.debug('registering validators', obj={'validators': list(self.resolved_validators.keys())}, sub='init')
    self.resolved_validators['validate_input'].append(self._validate_inputs)
    self.register_validators(validators)

    # Add prior results to runner results (resolved from MongoDB if the addon is on)
    self.debug(f'adding {len(results)} prior results to runner', sub='init')
    if CONFIG.addons.mongodb.enabled:
        self.debug(f'loading {len(results)} results from MongoDB', sub='init')
        from secator.hooks.mongodb import get_results
        results = get_results(results)
    for result in results:
        self.add_result(result, print=False, output=False, hooks=False, queue=not self.has_parent)

    # Determine inputs (normalize to a deduplicated list, record them as Target results)
    self.debug(f'resolving inputs with {len(self.dynamic_opts)} dynamic opts', obj=self.dynamic_opts, sub='init')
    self.inputs = [inputs] if not isinstance(inputs, list) else inputs
    self.inputs = list(set(self.inputs))
    targets = [Target(name=target) for target in self.inputs]
    for target in targets:
        self.add_result(target, print=False, output=False)

    # Run extractors on results
    self._run_extractors()
    self.debug(f'inputs ({len(self.inputs)})', obj=self.inputs, sub='init')
    self.debug(f'run opts ({len(self.resolved_opts)})', obj=self.resolved_opts, sub='init')
    self.debug(f'print opts ({len(self.resolved_print_opts)})', obj=self.resolved_print_opts, sub='init')

    # Load profiles
    profiles_str = run_opts.get('profiles') or []
    self.debug('resolving profiles', obj={'profiles': profiles_str}, sub='init')
    self.profiles = self.resolve_profiles(profiles_str)

    # Determine exporters
    exporters_str = self.run_opts.get('output') or self.default_exporters
    self.debug('resolving exporters', obj={'exporters': exporters_str}, sub='init')
    self.exporters = self.resolve_exporters(exporters_str)

    # Profiler (pyinstrument; silently disabled if another profiler is already running)
    self.enable_pyinstrument = self.run_opts.get('enable_pyinstrument', False) and ADDONS_ENABLED['trace']
    if self.enable_pyinstrument:
        self.debug('enabling profiler', sub='init')
        from pyinstrument import Profiler
        self.profiler = Profiler(async_mode=False, interval=0.0001)
        try:
            self.profiler.start()
        except RuntimeError:
            self.enable_pyinstrument = False
            pass

    # Input post-process
    self.run_hooks('before_init', sub='init')

    # Check if input is valid
    self.inputs_valid = self.run_validators('validate_input', self.inputs, sub='init')

    # Print targets
    if self.print_target:
        pluralize = 'targets' if len(self.self_targets) > 1 else 'target'
        self._print(Info(message=f'Loaded {len(self.self_targets)} {pluralize} for {format_runner_name(self)}:'), rich=True)
        for target in self.self_targets:
            self._print(f'   {repr(target)}', rich=True)

    # Run hooks
    self.run_hooks('on_init', sub='init')
|
|
246
|
+
|
|
247
|
+
@property
def resolved_opts(self):
    """Run options that are set, excluding print options and dynamic (trailing-underscore) options."""
    opts = {}
    for key, value in self.run_opts.items():
        if value is None:
            continue
        if key.startswith('print_') or key.endswith('_'):
            continue
        opts[key] = value
    return opts
|
|
250
|
+
|
|
251
|
+
@property
def resolved_print_opts(self):
    """Truthy print_* attributes currently set on this runner instance."""
    resolved = {}
    for name, value in self.__dict__.items():
        if name.startswith('print_') and value:
            resolved[name] = value
    return resolved
|
|
254
|
+
|
|
255
|
+
@property
def dynamic_opts(self):
    """Run options whose key ends with '_' (dynamic / templated options)."""
    dynamic = {}
    for key, value in self.run_opts.items():
        if key.endswith('_'):
            dynamic[key] = value
    return dynamic
|
|
258
|
+
|
|
259
|
+
@property
def elapsed(self):
    """Elapsed time as a timedelta: total runtime once done, time since start otherwise."""
    reference = self.end_time if self.done else datetime.fromtimestamp(time())
    return reference - self.start_time
|
|
264
|
+
|
|
265
|
+
@property
def elapsed_human(self):
    """Human-readable elapsed time (e.g. 'a few seconds')."""
    return humanize.naturaldelta(self.elapsed)
|
|
268
|
+
|
|
269
|
+
@property
def targets(self):
    """All Target items recorded in the runner results."""
    return list(filter(lambda item: isinstance(item, Target), self.results))
|
|
272
|
+
|
|
273
|
+
@property
def self_targets(self):
    """Target items that originate from this runner (matched by unique name prefix)."""
    prefix = self.unique_name
    matches = []
    for item in self.results:
        if isinstance(item, Target) and item._source.startswith(prefix):
            matches.append(item)
    return matches
|
|
276
|
+
|
|
277
|
+
@property
def infos(self):
    """Info items in results; restricted to this runner's own items when it is a task."""
    own_only = self.config.type == 'task'
    matches = []
    for item in self.results:
        if not isinstance(item, Info):
            continue
        if own_only and not item._source.startswith(self.unique_name):
            continue
        matches.append(item)
    return matches
|
|
282
|
+
|
|
283
|
+
@property
def warnings(self):
    """Warning items in results; restricted to this runner's own items when it is a task."""
    own_only = self.config.type == 'task'
    matches = []
    for item in self.results:
        if not isinstance(item, Warning):
            continue
        if own_only and not item._source.startswith(self.unique_name):
            continue
        matches.append(item)
    return matches
|
|
288
|
+
|
|
289
|
+
@property
def errors(self):
    """Error items in results; restricted to this runner's own items when it is a task."""
    own_only = self.config.type == 'task'
    matches = []
    for item in self.results:
        if not isinstance(item, Error):
            continue
        if own_only and not item._source.startswith(self.unique_name):
            continue
        matches.append(item)
    return matches
|
|
294
|
+
|
|
295
|
+
@property
def self_results(self):
    """Results whose source is this runner (matched by unique name prefix)."""
    prefix = self.unique_name
    return [item for item in self.results if item._source.startswith(prefix)]
|
|
298
|
+
|
|
299
|
+
@property
def findings(self):
    """All finding-type results (any of the FINDING_TYPES output types)."""
    finding_types = tuple(FINDING_TYPES)
    return [item for item in self.results if isinstance(item, finding_types)]
|
|
302
|
+
|
|
303
|
+
@property
def findings_count(self):
    """Number of finding-type results."""
    return sum(1 for _ in self.findings)
|
|
306
|
+
|
|
307
|
+
@property
def self_findings(self):
    """Finding-type results that originate from this runner (by unique name prefix)."""
    finding_types = tuple(FINDING_TYPES)
    prefix = self.unique_name
    return [item for item in self.results if isinstance(item, finding_types) and item._source.startswith(prefix)]
|
|
310
|
+
|
|
311
|
+
@property
def self_errors(self):
    """Error items attributable to this runner.

    NOTE(review): for non-task runners this returns ALL errors, identical to
    `errors` — confirm this is intended rather than a copy-paste of `errors`.
    """
    own_only = self.config.type == 'task'
    matches = []
    for item in self.results:
        if not isinstance(item, Error):
            continue
        if own_only and not item._source.startswith(self.unique_name):
            continue
        matches.append(item)
    return matches
|
|
316
|
+
|
|
317
|
+
@property
def self_findings_count(self):
    """Number of finding-type results that originate from this runner."""
    return sum(1 for _ in self.self_findings)
|
|
320
|
+
|
|
321
|
+
@property
def status(self):
    """Current lifecycle state of the runner.

    Returns:
        str: One of PENDING, REVOKED, SKIPPED, RUNNING, FAILURE, SUCCESS.
    """
    if not self.started:
        return 'PENDING'
    if self.revoked:
        return 'REVOKED'
    if self.skipped:
        return 'SKIPPED'
    if not self.done:
        return 'RUNNING'
    # Done: failure if this runner produced any errors
    if self.self_errors:
        return 'FAILURE'
    return 'SUCCESS'
|
|
332
|
+
|
|
333
|
+
@property
def celery_state(self):
    """Snapshot of the runner state used for Celery status reporting.

    NOTE(review): reads self.context['celery_id'] directly — raises KeyError if
    the context has no 'celery_id'; confirm callers always set it.
    """
    return {
        'name': self.config.name,
        'full_name': self.unique_name,
        'state': self.status,
        'progress': self.progress,
        'results': self.self_results,
        'chunk': self.chunk,
        'chunk_count': self.chunk_count,
        'chunk_info': f'{self.chunk}/{self.chunk_count}' if self.chunk and self.chunk_count else '',
        'celery_id': self.context['celery_id'],
        'count': self.self_findings_count,
        'descr': self.description
    }
|
|
348
|
+
|
|
349
|
+
@property
def reports_folder(self):
    """Folder where runner reports are written; created (with .inputs/.outputs) on first access."""
    # Reuse an explicitly-provided folder if it still exists
    if self._reports_folder and Path(self._reports_folder).exists():
        return self._reports_folder
    # Layout: <reports_dir>/<workspace>/<runner_type>s/<incrementing id>
    _base = f'{CONFIG.dirs.reports}/{self.workspace_name}/{self.config.type}s'
    _id = get_task_folder_id(_base)
    path = Path(f'{_base}/{_id}')
    path_inputs = path / '.inputs'
    path_outputs = path / '.outputs'
    if not path.exists():
        self.debug(f'creating reports folder {path}', sub='start')
        path.mkdir(parents=True, exist_ok=True)
        path_inputs.mkdir(exist_ok=True)
        path_outputs.mkdir(exist_ok=True)
    # Cache the resolved absolute path for subsequent accesses
    self._reports_folder = path.resolve()
    return self._reports_folder
|
|
365
|
+
|
|
366
|
+
@property
def id(self):
    """Get id from context.

    Returns:
        str: Id (task, workflow or scan id — first one set in context).
    """
    value = self.context.get('task_id', '')
    if not value:
        value = self.context.get('workflow_id', '')
    if not value:
        value = self.context.get('scan_id', '')
    return value
|
|
374
|
+
|
|
375
|
+
@property
def ancestor_id(self):
    """Get ancestor id from context.

    Returns:
        str: Ancestor id, or None when the context has none.
    """
    return self.context.get('ancestor_id', None)
|
|
383
|
+
|
|
384
|
+
def run(self):
    """Run the runner to completion and collect its results.

    Returns:
        List[OutputType]: List of runner results.
    """
    return [result for result in self]
|
|
391
|
+
|
|
392
|
+
def __iter__(self):
    """Process results from derived runner class in real-time and yield results.

    Yields:
        OutputType: runner result.
    """
    try:
        # If sync mode, set started
        if self.sync:
            self.mark_started()

        # Yield results buffer (items queued during __init__, e.g. prior results/targets)
        yield from self.results_buffer
        self.results_buffer = []

        # If any errors happened during validation, exit
        # NOTE(review): _finalize() runs here AND in the finally block on this
        # path — confirm double-finalization is harmless.
        if self.self_errors:
            self._finalize()
            return

        # Loop and process items
        for item in self.yielder():
            yield from self._process_item(item)
            self.run_hooks('on_interval', sub='item')

    except BaseException as e:
        # BaseException so KeyboardInterrupt/SystemExit also trigger remote cleanup
        self.debug(f'encountered exception {type(e).__name__}. Stopping remote tasks.', sub='run')
        error = Error.from_exception(e)
        self.add_result(error)
        self.revoked = True
        if not self.sync:  # yield latest results from Celery
            self.stop_celery_tasks()
            for item in self.yielder():
                yield from self._process_item(item)
                self.run_hooks('on_interval', sub='item')

    finally:
        # Flush anything still buffered, then finalize (threads, reports, ...)
        yield from self.results_buffer
        self.results_buffer = []
        self._finalize()
|
|
432
|
+
|
|
433
|
+
def _finalize(self):
    """Finalize the runner: join worker threads, free memory, complete and export."""
    self.join_threads()
    # Force a GC pass to release memory held by processed results
    gc.collect()
    if self.sync:
        self.mark_completed()
    if self.enable_reports:
        self.export_reports()
|
|
441
|
+
|
|
442
|
+
def join_threads(self):
    """Wait for all running threads to complete, recording any errors they returned."""
    threads = self.threads
    if not threads:
        return
    self.debug(f'waiting for {len(threads)} threads to complete', sub='end')
    for thread in threads:
        err = thread.join()
        if err:
            self.add_result(err)
|
|
451
|
+
|
|
452
|
+
def _run_extractors(self):
    """Run extractors on results and targets.

    Feeds current results/opts/inputs through `run_extractors`, records any
    extraction errors as results, and replaces inputs (deduplicated, sorted)
    and run opts with the extracted values.
    """
    self.debug('running extractors', sub='init')
    ctx = {'opts': DotMap(self.run_opts), 'targets': self.inputs, 'ancestor_id': self.ancestor_id}
    inputs, run_opts, errors = run_extractors(
        self.results,
        self.run_opts,
        self.inputs,
        ctx=ctx,
        dry_run=self.dry_run)
    for error in errors:
        self.add_result(error)
    # Deduplicate and sort for deterministic input ordering
    self.inputs = sorted(list(set(inputs)))
    self.debug(f'extracted {len(self.inputs)} inputs', sub='init')
    self.run_opts = run_opts
|
|
467
|
+
|
|
468
|
+
def add_result(self, item, print=True, output=True, hooks=True, queue=True):
    """Add item to runner results.

    Args:
        item (OutputType): Item.
        print (bool): Whether to print it or not.  # NOTE: shadows the builtin `print`
        output (bool): Whether to add it to the output or not.
        hooks (bool): Whether to run hooks on the item.
        queue (bool): Whether to queue the item for later processing.
    """
    # Skip items already recorded (by uuid)
    if item._uuid and item._uuid in self.uuids:
        return

    # Update context with runner info (item's own context keys win over the runner's)
    ctx = item._context.copy()
    item._context = self.context.copy()
    item._context.update(ctx)
    item._context['ancestor_id'] = ctx.get('ancestor_id') or self.ancestor_id

    # Set uuid
    if not item._uuid:
        item._uuid = str(uuid.uuid4())

    # Set source
    if not item._source:
        item._source = self.unique_name

    # Check for state updates: a State item for our own Celery task updates
    # started/done/progress/timestamps and is NOT stored as a result
    if isinstance(item, State) and self.celery_result and item.task_id == self.celery_result.id:
        self.debug(f'update runner state from remote state: {item.state}', sub='item')
        if item.state in ['FAILURE', 'SUCCESS', 'REVOKED']:
            self.started = True
            self.done = True
            self.progress = 100
            self.end_time = datetime.fromtimestamp(time())
        elif item.state in ['RUNNING']:
            self.started = True
            self.start_time = datetime.fromtimestamp(time())
            self.end_time = None
        self.last_updated_celery = item._timestamp
        return

    # If progress item, update runner progress (throttled by config frequency)
    elif isinstance(item, Progress) and item._source == self.unique_name:
        self.debug(f'update runner progress: {item.percent}', sub='item', verbose=True)
        if not should_update(CONFIG.runners.progress_update_frequency, self.last_updated_progress, item._timestamp):
            return
        self.progress = item.percent
        self.last_updated_progress = item._timestamp

    # If info item and task_id is defined, update runner celery_ids
    elif isinstance(item, Info) and item.task_id and item.task_id not in self.celery_ids:
        self.debug(f'update runner celery_ids from remote: {item.task_id}', sub='item')
        self.celery_ids.append(item.task_id)

    # If output type, run on_item hooks (hooks may transform or drop the item)
    elif isinstance(item, tuple(OUTPUT_TYPES)) and hooks:
        item = self.run_hooks('on_item', item, sub='item')
        if not item:
            return

    # Add item to results
    self.uuids.add(item._uuid)
    self.results.append(item)
    self.results_count += 1
    # Stat/progress items are transient: keep them out of the textual output
    if output and item._type not in ['stat', 'progress']:
        self.output += repr(item) + '\n'
    if print:
        self._print_item(item)
    if queue:
        self.results_buffer.append(item)
|
|
539
|
+
|
|
540
|
+
def add_subtask(self, task_id, task_name, task_description):
    """Register a Celery subtask with this runner for tracking purposes.

    Args:
        task_id (str): Celery task id.
        task_name (str): Task name.
        task_description (str): Task description.
    """
    self.celery_ids.append(task_id)
    entry = {
        'id': task_id,
        'name': task_name,
        'full_name': task_name,
        'descr': task_description,
        'state': 'PENDING',
        'count': 0,
        'progress': 0,
    }
    self.celery_ids_map[task_id] = entry
|
|
558
|
+
|
|
559
|
+
def _print_item(self, item, force=False):
    """Print an item and add it to the runner's output.

    Routes the item to JSON-lines, raw (pipe-friendly) or rich repr output
    depending on the runner's print_* flags.

    Args:
        item (str | OutputType): Secator output type to print.
        force (bool): Whether to force-print it.
    """
    item_str = str(item)

    # Item is an output type
    if isinstance(item, OutputType):
        _type = item._type
        # Per-type toggle: a print_<type> attribute set to False suppresses the item
        print_this_type = getattr(self, f'print_{_type}', True)
        self.debug(item, lazy=lambda x: repr(x), sub='item', allow_no_process=False, verbose=print_this_type)
        if not print_this_type:
            return

        if self.print_item or force:
            item_out = sys.stdout

            # JSON lines output
            if self.print_json:
                self._print(item, out=sys.stdout)
                item_out = None  # suppress item repr output to stdout

            # Raw output
            elif self.print_raw:
                # When piping, keep the repr on stderr so stdout stays machine-readable
                item_out = sys.stderr if self.piped_output else None

                # Format raw output with custom item fields
                if self.print_fmt:
                    try:
                        item_str = item.format(**self.print_fmt)
                    except KeyError:
                        item_str = ''

                # raw output is used to pipe, we should only pipe the first output type of a Runner.
                if not isinstance(item, self.output_types[0]):
                    item_str = ''

                if item_str:
                    self._print(item_str, out=sys.stdout)

            # Repr output
            if item_out:
                item_repr = repr(item)
                if self.print_remote_info and item._source:
                    item_repr += rich_to_ansi(rf' \[[dim]{item._source}[/]]')
                # item_repr += f' ({self.__class__.__name__}) ({item._uuid}) ({item._context.get("ancestor_id")})'  # for debugging
                self._print(item_repr, out=item_out)

    # Item is a line
    elif isinstance(item, str):
        self.debug(item, sub='line.print', allow_no_process=False, verbose=True)
        if self.print_line or force:
            self._print(item, out=sys.stderr, end='\n', rich=False)
|
|
615
|
+
|
|
616
|
+
def debug(self, *args, **kwargs):
	"""Emit a debug message scoped to this runner.

	The 'sub' kwarg is prefixed with 'runner.<ClassName>' so debug output can
	be filtered per runner class. Skipped when self.no_process is set, unless
	the caller passes allow_no_process=True.

	Args:
		args (list): Positional args forwarded to the global debug().
		kwargs (dict): Keyword args forwarded to the global debug().
	"""
	allow_no_process = kwargs.pop('allow_no_process', True)
	if self.no_process and not allow_no_process:
		return
	scope = f'runner.{self.__class__.__name__}'
	suffix = kwargs.get('sub')
	if suffix:
		scope = f'{scope}.{suffix}'
	kwargs['sub'] = scope
	# Tag async runs with the runner id so worker logs can be correlated.
	if self.id and not self.sync:
		kwargs['id'] = self.id
	debug(*args, **kwargs)
|
|
634
|
+
|
|
635
|
+
def mark_duplicates(self):
	"""Check for duplicates and mark items as duplicates.

	Each result is compared against the rest of self.results concurrently in
	a thread pool; items are flagged in-place by check_duplicate(). No-op
	when duplicate checking is disabled.
	"""
	if not self.enable_duplicate_check:
		return
	self.debug('running duplicate check', sub='end')
	import concurrent.futures
	# Fix: use the executor as a context manager so the pool is always shut
	# down (waiting for pending checks) even if a submit raises. Iterate a
	# copy since check_duplicate mutates items in self.results.
	with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
		for item in self.results.copy():
			executor.submit(self.check_duplicate, item)
|
|
651
|
+
|
|
652
|
+
def check_duplicate(self, item):
	"""Check if an item is a duplicate in the list of results and mark it like so.

	Args:
		item (OutputType): Secator output type.
	"""
	self.debug('running duplicate check for item', obj=item.toDict(), obj_breaklines=True, sub='item.duplicate', verbose=True)  # noqa: E501
	# Equal items with a different uuid are candidate duplicates.
	others = [f for f in self.results if f == item and f._uuid != item._uuid]
	if others:
		# The "main" item is selected via the output type's ordering (max);
		# everything else becomes a duplicate of it.
		main = max(item, *others)
		dupes = [f for f in others if f._uuid != main._uuid]
		main._duplicate = False
		main._related.extend([dupe._uuid for dupe in dupes])
		# De-duplicate the related list while preserving insertion order.
		main._related = list(dict.fromkeys(main._related))
		if main._uuid != item._uuid:
			self.debug(f'found {len(others)} duplicates for', obj=item.toDict(), obj_breaklines=True, sub='item.duplicate', verbose=True)  # noqa: E501
			item._duplicate = True
			item = self.run_hooks('on_item', item, sub='item.duplicate')
			if item._uuid not in main._related:
				main._related.append(item._uuid)
			# Run duplicate hooks on both the winning item and the duplicate.
			main = self.run_hooks('on_duplicate', main, sub='item.duplicate')
			item = self.run_hooks('on_duplicate', item, sub='item.duplicate')

		# Flag any sibling duplicates that were not already marked.
		for dupe in dupes:
			if not dupe._duplicate:
				self.debug(
					'found new duplicate', obj=dupe.toDict(), obj_breaklines=True,
					sub='item.duplicate', verbose=True)
				dupe._duplicate = True
				dupe = self.run_hooks('on_duplicate', dupe, sub='item.duplicate')
|
|
682
|
+
|
|
683
|
+
def yielder(self):
	"""Base yielder implementation.

	This should be overridden by derived classes if they need custom behavior.
	Otherwise, they can implement build_celery_workflow() and get standard behavior.

	Yields:
		secator.output_types.OutputType: Secator output type.
	"""
	# If existing celery result, yield from it (reconnecting to a running task)
	if self.celery_result:
		yield from CeleryData.iter_results(
			self.celery_result,
			ids_map=self.celery_ids_map,
			description=True,
			revoked=self.revoked,
			print_remote_info=self.print_remote_info,
			print_remote_title=f'[bold gold3]{self.__class__.__name__.capitalize()}[/] [bold magenta]{self.name}[/] results'
		)
		return

	# Build Celery workflow
	self.debug('building celery workflow', sub='start')
	workflow = self.build_celery_workflow()
	self.print_target = False

	# Run workflow and get results
	if self.sync:
		# Sync mode: run the workflow eagerly in-process and collect results.
		self.print_item = False
		self.debug('running workflow in sync mode', sub='start')
		results = workflow.apply().get()
	else:
		# Async mode: submit to the worker and stream results back.
		self.debug('running workflow in async mode', sub='start')
		self.celery_result = workflow()
		self.celery_ids.append(str(self.celery_result.id))
		yield Info(
			message=f'Celery task created: {self.celery_result.id}',
			task_id=self.celery_result.id
		)
		# no_poll: fire-and-forget — do not wait for or process remote results.
		if self.no_poll:
			self.enable_reports = False
			self.no_process = True
			return
		results = CeleryData.iter_results(
			self.celery_result,
			ids_map=self.celery_ids_map,
			description=True,
			print_remote_info=self.print_remote_info,
			print_remote_title=f'[bold gold3]{self.__class__.__name__.capitalize()}[/] [bold magenta]{self.name}[/] results'
		)

	# Yield results
	yield from results
|
|
736
|
+
|
|
737
|
+
def build_celery_workflow(self):
	"""Build Celery workflow.

	This should be implemented by derived classes; yielder() calls it to get
	the workflow to execute.

	Returns:
		celery.Signature: Celery task signature.

	Raises:
		NotImplementedError: Always, unless overridden by a derived class.
	"""
	raise NotImplementedError("Derived classes must implement build_celery_workflow()")
|
|
746
|
+
|
|
747
|
+
def toDict(self):
	"""Serialize the runner state to a plain dict.

	Returns:
		dict: JSON-serializable snapshot of the runner (timings, config,
			options, status, output and collected errors/warnings).
	"""
	return {
		'name': self.name,
		'status': self.status,
		'targets': self.inputs,
		'start_time': self.start_time,
		'end_time': self.end_time,
		'elapsed': self.elapsed.total_seconds(),
		'elapsed_human': self.elapsed_human,
		'run_opts': self.resolved_opts,
		'config': self.config.toDict(),
		'opts': self.config.supported_opts,
		'has_parent': self.has_parent,
		'has_children': self.has_children,
		'chunk': self.chunk,
		'chunk_count': self.chunk_count,
		'sync': self.sync,
		'done': self.done,
		'output': self.output,
		'progress': self.progress,
		'last_updated_db': self.last_updated_db,
		'context': self.context,
		'errors': [e.toDict() for e in self.errors],
		'warnings': [w.toDict() for w in self.warnings],
	}
|
|
776
|
+
|
|
777
|
+
def run_hooks(self, hook_type, *args, sub='hooks'):
	"""Run hooks of a certain type.

	Hooks are called as hook(self, *args); the return value of the last
	executed hook is returned (seeded with the first positional arg, so item
	hooks that return nothing effectively drop the item).

	Args:
		hook_type (str): Hook type.
		args (list): List of arguments to pass to the hook.
		sub (str): Debug id.

	Returns:
		any: Hook return value.
	"""
	result = args[0] if len(args) > 0 else None
	if self.no_process:
		self.debug('hook skipped (no_process)', obj={'name': hook_type}, sub=sub, verbose=True)  # noqa: E501
		return result
	if self.dry_run:
		self.debug('hook skipped (dry_run)', obj={'name': hook_type}, sub=sub, verbose=True)  # noqa: E501
		return result
	for hook in self.resolved_hooks[hook_type]:
		fun = self.get_func_path(hook)
		try:
			if hook_type == 'on_interval' and not should_update(CONFIG.runners.backend_update_frequency, self.last_updated_db):
				self.debug('hook skipped (backend update frequency)', obj={'name': hook_type, 'fun': fun}, sub=sub, verbose=True)  # noqa: E501
				# NOTE(review): bare `return` here yields None (not `result`) and
				# aborts the remaining hooks — presumably intentional throttling
				# for interval hooks, but worth confirming.
				return
			if not self.enable_hooks or self.no_process:
				self.debug('hook skipped (disabled hooks or no_process)', obj={'name': hook_type, 'fun': fun}, sub=sub, verbose=True)  # noqa: E501
				continue
			result = hook(self, *args)
			self.debug('hook success', obj={'name': hook_type, 'fun': fun}, sub=sub, verbose='item' in sub)  # noqa: E501
		except Exception as e:
			# A failing hook is recorded as an Error result; the loop continues
			# with the next hook unless raise_on_error is set.
			self.debug('hook failed', obj={'name': hook_type, 'fun': fun}, sub=sub)  # noqa: E501
			error = Error.from_exception(e, message=f'Hook "{fun}" execution failed')
			if self.raise_on_error:
				raise e
			self.add_result(error, hooks=False)
	return result
|
|
813
|
+
|
|
814
|
+
def run_validators(self, validator_type, *args, error=True, sub='validators'):
	"""Run validators of a certain type.

	Args:
		validator_type (str): Validator type. E.g: on_start.
		args (list): List of arguments to pass to the validator.
		error (bool): Whether to add an error to runner results if the validator failed.
		sub (str): Debug id.

	Returns:
		bool: True when all validators pass (or validation is skipped), False otherwise.
	"""
	# Determine whether validation should be skipped entirely.
	skip_reason = None
	if self.no_process:
		skip_reason = 'no_process'
	elif self.dry_run:
		skip_reason = 'dry_run'
	elif not self.enable_validators:
		skip_reason = 'disabled validators'
	if skip_reason:
		self.debug(f'validator skipped ({skip_reason})', obj={'name': validator_type}, sub=sub, verbose=True)  # noqa: E501
		return True

	for validator in self.resolved_validators[validator_type]:
		fun = self.get_func_path(validator)
		if validator(self, *args):
			self.debug('validator success', obj={'name': validator_type, 'fun': fun}, sub=sub)  # noqa: E501
			continue
		# First failing validator aborts; its docstring becomes the error detail.
		self.debug('validator failed', obj={'name': validator_type, 'fun': fun}, sub=sub)  # noqa: E501
		if error:
			message = 'Validator failed'
			if validator.__doc__:
				message += f': {validator.__doc__}'
			self.add_result(Error(message=message), print=False)
		return False
	return True
|
|
849
|
+
|
|
850
|
+
def register_hooks(self, hooks):
	"""Register class-level and user-supplied hooks on this runner.

	Args:
		hooks (dict[str, List[Callable]]): Hooks to register, keyed either by
			runner class (then by hook name) or directly by hook name.
	"""
	for key in self.resolved_hooks:
		# Hooks defined directly on the class (or a derived class).
		class_hook = getattr(self, key, None)
		if class_hook:
			self.debug('hook registered', obj={'name': key, 'fun': self.get_func_path(class_hook)}, sub='init')
			self.resolved_hooks[key].append(class_hook)

		# User hooks: class-scoped entries first, then globally-keyed ones.
		user_hooks = hooks.get(self.__class__, {}).get(key, [])
		user_hooks.extend(hooks.get(key, []))
		for hook in user_hooks:
			self.debug('hook registered', obj={'name': key, 'fun': self.get_func_path(hook)}, sub='init')
		self.resolved_hooks[key].extend(user_hooks)
|
|
871
|
+
|
|
872
|
+
def register_validators(self, validators):
	"""Register class-level and user-supplied validators on this runner.

	Args:
		validators (dict[str, List[Callable]]): Validators to register, keyed by validator name.
	"""
	for key in self.resolved_validators:
		# Validators defined directly on the class (or a derived class).
		class_validator = getattr(self, key, None)
		if class_validator:
			self.resolved_validators[key].append(class_validator)
			self.debug('validator registered', obj={'name': key, 'fun': self.get_func_path(class_validator)}, sub='init')

		# User-supplied validators keyed by validator name.
		user_validators = validators.get(key, [])
		for validator in user_validators:
			self.debug('validator registered', obj={'name': key, 'fun': self.get_func_path(validator)}, sub='init')
		self.resolved_validators[key].extend(user_validators)
|
|
892
|
+
|
|
893
|
+
def mark_started(self):
	"""Flag the runner as started (idempotent) and fire 'on_start' hooks."""
	if self.started:
		return
	self.started = True
	self.start_time = datetime.fromtimestamp(time())
	self.debug(f'started (sync: {self.sync}, hooks: {self.enable_hooks}), chunk: {self.chunk}, chunk_count: {self.chunk_count}', sub='start')  # noqa: E501
	self.log_start()
	self.run_hooks('on_start', sub='start')
|
|
902
|
+
|
|
903
|
+
def mark_completed(self):
	"""Flag the runner as completed (idempotent) and run end-of-life steps.

	Marks duplicates, fires 'on_end' hooks, exports the profiler output and
	logs the final results summary.
	"""
	if self.done:
		return
	self.started = True
	self.done = True
	self.progress = 100
	self.end_time = datetime.fromtimestamp(time())
	self.debug(f'completed (status: {self.status}, sync: {self.sync}, reports: {self.enable_reports}, hooks: {self.enable_hooks})', sub='end')  # noqa: E501
	self.mark_duplicates()
	self.run_hooks('on_end', sub='end')
	self.export_profiler()
	self.log_results()
|
|
916
|
+
|
|
917
|
+
def log_start(self):
	"""Log runner start (execution tree + start banner), unless suppressed."""
	if not self.print_start:
		return
	# Child runners stay quiet; the parent prints for the whole tree.
	if self.has_parent:
		return
	if self.config.type != 'task':
		# Non-task runners (workflows / scans) print their execution tree first.
		tree = textwrap.indent(build_runner_tree(self.config).render_tree(), ' ')
		info = Info(message=f'{self.config.type.capitalize()} built:\n{tree}', _source=self.unique_name)
		self._print(info, rich=True)
	remote_str = 'started' if self.sync else 'started in worker'
	msg = f'{self.config.type.capitalize()} {format_runner_name(self)}'
	if self.description:
		msg += f' ([dim]{self.description}[/])'
	info = Info(message=f'{msg} {remote_str}', _source=self.unique_name)
	self._print(info, rich=True)
|
|
933
|
+
|
|
934
|
+
def log_results(self):
	"""Print the end-of-run summary line, unless output is suppressed."""
	# Child runners stay quiet; the parent prints the summary.
	if not self.print_end or self.has_parent:
		return
	message = (
		f'{self.config.type.capitalize()} {format_runner_name(self)} finished with status '
		f'[bold {STATE_COLORS[self.status]}]{self.status}[/] and found '
		f'[bold]{len(self.findings)}[/] findings'
	)
	self._print(Info(message=message), rich=True)
|
|
948
|
+
|
|
949
|
+
def export_reports(self):
	"""Build and send reports through the configured exporters.

	No-op when reports are disabled, there are no exporters, or the runner
	runs in no-process / dry-run mode.
	"""
	if not (self.enable_reports and self.exporters) or self.no_process or self.dry_run:
		return
	if self.print_end:
		names = [f'[bold cyan]{e.__name__.replace("Exporter", "").lower()}[/]' for e in self.exporters]
		self._print(Info(message=f'Exporting results with exporters: {", ".join(names)}'), rich=True)
	report = Report(self, exporters=self.exporters)
	report.build()
	report.send()
	self.report = report
|
|
959
|
+
|
|
960
|
+
def export_profiler(self):
	"""Stop the pyinstrument profiler and write its HTML report, if enabled."""
	if not self.enable_pyinstrument:
		return
	self.debug('stopping profiler', sub='end')
	self.profiler.stop()
	profile_path = Path(self.reports_folder) / f'{self.unique_name}_profile.html'
	# Equivalent to open('w', encoding='utf-8') + write.
	profile_path.write_text(self.profiler.output_html(), encoding='utf-8')
	self._print_item(Info(message=f'Wrote profile to {str(profile_path)}'), force=True)
|
|
969
|
+
|
|
970
|
+
def stop_celery_tasks(self):
	"""Revoke every Celery task spawned by this runner."""
	# Imported lazily to avoid a circular import with secator.celery.
	from secator.celery import revoke_task
	for task_id in self.celery_ids:
		task_meta = self.celery_ids_map.get(task_id, {})
		revoke_task(task_id, task_meta.get('full_name'))
|
|
976
|
+
|
|
977
|
+
def _convert_item_schema(self, item):
	"""Convert dict item to a secator output type.

	Args:
		item (dict): Dict item.

	Returns:
		OutputType: Loaded item (a Warning when no output type matches).
	"""
	# Skip if already converted
	if isinstance(item, OutputType):
		return item

	# Init the new item and the list of output types to load from
	new_item = None
	output_types = getattr(self, 'output_types', [])
	self.debug(f'input item: {item}', sub='item.convert', verbose=True)

	# Use a function to pick proper output types (takes precedence over _type key)
	output_discriminator = getattr(self, 'output_discriminator', None)
	if output_discriminator:
		result = output_discriminator(item)
		if result:
			self.debug('discriminated output type with output_discriminator', sub='item.convert', verbose=True)
			output_types = [result]
		else:
			# Discriminator rejected the item: nothing to try, falls through to Warning below.
			output_types = []

	# Use the _type key to pick proper output type
	elif '_type' in item:
		otypes = [o for o in output_types if o.get_name() == item['_type']]
		if otypes:
			output_types = [otypes[0]]
			self.debug('discriminated output type with _type key', sub='item.convert', verbose=True)

	# Load item using picked output types; first type that loads wins.
	self.debug(f'output types to try: {[str(o) for o in output_types]}', sub='item.convert', verbose=True)
	for klass in output_types:
		self.debug(f'loading item as {str(klass)}', sub='item.convert', verbose=True)
		output_map = getattr(self, 'output_map', {}).get(klass, {})
		try:
			new_item = klass.load(item, output_map)
			self.debug(f'successfully loaded item as {str(klass)}', sub='item.convert', verbose=True)
			break
		except (TypeError, KeyError) as e:
			# Load failures are expected while probing types; try the next one.
			self.debug(
				f'failed loading item as {str(klass)}: {type(e).__name__}: {str(e)}.',
				sub='item.convert', verbose=True)
			continue

	if not new_item:
		new_item = Warning(message=f'Failed to load item as output type:\n {item}')

	self.debug(f'output item: {new_item.toDict()}', sub='item.convert', verbose=True)

	return new_item
|
|
1035
|
+
|
|
1036
|
+
def _print(self, data, color=None, out=sys.stderr, rich=False, end='\n'):
	"""Print function.

	Args:
		data (str or dict): Input data.
		color (str, Optional): Rich color.
		out (str, Optional): Output pipe (sys.stderr, sys.stdout, ...)
		rich (bool, Optional): Force rich output.
		end (str, Optional): End of line.
	"""
	if rich:
		# Use the stdout console only when explicitly targeting stdout so
		# piped stdout stays clean of decorations.
		_console = console_stdout if out == sys.stdout else console
		_console.print(data, highlight=False, style=color, soft_wrap=True, end=end)
	else:
		# Non-rich mode: serialize output types / dicts as JSON lines.
		if isinstance(data, (OutputType, dict)):
			if getattr(data, 'toDict', None):
				data = data.toDict()
			data = json.dumps(data, default=str)
		print(data, file=out)
|
|
1056
|
+
|
|
1057
|
+
def _get_findings_count(self):
	"""Count findings currently in results, grouped by finding type.

	Returns:
		dict[str, int]: Finding type name -> number of occurrences (only
			types with at least one finding are included).
	"""
	counts = {}
	for output_type in FINDING_TYPES:
		total = sum(1 for r in self.results if isinstance(r, output_type))
		if total:
			counts[output_type.get_name()] = total
	return counts
|
|
1070
|
+
|
|
1071
|
+
def _process_item(self, item, print=True, output=True):
	"""Process an item yielded by the derived runner.

	Args:
		item (dict | str): Input item.
		print (bool): Print item in console.
		output (bool): Add to runner output.

	Yields:
		OutputType: Output type.
	"""
	# Item is a raw output line: optionally record and print it, then stop.
	# Fix: the original used conditional expressions as statements
	# (`self.output += item + '\n' if output else ''`), relying on precedence
	# and no-op branches; explicit statements are equivalent and clearer.
	if isinstance(item, str):
		if output:
			self.output += item + '\n'
		if item and print:
			self._print_item(item)
		return

	# Abort further processing if no_process is set
	if self.no_process:
		return

	# Run item validators
	if not self.run_validators('validate_item', item, error=False, sub='item'):
		return

	# Convert output dict to another schema
	if isinstance(item, dict):
		item = self.run_hooks('on_item_pre_convert', item, sub='item')
		# A pre-convert hook may drop the item by returning a falsy value.
		if not item:
			return
		item = self._convert_item_schema(item)

	# Add item to results
	self.add_result(item, print=print, queue=False)

	# Yield item
	yield item
|
|
1108
|
+
|
|
1109
|
+
@staticmethod
def _validate_inputs(self, inputs):
	"""Input type is not supported by runner"""
	# NOTE: the docstring above is user-facing — run_validators() embeds a
	# failing validator's __doc__ in its error message, so keep it short.
	# Static method that still takes `self`: validators are invoked as
	# validator(self, *args) from run_validators(), so `self` is passed explicitly.
	supported_types = ', '.join(self.config.input_types) if self.config.input_types else 'any'
	for _input in inputs:
		input_type = autodetect_type(_input)
		if self.config.input_types and input_type not in self.config.input_types:
			message = (
				f'Validator failed: target [bold blue]{_input}[/] of type [bold green]{input_type}[/] '
				f'is not supported by [bold gold3]{self.unique_name}[/]. Supported types: [bold green]{supported_types}[/]'
			)
			message += '. Removing from current inputs (runner context)'
			warning = Warning(message=message)
			# NOTE(review): removes from self.inputs while iterating `inputs` —
			# if callers pass self.inputs itself this can skip elements; confirm
			# callers pass a copy.
			self.inputs.remove(_input)
			self.add_result(warning)
	# Always "passes": unsupported inputs are dropped with a warning instead of failing the run.
	return True
|
|
1125
|
+
|
|
1126
|
+
@staticmethod
|
|
1127
|
+
def resolve_exporters(exporters):
|
|
1128
|
+
"""Resolve exporters from output options.
|
|
1129
|
+
|
|
1130
|
+
Args:
|
|
1131
|
+
exporters (list[str]): List of exporters to resolve.
|
|
1132
|
+
|
|
1133
|
+
Returns:
|
|
1134
|
+
list: List of exporter classes.
|
|
1135
|
+
"""
|
|
1136
|
+
if not exporters or exporters in ['false', 'False']:
|
|
1137
|
+
return []
|
|
1138
|
+
if isinstance(exporters, str):
|
|
1139
|
+
exporters = exporters.split(',')
|
|
1140
|
+
classes = [
|
|
1141
|
+
import_dynamic('secator.exporters', f'{o.capitalize()}Exporter')
|
|
1142
|
+
for o in exporters
|
|
1143
|
+
if o
|
|
1144
|
+
]
|
|
1145
|
+
return [cls for cls in classes if cls]
|
|
1146
|
+
|
|
1147
|
+
def resolve_profiles(self, profiles):
	"""Resolve profiles and update run options.

	Args:
		profiles (list[str] | str): Profile names (list or comma-separated string).

	Returns:
		list: List of resolved profile templates (empty when none apply).
	"""
	# Return if profiles are disabled
	if not self.enable_profiles:
		return []

	# Split profiles if comma separated
	if isinstance(profiles, str):
		profiles = profiles.split(',')
	else:
		# Fix: copy so adding default profiles does not mutate the caller's list.
		profiles = list(profiles)

	# Add default profiles
	for p in CONFIG.profiles.defaults:
		if p not in profiles:
			profiles.append(p)

	# Abort if no profiles
	if not profiles:
		return []

	# Get profile configs
	templates = []
	profile_configs = get_configs_by_type('profile')
	for pname in profiles:
		matches = [p for p in profile_configs if p.name == pname]
		if not matches:
			self._print(Warning(message=f'Profile "{pname}" was not found. Run [bold green]secator profiles list[/] to see available profiles.'), rich=True)  # noqa: E501
		else:
			templates.append(matches[0])

	if not templates:
		self.debug('no profiles loaded', sub='init')
		# Fix: return a list (was a bare `return`, i.e. None) so every exit
		# path matches the documented list return type.
		return []

	# Put enforced profiles last so their options win when merging
	enforced_templates = [p for p in templates if p.enforce]
	non_enforced_templates = [p for p in templates if not p.enforce]
	templates = non_enforced_templates + enforced_templates
	profile_opts = {}
	for profile in templates:
		self.debug(f'profile {profile.name} opts (enforced: {profile.enforce}): {profile.opts}', sub='init')
		enforced = profile.enforce or False
		description = profile.description or ''
		if enforced:
			profile_opts.update(profile.opts)
		else:
			# Non-enforced profiles never override options the user passed explicitly.
			profile_opts.update({k: self.run_opts.get(k) or v for k, v in profile.opts.items()})
		if self.print_profiles:
			msg = f'Loaded profile [bold pink3]{profile.name}[/]'
			if description:
				msg += f' ([dim]{description}[/])'
			if enforced:
				msg += ' [bold red](enforced)[/]'
			profile_opts_str = ", ".join([f'[bold yellow3]{k}[/]=[dim yellow3]{v}[/]' for k, v in profile.opts.items()])
			msg += rf' \[[dim]{profile_opts_str}[/]]'
			self._print(Info(message=msg), rich=True)
	if profile_opts:
		self.run_opts.update(profile_opts)
	return templates
|
|
1214
|
+
|
|
1215
|
+
@classmethod
def get_func_path(cls, func):
	"""Return the fully-qualified dotted path of a function or method.

	Handles bound methods, classmethods, staticmethods and plain functions
	by inspecting __self__ and __qualname__.

	Args:
		func (function, method, or staticmethod): A function or method object.

	Returns:
		str: Dotted path like 'module.Class.name' or 'module.name'.
	"""
	module = func.__module__
	if hasattr(func, '__self__'):
		bound_to = func.__self__
		if bound_to is not None:
			# Bound to an instance: owning class comes from the instance.
			class_name = type(bound_to).__name__
		else:
			# Bound to a class (classmethod): owning class comes from __qualname__.
			class_name = func.__qualname__.rsplit('.', 1)[0]
		return f"{module}.{class_name}.{func.__name__}"
	# Unbound: staticmethods / functions defined in a class carry the class in __qualname__.
	qualname = func.__qualname__
	if '.' in qualname:
		class_name, func_name = qualname.rsplit('.', 1)
		return f"{module}.{class_name}.{func_name}"
	return f"{module}.{func.__name__}"
|