secator 0.5.2__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- secator/celery.py +160 -185
- secator/celery_utils.py +268 -0
- secator/cli.py +327 -106
- secator/config.py +27 -11
- secator/configs/workflows/host_recon.yaml +5 -3
- secator/configs/workflows/port_scan.yaml +7 -3
- secator/configs/workflows/url_bypass.yaml +10 -0
- secator/configs/workflows/url_vuln.yaml +1 -1
- secator/decorators.py +169 -92
- secator/definitions.py +10 -3
- secator/exporters/__init__.py +7 -5
- secator/exporters/console.py +10 -0
- secator/exporters/csv.py +27 -19
- secator/exporters/gdrive.py +16 -11
- secator/exporters/json.py +3 -1
- secator/exporters/table.py +30 -2
- secator/exporters/txt.py +20 -16
- secator/hooks/gcs.py +53 -0
- secator/hooks/mongodb.py +54 -28
- secator/output_types/__init__.py +29 -11
- secator/output_types/_base.py +11 -1
- secator/output_types/error.py +36 -0
- secator/output_types/exploit.py +1 -1
- secator/output_types/info.py +24 -0
- secator/output_types/ip.py +7 -0
- secator/output_types/port.py +8 -1
- secator/output_types/progress.py +6 -1
- secator/output_types/record.py +3 -1
- secator/output_types/stat.py +33 -0
- secator/output_types/tag.py +6 -4
- secator/output_types/url.py +6 -3
- secator/output_types/vulnerability.py +3 -2
- secator/output_types/warning.py +24 -0
- secator/report.py +55 -23
- secator/rich.py +44 -39
- secator/runners/_base.py +622 -635
- secator/runners/_helpers.py +5 -91
- secator/runners/celery.py +18 -0
- secator/runners/command.py +364 -211
- secator/runners/scan.py +8 -24
- secator/runners/task.py +21 -55
- secator/runners/workflow.py +41 -40
- secator/scans/__init__.py +28 -0
- secator/serializers/dataclass.py +6 -0
- secator/serializers/json.py +10 -5
- secator/serializers/regex.py +12 -4
- secator/tasks/_categories.py +6 -3
- secator/tasks/bbot.py +293 -0
- secator/tasks/bup.py +98 -0
- secator/tasks/cariddi.py +38 -49
- secator/tasks/dalfox.py +3 -0
- secator/tasks/dirsearch.py +12 -23
- secator/tasks/dnsx.py +49 -30
- secator/tasks/dnsxbrute.py +2 -0
- secator/tasks/feroxbuster.py +8 -17
- secator/tasks/ffuf.py +3 -2
- secator/tasks/fping.py +3 -3
- secator/tasks/gau.py +5 -0
- secator/tasks/gf.py +2 -2
- secator/tasks/gospider.py +4 -0
- secator/tasks/grype.py +9 -9
- secator/tasks/h8mail.py +31 -41
- secator/tasks/httpx.py +58 -21
- secator/tasks/katana.py +18 -22
- secator/tasks/maigret.py +26 -24
- secator/tasks/mapcidr.py +2 -3
- secator/tasks/msfconsole.py +4 -16
- secator/tasks/naabu.py +3 -1
- secator/tasks/nmap.py +50 -35
- secator/tasks/nuclei.py +9 -2
- secator/tasks/searchsploit.py +17 -9
- secator/tasks/subfinder.py +5 -1
- secator/tasks/wpscan.py +79 -93
- secator/template.py +61 -45
- secator/thread.py +24 -0
- secator/utils.py +330 -80
- secator/utils_test.py +48 -23
- secator/workflows/__init__.py +28 -0
- {secator-0.5.2.dist-info → secator-0.7.0.dist-info}/METADATA +12 -6
- secator-0.7.0.dist-info/RECORD +115 -0
- {secator-0.5.2.dist-info → secator-0.7.0.dist-info}/WHEEL +1 -1
- secator-0.5.2.dist-info/RECORD +0 -101
- {secator-0.5.2.dist-info → secator-0.7.0.dist-info}/entry_points.txt +0 -0
- {secator-0.5.2.dist-info → secator-0.7.0.dist-info}/licenses/LICENSE +0 -0
secator/celery.py
CHANGED
```diff
@@ -1,22 +1,28 @@
 import gc
 import logging
-import
+import sys
+import uuid
+
+from time import time
 
 from celery import Celery, chain, chord, signals
 from celery.app import trace
-
-# from pyinstrument import Profiler # TODO: make pyinstrument optional
+
 from rich.logging import RichHandler
+from retry import retry
 
 from secator.config import CONFIG
+from secator.output_types import Info, Warning, Error
 from secator.rich import console
 from secator.runners import Scan, Task, Workflow
 from secator.runners._helpers import run_extractors
-from secator.utils import (
-
+from secator.utils import (debug, deduplicate, flatten, should_update)
+
+IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)
 
-
-#
+#---------#
+# Logging #
+#---------#
 
 rich_handler = RichHandler(rich_tracebacks=True)
 rich_handler.setLevel(logging.INFO)
@@ -28,19 +34,18 @@ logging.basicConfig(
 	force=True)
 logging.getLogger('kombu').setLevel(logging.ERROR)
 logging.getLogger('celery').setLevel(logging.INFO if CONFIG.debug.level > 6 else logging.WARNING)
-
 logger = logging.getLogger(__name__)
+trace.LOG_SUCCESS = "Task %(name)s[%(id)s] succeeded in %(runtime)ss"
+
 
-
-
-
+#------------#
+# Celery app #
+#------------#
 
 app = Celery(__name__)
 app.conf.update({
-	#
-	'
-	'worker_prefetch_multiplier': 1,
-	'worker_max_tasks_per_child': 10,
+	# Content types
+	'accept_content': ['application/x-python-serialize', 'application/json'],
 
 	# Broker config
 	'broker_url': CONFIG.celery.broker_url,
@@ -54,30 +59,37 @@ app.conf.update({
 	'broker_pool_limit': CONFIG.celery.broker_pool_limit,
 	'broker_connection_timeout': CONFIG.celery.broker_connection_timeout,
 
-	#
+	# Result backend config
 	'result_backend': CONFIG.celery.result_backend,
+	'result_expires': CONFIG.celery.result_expires,
 	'result_extended': True,
 	'result_backend_thread_safe': True,
+	'result_serializer': 'pickle',
 	# 'result_backend_transport_options': {'master_name': 'mymaster'}, # for Redis HA backend
 
 	# Task config
+	'task_acks_late': False,
+	'task_compression': 'gzip',
+	'task_create_missing_queues': True,
 	'task_eager_propagates': False,
+	'task_reject_on_worker_lost': False,
 	'task_routes': {
 		'secator.celery.run_workflow': {'queue': 'celery'},
 		'secator.celery.run_scan': {'queue': 'celery'},
 		'secator.celery.run_task': {'queue': 'celery'},
 		'secator.hooks.mongodb.tag_duplicates': {'queue': 'mongodb'}
 	},
-	'
-	'
-	'task_create_missing_queues': True,
-	'task_send_sent_event': True,
-
-	# Serialization / compression
-	'accept_content': ['application/x-python-serialize', 'application/json'],
-	'task_compression': 'gzip',
+	'task_store_eager_result': True,
+	# 'task_send_sent_event': True, # TODO: consider enabling this for Flower monitoring
 	'task_serializer': 'pickle',
-
+
+	# Worker config
+	# 'worker_direct': True, # TODO: consider enabling this to allow routing to specific workers
+	'worker_max_tasks_per_child': 10,
+	# 'worker_max_memory_per_child': 100000 # TODO: consider enabling this
+	'worker_pool_restarts': True,
+	'worker_prefetch_multiplier': 1,
+	# 'worker_send_task_events': True, # TODO: consider enabling this for Flower monitoring
 })
 app.autodiscover_tasks(['secator.hooks.mongodb'], related_name=None)
 
@@ -99,9 +111,34 @@ def void(*args, **kwargs):
 	pass
 
 
-
-
-
+@retry(Exception, tries=3, delay=2)
+def update_state(celery_task, task, force=False):
+	"""Update task state to add metadata information."""
+	if task.sync:
+		return
+	if not force and not should_update(CONFIG.runners.backend_update_frequency, task.last_updated_celery):
+		return
+	task.last_updated_celery = time()
+	debug(
+		'',
+		sub='celery.state',
+		id=celery_task.request.id,
+		obj={task.unique_name: task.status, 'count': task.self_findings_count},
+		obj_after=False,
+		verbose=True
+	)
+	return celery_task.update_state(
+		state='RUNNING',
+		meta=task.celery_state
+	)
+
+
+def revoke_task(task_id, task_name=None):
+	message = f'Revoked task {task_id}'
+	if task_name:
+		message += f' ({task_name})'
+	app.control.revoke(task_id, terminate=True)
+	console.print(Info(message=message))
 
 
 #--------------#
@@ -113,23 +150,39 @@ def chunker(seq, size):
 	return (seq[pos:pos + size] for pos in range(0, len(seq), size))
 
 
-def break_task(
+def break_task(task, task_opts, targets, results=[], chunk_size=1):
 	"""Break a task into multiple of the same type."""
 	chunks = targets
 	if chunk_size > 1:
 		chunks = list(chunker(targets, chunk_size))
+	debug(
+		'',
+		obj={task.unique_name: 'CHUNKED', 'chunk_size': chunk_size, 'chunks': len(chunks), 'target_count': len(targets)},
+		obj_after=False,
+		sub='celery.state',
+		verbose=True
+	)
 
 	# Clone opts
 	opts = task_opts.copy()
 
 	# Build signatures
 	sigs = []
+	task.ids_map = {}
 	for ix, chunk in enumerate(chunks):
+		if not isinstance(chunk, list):
+			chunk = [chunk]
 		if len(chunks) > 0: # add chunk to task opts for tracking chunks exec
 			opts['chunk'] = ix + 1
 			opts['chunk_count'] = len(chunks)
-
-
+		task_id = str(uuid.uuid4())
+		opts['has_parent'] = True
+		opts['enable_duplicate_check'] = False
+		sig = type(task).s(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
+		full_name = f'{task.name}_{ix + 1}'
+		task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
+		info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
+		task.add_result(info)
 		sigs.append(sig)
 
 	# Build Celery workflow
@@ -140,15 +193,16 @@ def break_task(task_cls, task_opts, targets, results=[], chunk_size=1):
 			forward_results.s().set(queue='io'),
 		)
 	)
-
+	if task.sync:
+		task.print_item = False
+		task.results = workflow.apply().get()
+	else:
+		result = workflow.apply_async()
+		task.celery_result = result
 
 
 @app.task(bind=True)
 def run_task(self, args=[], kwargs={}):
-	if CONFIG.debug.level > 1:
-		logger.info(f'Received task with args {args} and kwargs {kwargs}')
-	if 'context' not in kwargs:
-		kwargs['context'] = {}
 	kwargs['context']['celery_id'] = self.request.id
 	task = Task(*args, **kwargs)
 	task.run()
@@ -156,10 +210,6 @@ def run_task(self, args=[], kwargs={}):
 
 @app.task(bind=True)
 def run_workflow(self, args=[], kwargs={}):
-	if CONFIG.debug.level > 1:
-		logger.info(f'Received workflow with args {args} and kwargs {kwargs}')
-	if 'context' not in kwargs:
-		kwargs['context'] = {}
 	kwargs['context']['celery_id'] = self.request.id
 	workflow = Workflow(*args, **kwargs)
 	workflow.run()
@@ -167,8 +217,6 @@ def run_workflow(self, args=[], kwargs={}):
 
 @app.task(bind=True)
 def run_scan(self, args=[], kwargs={}):
-	if CONFIG.debug.level > 1:
-		logger.info(f'Received scan with args {args} and kwargs {kwargs}')
 	if 'context' not in kwargs:
 		kwargs['context'] = {}
 	kwargs['context']['celery_id'] = self.request.id
@@ -178,167 +226,94 @@ def run_scan(self, args=[], kwargs={}):
 
 @app.task(bind=True)
 def run_command(self, results, name, targets, opts={}):
-	# profiler = Profiler(interval=0.0001)
-	# profiler.start()
 	chunk = opts.get('chunk')
-	chunk_count = opts.get('chunk_count')
-	description = opts.get('description')
 	sync = opts.get('sync', True)
 
 	# Set Celery request id in context
 	context = opts.get('context', {})
 	context['celery_id'] = self.request.id
 	opts['context'] = context
+	opts['print_remote_info'] = False
+	opts['results'] = results
+
+	# If we are in a Celery worker, print everything, always
+	if IN_CELERY_WORKER_PROCESS:
+		opts.update({
+			'print_item': True,
+			'print_line': True,
+			'print_cmd': True
+		})
+
+	# Flatten + dedupe results
+	results = flatten(results)
+	results = deduplicate(results, attr='_uuid')
 
-	#
-
-
-
-	# Update task state in backend
-	count = 0
-	msg_type = 'error'
-	task_results = []
-	task_state = 'RUNNING'
-	task = None
-	parent = True
-	state = {
-		'state': task_state,
-		'meta': {
-			'name': name,
-			'progress': 0,
-			'results': [],
-			'chunk': chunk,
-			'chunk_count': chunk_count,
-			'count': count,
-			'description': description
-		}
-	}
-	self.update_state(**state)
-	debug('updated', sub='celery.state', id=self.request.id, obj={full_name: 'RUNNING'}, obj_after=False, level=2)
-	# profile_root = Path('/code/.profiles')
-	# profile_root.mkdir(exist_ok=True)
-	# profile_path = f'/code/.profiles/{self.request.id}.bin'
-	# with memray.Tracker(profile_path):
-	try:
-		# Flatten + dedupe results
-		results = flatten(results)
-		results = deduplicate(results, attr='_uuid')
-
-		# Get expanded targets
-		if not chunk:
-			targets, opts = run_extractors(results, opts, targets)
-		if not targets:
-			msg_type = 'info'
-			raise TaskError(f'No targets were specified as input. Skipping. [{self.request.id}]')
+	# Get expanded targets
+	if not chunk and results:
+		targets, opts = run_extractors(results, opts, targets)
+		debug('after extractors', obj={'targets': targets, 'opts': opts}, sub='celery.state')
 
+	try:
 		# Get task class
 		task_cls = Task.get_task_class(name)
 
-		#
-
-
-
-
-
-
+		# Check if chunkable
+		many_targets = len(targets) > 1
+		targets_over_chunk_size = task_cls.input_chunk_size and len(targets) > task_cls.input_chunk_size
+		has_file_flag = task_cls.file_flag is not None
+		chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
+		task_opts = opts.copy()
+		task_opts.update({
+			'print_remote_info': False,
+			'has_children': chunk_it,
+		})
+		if chunk_it:
+			task_opts['print_cmd'] = False
+		task = task_cls(targets, **task_opts)
+		debug(
+			'',
+			obj={
+				f'{task.unique_name}': 'CHUNK STATUS',
+				'chunk_it': chunk_it,
+				'sync': task.sync,
+				'many_targets': many_targets,
+				'targets_over_chunk_size': targets_over_chunk_size,
+			},
+			obj_after=False,
+			id=self.request.id,
+			sub='celery.state',
+			verbose=True
+		)
 
-
-
-			chunk_size =
-
-
-				task_cls,
+		# Chunk task if needed
+		if chunk_it:
+			chunk_size = task_cls.input_chunk_size if has_file_flag else 1
+			break_task(
+				task,
 				opts,
 				targets,
 				results=results,
 				chunk_size=chunk_size)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-		# otherwise, run normally
-		else:
-			# If list with 1 element
-			if isinstance(targets, list) and len(targets) == 1:
-				targets = targets[0]
-
-			# Run task
-			task = task_cls(targets, **opts)
-			for item in task:
-				task_results.append(item)
-				results.append(item)
-				count += 1
-				state['meta']['task_results'] = task_results
-				state['meta']['results'] = results
-				state['meta']['count'] = len(task_results)
-				if item._type == 'progress':
-					state['meta']['progress'] = item.percent
-				self.update_state(**state)
-				debug(
-					'items found', sub='celery.state', id=self.request.id, obj={full_name: len(task_results)},
-					obj_after=False, level=4)
-
-		# Update task state based on task return code
-		if task.return_code == 0:
-			task_state = 'SUCCESS'
-			task_exc = None
-		else:
-			task_state = 'FAILURE'
-			task_exc = TaskError('\n'.join(task.errors))
-
-	except BaseException as exc:
-		task_state = 'FAILURE'
-		task_exc = exc
+
+		# Update state before starting
+		update_state(self, task)
+
+		# Update state for each item found
+		for _ in task:
+			update_state(self, task)
+
+	except BaseException as e:
+		error = Error.from_exception(e)
+		error._source = task.unique_name
+		error._uuid = str(uuid.uuid4())
+		task.add_result(error, print=True)
+		task.stop_celery_tasks()
 
 	finally:
-
-		state['state'] = 'SUCCESS' # force task success to serialize exception
-		state['meta']['results'] = results
-		state['meta']['task_results'] = task_results
-		state['meta']['progress'] = 100
-
-		# Handle task failure
-		if task_state == 'FAILURE':
-			if isinstance(task_exc, TaskError):
-				exc_str = str(task_exc)
-			else: # full traceback
-				exc_str = ' '.join(traceback.format_exception(task_exc, value=task_exc, tb=task_exc.__traceback__))
-			state['meta'][msg_type] = exc_str
-
-		# Update task state with final status
-		self.update_state(**state)
-		debug('updated', sub='celery.state', id=self.request.id, obj={full_name: task_state}, obj_after=False, level=2)
-
-		# Update parent task if necessary
-		if task and task.has_children:
-			task.log_results()
-			task.run_hooks('on_end')
-
-		# profiler.stop()
-		# from pathlib import Path
-		# logger.info('Stopped profiling')
-		# profile_root = Path('/code/.profiles')
-		# profile_root.mkdir(exist_ok=True)
-		# profile_path = f'/code/.profiles/{self.request.id}.html'
-		# logger.info(f'Saving profile to {profile_path}')
-		# with open(profile_path, 'w', encoding='utf-8') as f_html:
-		# 	f_html.write(profiler.output_html())
-
-		# TODO: fix memory leak instead of running a garbage collector
+		update_state(self, task, force=True)
 		gc.collect()
-
-
-	return results if parent else task_results
+		debug('', obj={task.unique_name: task.status, 'results': task.results}, sub='celery.results', verbose=True)
+		return task.results
 
 
 @app.task
@@ -363,7 +338,7 @@ def is_celery_worker_alive():
 	result = app.control.broadcast('ping', reply=True, limit=1, timeout=1)
 	result = bool(result)
 	if result:
-		console.print('Celery worker is alive !'
+		console.print(Info(message='Celery worker is alive !'))
 	else:
-		console.print('No Celery worker alive.'
+		console.print(Warning(message='No Celery worker alive.'))
 	return result
```