secator 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.


Files changed (90)
  1. secator/celery.py +160 -185
  2. secator/celery_utils.py +268 -0
  3. secator/cli.py +427 -176
  4. secator/config.py +114 -68
  5. secator/configs/workflows/host_recon.yaml +5 -3
  6. secator/configs/workflows/port_scan.yaml +7 -3
  7. secator/configs/workflows/subdomain_recon.yaml +2 -2
  8. secator/configs/workflows/url_bypass.yaml +10 -0
  9. secator/configs/workflows/url_dirsearch.yaml +1 -1
  10. secator/configs/workflows/url_vuln.yaml +1 -1
  11. secator/decorators.py +170 -92
  12. secator/definitions.py +11 -4
  13. secator/exporters/__init__.py +7 -5
  14. secator/exporters/console.py +10 -0
  15. secator/exporters/csv.py +27 -19
  16. secator/exporters/gdrive.py +16 -11
  17. secator/exporters/json.py +3 -1
  18. secator/exporters/table.py +30 -2
  19. secator/exporters/txt.py +20 -16
  20. secator/hooks/gcs.py +53 -0
  21. secator/hooks/mongodb.py +53 -27
  22. secator/installer.py +277 -60
  23. secator/output_types/__init__.py +29 -11
  24. secator/output_types/_base.py +11 -1
  25. secator/output_types/error.py +36 -0
  26. secator/output_types/exploit.py +12 -8
  27. secator/output_types/info.py +24 -0
  28. secator/output_types/ip.py +8 -1
  29. secator/output_types/port.py +9 -2
  30. secator/output_types/progress.py +5 -0
  31. secator/output_types/record.py +5 -3
  32. secator/output_types/stat.py +33 -0
  33. secator/output_types/subdomain.py +1 -1
  34. secator/output_types/tag.py +8 -6
  35. secator/output_types/target.py +2 -2
  36. secator/output_types/url.py +14 -11
  37. secator/output_types/user_account.py +6 -6
  38. secator/output_types/vulnerability.py +8 -6
  39. secator/output_types/warning.py +24 -0
  40. secator/report.py +56 -23
  41. secator/rich.py +44 -39
  42. secator/runners/_base.py +629 -638
  43. secator/runners/_helpers.py +5 -91
  44. secator/runners/celery.py +18 -0
  45. secator/runners/command.py +404 -214
  46. secator/runners/scan.py +8 -24
  47. secator/runners/task.py +21 -55
  48. secator/runners/workflow.py +41 -40
  49. secator/scans/__init__.py +28 -0
  50. secator/serializers/dataclass.py +6 -0
  51. secator/serializers/json.py +10 -5
  52. secator/serializers/regex.py +12 -4
  53. secator/tasks/_categories.py +147 -42
  54. secator/tasks/bbot.py +295 -0
  55. secator/tasks/bup.py +99 -0
  56. secator/tasks/cariddi.py +38 -49
  57. secator/tasks/dalfox.py +3 -0
  58. secator/tasks/dirsearch.py +14 -25
  59. secator/tasks/dnsx.py +49 -30
  60. secator/tasks/dnsxbrute.py +4 -1
  61. secator/tasks/feroxbuster.py +10 -20
  62. secator/tasks/ffuf.py +3 -2
  63. secator/tasks/fping.py +4 -4
  64. secator/tasks/gau.py +5 -0
  65. secator/tasks/gf.py +2 -2
  66. secator/tasks/gospider.py +4 -0
  67. secator/tasks/grype.py +11 -13
  68. secator/tasks/h8mail.py +32 -42
  69. secator/tasks/httpx.py +58 -21
  70. secator/tasks/katana.py +19 -23
  71. secator/tasks/maigret.py +27 -25
  72. secator/tasks/mapcidr.py +2 -3
  73. secator/tasks/msfconsole.py +22 -19
  74. secator/tasks/naabu.py +18 -2
  75. secator/tasks/nmap.py +82 -55
  76. secator/tasks/nuclei.py +13 -3
  77. secator/tasks/searchsploit.py +26 -11
  78. secator/tasks/subfinder.py +5 -1
  79. secator/tasks/wpscan.py +91 -94
  80. secator/template.py +61 -45
  81. secator/thread.py +24 -0
  82. secator/utils.py +417 -78
  83. secator/utils_test.py +48 -23
  84. secator/workflows/__init__.py +28 -0
  85. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/METADATA +59 -48
  86. secator-0.8.0.dist-info/RECORD +115 -0
  87. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/WHEEL +1 -1
  88. secator-0.6.0.dist-info/RECORD +0 -101
  89. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/entry_points.txt +0 -0
  90. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/licenses/LICENSE +0 -0
secator/celery.py CHANGED
@@ -1,22 +1,28 @@
  import gc
  import logging
- import traceback
+ import sys
+ import uuid
+
+ from time import time

  from celery import Celery, chain, chord, signals
  from celery.app import trace
- from celery.result import allow_join_result
- # from pyinstrument import Profiler # TODO: make pyinstrument optional
+
  from rich.logging import RichHandler
+ from retry import retry

  from secator.config import CONFIG
+ from secator.output_types import Info, Error
  from secator.rich import console
  from secator.runners import Scan, Task, Workflow
  from secator.runners._helpers import run_extractors
- from secator.utils import (TaskError, debug, deduplicate,
-                            flatten)
+ from secator.utils import (debug, deduplicate, flatten, should_update)
+
+ IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)

- # from pathlib import Path
- # import memray # TODO: conditional memray tracing
+ #---------#
+ # Logging #
+ #---------#

  rich_handler = RichHandler(rich_tracebacks=True)
  rich_handler.setLevel(logging.INFO)
@@ -28,19 +34,18 @@ logging.basicConfig(
      force=True)
  logging.getLogger('kombu').setLevel(logging.ERROR)
  logging.getLogger('celery').setLevel(logging.INFO if CONFIG.debug.level > 6 else logging.WARNING)
-
  logger = logging.getLogger(__name__)
+ trace.LOG_SUCCESS = "Task %(name)s[%(id)s] succeeded in %(runtime)ss"
+

- trace.LOG_SUCCESS = """\
- Task %(name)s[%(id)s] succeeded in %(runtime)ss\
- """
+ #------------#
+ # Celery app #
+ #------------#

  app = Celery(__name__)
  app.conf.update({
-     # Worker config
-     'worker_send_task_events': True,
-     'worker_prefetch_multiplier': 1,
-     'worker_max_tasks_per_child': 10,
+     # Content types
+     'accept_content': ['application/x-python-serialize', 'application/json'],

      # Broker config
      'broker_url': CONFIG.celery.broker_url,
@@ -54,30 +59,37 @@ app.conf.update({
      'broker_pool_limit': CONFIG.celery.broker_pool_limit,
      'broker_connection_timeout': CONFIG.celery.broker_connection_timeout,

-     # Backend config
+     # Result backend config
      'result_backend': CONFIG.celery.result_backend,
+     'result_expires': CONFIG.celery.result_expires,
      'result_extended': True,
      'result_backend_thread_safe': True,
+     'result_serializer': 'pickle',
      # 'result_backend_transport_options': {'master_name': 'mymaster'}, # for Redis HA backend

      # Task config
+     'task_acks_late': False,
+     'task_compression': 'gzip',
+     'task_create_missing_queues': True,
      'task_eager_propagates': False,
+     'task_reject_on_worker_lost': False,
      'task_routes': {
          'secator.celery.run_workflow': {'queue': 'celery'},
          'secator.celery.run_scan': {'queue': 'celery'},
          'secator.celery.run_task': {'queue': 'celery'},
          'secator.hooks.mongodb.tag_duplicates': {'queue': 'mongodb'}
      },
-     'task_reject_on_worker_lost': True,
-     'task_acks_late': True,
-     'task_create_missing_queues': True,
-     'task_send_sent_event': True,
-
-     # Serialization / compression
-     'accept_content': ['application/x-python-serialize', 'application/json'],
-     'task_compression': 'gzip',
+     'task_store_eager_result': True,
+     # 'task_send_sent_event': True, # TODO: consider enabling this for Flower monitoring
      'task_serializer': 'pickle',
-     'result_serializer': 'pickle'
+
+     # Worker config
+     # 'worker_direct': True, # TODO: consider enabling this to allow routing to specific workers
+     'worker_max_tasks_per_child': 10,
+     # 'worker_max_memory_per_child': 100000 # TODO: consider enabling this
+     'worker_pool_restarts': True,
+     'worker_prefetch_multiplier': 1,
+     # 'worker_send_task_events': True, # TODO: consider enabling this for Flower monitoring
  })
  app.autodiscover_tasks(['secator.hooks.mongodb'], related_name=None)

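Note on the config block above: the reorganized `app.conf.update({...})` keeps pickle serialization with gzip compression and now groups options by theme (content types, broker, result backend, task, worker). A minimal standalone sketch of the serialization-related settings applied to a bare Celery app; the Redis URLs are placeholders, not secator's defaults:

```python
# Minimal sketch of the serialization/compression settings used above.
# Broker/backend URLs are placeholders for illustration only.
from celery import Celery

sketch_app = Celery('sketch')
sketch_app.conf.update({
    'broker_url': 'redis://localhost:6379/0',       # placeholder
    'result_backend': 'redis://localhost:6379/0',   # placeholder
    'accept_content': ['application/x-python-serialize', 'application/json'],
    'task_serializer': 'pickle',       # task args carry Python objects
    'result_serializer': 'pickle',     # results carry Python objects too
    'task_compression': 'gzip',        # compress task payloads on the broker
    'worker_max_tasks_per_child': 10,  # recycle worker processes periodically
    'worker_prefetch_multiplier': 1,   # fetch one task at a time per process
})
```

Accepting pickle is presumably what lets secator pass its output-type objects between tasks without JSON round-tripping; the usual caveat is that the broker should only be reachable by trusted producers.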
@@ -99,9 +111,34 @@ def void(*args, **kwargs):
      pass


- def revoke_task(task_id):
-     console.print(f'Revoking task {task_id}')
-     return app.control.revoke(task_id, terminate=True, signal='SIGINT')
+ @retry(Exception, tries=3, delay=2)
+ def update_state(celery_task, task, force=False):
+     """Update task state to add metadata information."""
+     if task.sync:
+         return
+     if not force and not should_update(CONFIG.runners.backend_update_frequency, task.last_updated_celery):
+         return
+     task.last_updated_celery = time()
+     debug(
+         '',
+         sub='celery.state',
+         id=celery_task.request.id,
+         obj={task.unique_name: task.status, 'count': task.self_findings_count},
+         obj_after=False,
+         verbose=True
+     )
+     return celery_task.update_state(
+         state='RUNNING',
+         meta=task.celery_state
+     )
+
+
+ def revoke_task(task_id, task_name=None):
+     message = f'Revoked task {task_id}'
+     if task_name:
+         message += f' ({task_name})'
+     app.control.revoke(task_id, terminate=True)
+     console.print(Info(message=message))


  #--------------#
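Note: `should_update` is imported from `secator.utils` but its implementation is not part of this diff. Below is a hedged sketch of the throttle-plus-retry pattern that `update_state` appears to rely on; the `should_update` body and the `maybe_push_state` helper are assumptions for illustration, not the library's code:

```python
# Hedged sketch of the throttled, retried state-update pattern used by
# update_state() above. should_update() is an ASSUMED implementation here;
# the real one lives in secator.utils and is not shown in this diff.
from time import time
from retry import retry  # same third-party 'retry' package imported above

_last_updated = None  # stand-in for task.last_updated_celery


def should_update(frequency, last_updated):
    """Assumed contract: True once `frequency` seconds have passed since `last_updated`."""
    return last_updated is None or (time() - last_updated) >= frequency


@retry(Exception, tries=3, delay=2)  # retry transient result-backend errors
def maybe_push_state(celery_task, meta, frequency=5):
    """Hypothetical helper: call update_state() at most once per `frequency` seconds."""
    global _last_updated
    if not should_update(frequency, _last_updated):
        return
    _last_updated = time()
    celery_task.update_state(state='RUNNING', meta=meta)
```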
@@ -113,23 +150,39 @@ def chunker(seq, size):
      return (seq[pos:pos + size] for pos in range(0, len(seq), size))


- def break_task(task_cls, task_opts, targets, results=[], chunk_size=1):
+ def break_task(task, task_opts, targets, results=[], chunk_size=1):
      """Break a task into multiple of the same type."""
      chunks = targets
      if chunk_size > 1:
          chunks = list(chunker(targets, chunk_size))
+     debug(
+         '',
+         obj={task.unique_name: 'CHUNKED', 'chunk_size': chunk_size, 'chunks': len(chunks), 'target_count': len(targets)},
+         obj_after=False,
+         sub='celery.state',
+         verbose=True
+     )

      # Clone opts
      opts = task_opts.copy()

      # Build signatures
      sigs = []
+     task.ids_map = {}
      for ix, chunk in enumerate(chunks):
+         if not isinstance(chunk, list):
+             chunk = [chunk]
          if len(chunks) > 0:  # add chunk to task opts for tracking chunks exec
              opts['chunk'] = ix + 1
              opts['chunk_count'] = len(chunks)
-         opts['parent'] = False
-         sig = task_cls.s(chunk, **opts).set(queue=task_cls.profile)
+         task_id = str(uuid.uuid4())
+         opts['has_parent'] = True
+         opts['enable_duplicate_check'] = False
+         sig = type(task).s(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
+         full_name = f'{task.name}_{ix + 1}'
+         task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
+         info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
+         task.add_result(info)
          sigs.append(sig)

      # Build Celery workflow
@@ -140,15 +193,16 @@ def break_task(task_cls, task_opts, targets, results=[], chunk_size=1):
              forward_results.s().set(queue='io'),
          )
      )
-     return workflow
+     if task.sync:
+         task.print_item = False
+         task.results = workflow.apply().get()
+     else:
+         result = workflow.apply_async()
+         task.celery_result = result


  @app.task(bind=True)
  def run_task(self, args=[], kwargs={}):
-     if CONFIG.debug.level > 1:
-         logger.info(f'Received task with args {args} and kwargs {kwargs}')
-     if 'context' not in kwargs:
-         kwargs['context'] = {}
      kwargs['context']['celery_id'] = self.request.id
      task = Task(*args, **kwargs)
      task.run()
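Note: `break_task` leans on the `chunker` generator shown in the previous hunk to split targets before building one signature per chunk. A quick usage illustration:

```python
# Quick illustration of the chunker() generator that break_task() relies on.
def chunker(seq, size):
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))

targets = ['a.com', 'b.com', 'c.com', 'd.com', 'e.com']
print(list(chunker(targets, 2)))
# -> [['a.com', 'b.com'], ['c.com', 'd.com'], ['e.com']]
# break_task() then creates one Celery signature per chunk; per the run_command
# hunk below, chunk_size is 1 for tools without a file flag and
# task_cls.input_chunk_size otherwise.
```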
@@ -156,10 +210,6 @@ def run_task(self, args=[], kwargs={}):

  @app.task(bind=True)
  def run_workflow(self, args=[], kwargs={}):
-     if CONFIG.debug.level > 1:
-         logger.info(f'Received workflow with args {args} and kwargs {kwargs}')
-     if 'context' not in kwargs:
-         kwargs['context'] = {}
      kwargs['context']['celery_id'] = self.request.id
      workflow = Workflow(*args, **kwargs)
      workflow.run()
@@ -167,8 +217,6 @@ def run_workflow(self, args=[], kwargs={}):

  @app.task(bind=True)
  def run_scan(self, args=[], kwargs={}):
-     if CONFIG.debug.level > 1:
-         logger.info(f'Received scan with args {args} and kwargs {kwargs}')
      if 'context' not in kwargs:
          kwargs['context'] = {}
      kwargs['context']['celery_id'] = self.request.id
@@ -178,167 +226,94 @@ def run_scan(self, args=[], kwargs={}):

  @app.task(bind=True)
  def run_command(self, results, name, targets, opts={}):
-     # profiler = Profiler(interval=0.0001)
-     # profiler.start()
      chunk = opts.get('chunk')
-     chunk_count = opts.get('chunk_count')
-     description = opts.get('description')
      sync = opts.get('sync', True)

      # Set Celery request id in context
      context = opts.get('context', {})
      context['celery_id'] = self.request.id
      opts['context'] = context
+     opts['print_remote_info'] = False
+     opts['results'] = results
+
+     # If we are in a Celery worker, print everything, always
+     if IN_CELERY_WORKER_PROCESS:
+         opts.update({
+             'print_item': True,
+             'print_line': True,
+             'print_cmd': True
+         })
+
+     # Flatten + dedupe results
+     results = flatten(results)
+     results = deduplicate(results, attr='_uuid')

-     # Debug task
-     full_name = name
-     full_name += f' {chunk}/{chunk_count}' if chunk_count else ''
-
-     # Update task state in backend
-     count = 0
-     msg_type = 'error'
-     task_results = []
-     task_state = 'RUNNING'
-     task = None
-     parent = True
-     state = {
-         'state': task_state,
-         'meta': {
-             'name': name,
-             'progress': 0,
-             'results': [],
-             'chunk': chunk,
-             'chunk_count': chunk_count,
-             'count': count,
-             'description': description
-         }
-     }
-     self.update_state(**state)
-     debug('updated', sub='celery.state', id=self.request.id, obj={full_name: 'RUNNING'}, obj_after=False, level=2)
-     # profile_root = Path('/code/.profiles')
-     # profile_root.mkdir(exist_ok=True)
-     # profile_path = f'/code/.profiles/{self.request.id}.bin'
-     # with memray.Tracker(profile_path):
-     try:
-         # Flatten + dedupe results
-         results = flatten(results)
-         results = deduplicate(results, attr='_uuid')
-
-         # Get expanded targets
-         if not chunk:
-             targets, opts = run_extractors(results, opts, targets)
-             if not targets:
-                 msg_type = 'info'
-                 raise TaskError(f'No targets were specified as input. Skipping. [{self.request.id}]')
+     # Get expanded targets
+     if not chunk and results:
+         targets, opts = run_extractors(results, opts, targets)
+         debug('after extractors', obj={'targets': targets, 'opts': opts}, sub='celery.state')

+     try:
          # Get task class
          task_cls = Task.get_task_class(name)

-         # Get split
-         multiple_targets = isinstance(targets, list) and len(targets) > 1
-         single_target_only = multiple_targets and task_cls.file_flag is None
-         break_size_threshold = multiple_targets and task_cls.input_chunk_size and len(targets) > task_cls.input_chunk_size
-
-         # If task doesn't support multiple targets, or if the number of targets is too big, split into multiple tasks
-         if single_target_only or (not sync and break_size_threshold):
+         # Check if chunkable
+         many_targets = len(targets) > 1
+         targets_over_chunk_size = task_cls.input_chunk_size and len(targets) > task_cls.input_chunk_size
+         has_file_flag = task_cls.file_flag is not None
+         chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
+         task_opts = opts.copy()
+         task_opts.update({
+             'print_remote_info': False,
+             'has_children': chunk_it,
+         })
+         if chunk_it:
+             task_opts['print_cmd'] = False
+         task = task_cls(targets, **task_opts)
+         debug(
+             '',
+             obj={
+                 f'{task.unique_name}': 'CHUNK STATUS',
+                 'chunk_it': chunk_it,
+                 'sync': task.sync,
+                 'many_targets': many_targets,
+                 'targets_over_chunk_size': targets_over_chunk_size,
+             },
+             obj_after=False,
+             id=self.request.id,
+             sub='celery.state',
+             verbose=True
+         )

-             # Initiate main task and set context for sub-tasks
-             task = task_cls(targets, parent=parent, has_children=True, **opts)
-             chunk_size = 1 if single_target_only else task_cls.input_chunk_size
-             debug(f'breaking task by chunks of size {chunk_size}.', id=self.request.id, sub='celery.state')
-             workflow = break_task(
-                 task_cls,
+         # Chunk task if needed
+         if chunk_it:
+             chunk_size = task_cls.input_chunk_size if has_file_flag else 1
+             break_task(
+                 task,
                  opts,
                  targets,
                  results=results,
                  chunk_size=chunk_size)
-             result = workflow.apply() if sync else workflow.apply_async()
-             debug(
-                 'waiting for subtasks', sub='celery.state', id=self.request.id, obj={full_name: 'RUNNING'},
-                 obj_after=False, level=2)
-             if not sync:
-                 list(task.__class__.get_live_results(result))
-             with allow_join_result():
-                 task_results = result.get()
-                 results.extend(task_results)
-                 task_state = 'SUCCESS'
-                 debug(
-                     'all subtasks done', sub='celery.state', id=self.request.id, obj={full_name: 'RUNNING'},
-                     obj_after=False, level=2)
-
-         # otherwise, run normally
-         else:
-             # If list with 1 element
-             if isinstance(targets, list) and len(targets) == 1:
-                 targets = targets[0]
-
-             # Run task
-             task = task_cls(targets, **opts)
-             for item in task:
-                 task_results.append(item)
-                 results.append(item)
-                 count += 1
-                 state['meta']['task_results'] = task_results
-                 state['meta']['results'] = results
-                 state['meta']['count'] = len(task_results)
-                 if item._type == 'progress':
-                     state['meta']['progress'] = item.percent
-                 self.update_state(**state)
-                 debug(
-                     'items found', sub='celery.state', id=self.request.id, obj={full_name: len(task_results)},
-                     obj_after=False, level=4)
-
-         # Update task state based on task return code
-         if task.return_code == 0:
-             task_state = 'SUCCESS'
-             task_exc = None
-         else:
-             task_state = 'FAILURE'
-             task_exc = TaskError('\n'.join(task.errors))
-
-     except BaseException as exc:
-         task_state = 'FAILURE'
-         task_exc = exc
+
+         # Update state before starting
+         update_state(self, task)
+
+         # Update state for each item found
+         for _ in task:
+             update_state(self, task)
+
+     except BaseException as e:
+         error = Error.from_exception(e)
+         error._source = task.unique_name
+         error._uuid = str(uuid.uuid4())
+         task.add_result(error, print=True)
+         task.stop_celery_tasks()

      finally:
-         # Set task state and exception
-         state['state'] = 'SUCCESS'  # force task success to serialize exception
-         state['meta']['results'] = results
-         state['meta']['task_results'] = task_results
-         state['meta']['progress'] = 100
-
-         # Handle task failure
-         if task_state == 'FAILURE':
-             if isinstance(task_exc, TaskError):
-                 exc_str = str(task_exc)
-             else:  # full traceback
-                 exc_str = ' '.join(traceback.format_exception(task_exc, value=task_exc, tb=task_exc.__traceback__))
-             state['meta'][msg_type] = exc_str
-
-         # Update task state with final status
-         self.update_state(**state)
-         debug('updated', sub='celery.state', id=self.request.id, obj={full_name: task_state}, obj_after=False, level=2)
-
-         # Update parent task if necessary
-         if task and task.has_children:
-             task.log_results()
-             task.run_hooks('on_end')
-
-         # profiler.stop()
-         # from pathlib import Path
-         # logger.info('Stopped profiling')
-         # profile_root = Path('/code/.profiles')
-         # profile_root.mkdir(exist_ok=True)
-         # profile_path = f'/code/.profiles/{self.request.id}.html'
-         # logger.info(f'Saving profile to {profile_path}')
-         # with open(profile_path, 'w', encoding='utf-8') as f_html:
-         #     f_html.write(profiler.output_html())
-
-         # TODO: fix memory leak instead of running a garbage collector
+         update_state(self, task, force=True)
          gc.collect()
-
-         # If running in chunk mode, only return chunk result, not all results
-         return results if parent else task_results
+         debug('', obj={task.unique_name: task.status, 'results': task.results}, sub='celery.results', verbose=True)
+         return task.results


  @app.task
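Note: the rewritten `run_command` collapses the old split heuristics into a single `chunk_it` flag. Restating that decision as a standalone function for clarity (names mirror the diff; the function itself is illustrative, not part of secator):

```python
# Standalone restatement of the chunking decision in run_command() above
# (illustrative only; the real code reads these values off task_cls and opts).
def should_chunk(sync, target_count, file_flag, input_chunk_size):
    many_targets = target_count > 1
    has_file_flag = file_flag is not None
    targets_over_chunk_size = bool(input_chunk_size) and target_count > input_chunk_size
    # sync runs: chunk only when the tool cannot consume a target file;
    # async runs: chunk whenever the target list exceeds the tool's chunk size.
    return (sync and many_targets and not has_file_flag) or \
           (not sync and many_targets and targets_over_chunk_size)

assert should_chunk(sync=True, target_count=5, file_flag=None, input_chunk_size=None)
assert should_chunk(sync=False, target_count=500, file_flag='-l', input_chunk_size=100)
assert not should_chunk(sync=True, target_count=5, file_flag='-l', input_chunk_size=100)
```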
@@ -363,7 +338,7 @@ def is_celery_worker_alive():
      result = app.control.broadcast('ping', reply=True, limit=1, timeout=1)
      result = bool(result)
      if result:
-         console.print('Celery worker is alive !', style='bold green')
+         console.print(Info(message='Celery worker is available, running remotely'))
      else:
-         console.print('No Celery worker alive.', style='bold orange1')
+         console.print(Info(message='No Celery worker available, running locally'))
      return result
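
Note: the reworded messages reflect the fallback behavior implied here: run remotely when a worker answers the broadcast ping, otherwise run locally. A standalone availability check mirroring `is_celery_worker_alive` (assuming secator is installed so that `secator.celery.app` is importable):

```python
# Standalone worker-availability check mirroring is_celery_worker_alive() above.
from secator.celery import app

def worker_available(timeout=1):
    replies = app.control.broadcast('ping', reply=True, limit=1, timeout=timeout)
    return bool(replies)

if __name__ == '__main__':
    print('remote' if worker_available() else 'local')
```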