secator 0.9.4__py3-none-any.whl → 0.10.1a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of secator might be problematic.

secator/celery.py CHANGED
@@ -1,22 +1,23 @@
 import gc
 import json
 import logging
+import os
 import sys
 import uuid
 
 from time import time
 
-from celery import Celery, chain, chord, signals
+from celery import Celery, chord
 from celery.app import trace
 
 from rich.logging import RichHandler
 from retry import retry
 
+from secator.celery_signals import setup_handlers
 from secator.config import CONFIG
-from secator.output_types import Info, Error
+from secator.output_types import Info
 from secator.rich import console
 from secator.runners import Scan, Task, Workflow
-from secator.runners._helpers import run_extractors
 from secator.utils import (debug, deduplicate, flatten, should_update)
 
 IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)
@@ -78,7 +79,8 @@ app.conf.update({
 		'secator.celery.run_workflow': {'queue': 'celery'},
 		'secator.celery.run_scan': {'queue': 'celery'},
 		'secator.celery.run_task': {'queue': 'celery'},
-		'secator.hooks.mongodb.tag_duplicates': {'queue': 'mongodb'}
+		'secator.celery.forward_results': {'queue': 'results'},
+		'secator.hooks.mongodb.*': {'queue': 'mongodb'}
 	},
 	'task_store_eager_result': True,
 	'task_send_sent_event': CONFIG.celery.task_send_sent_event,
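
The new `secator.hooks.mongodb.*` entry relies on Celery's glob-style route matching, so every task in that module (not just `tag_duplicates`) now lands on the `mongodb` queue. A minimal sketch of that routing mechanism on a vanilla Celery app:

```python
from celery import Celery

app = Celery('demo')
app.conf.task_routes = {
	'secator.celery.forward_results': {'queue': 'results'},  # exact name match
	'secator.hooks.mongodb.*': {'queue': 'mongodb'},  # glob: matches every task in the module
}
```
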
@@ -93,29 +95,14 @@ app.conf.update({
 	'worker_send_task_events': CONFIG.celery.worker_send_task_events
 })
 app.autodiscover_tasks(['secator.hooks.mongodb'], related_name=None)
-
-
-def maybe_override_logging():
-	def decorator(func):
-		if CONFIG.celery.override_default_logging:
-			return signals.setup_logging.connect(func)
-		else:
-			return func
-	return decorator
-
-
-@maybe_override_logging()
-def void(*args, **kwargs):
-	"""Override celery's logging setup to prevent it from altering our settings.
-	github.com/celery/celery/issues/1867
-	"""
-	pass
+if IN_CELERY_WORKER_PROCESS:
+	setup_handlers()
 
 
 @retry(Exception, tries=3, delay=2)
 def update_state(celery_task, task, force=False):
 	"""Update task state to add metadata information."""
-	if task.sync:
+	if not IN_CELERY_WORKER_PROCESS:
 		return
 	if not force and not should_update(CONFIG.runners.backend_update_frequency, task.last_updated_celery):
 		return
@@ -151,6 +138,16 @@ def chunker(seq, size):
 	return (seq[pos:pos + size] for pos in range(0, len(seq), size))
 
 
+@app.task(bind=True)
+def handle_runner_error(self, results, runner):
+	"""Handle errors in Celery workflows (chunked tasks or runners)."""
+	results = forward_results(results)
+	runner.results = results
+	runner.log_results()
+	runner.run_hooks('on_end')
+	return runner.results
+
+
 def break_task(task, task_opts, targets, results=[], chunk_size=1):
 	"""Break a task into multiple of the same type."""
 	chunks = targets
@@ -179,7 +176,8 @@ def break_task(task, task_opts, targets, results=[], chunk_size=1):
 		task_id = str(uuid.uuid4())
 		opts['has_parent'] = True
 		opts['enable_duplicate_check'] = False
-		sig = type(task).s(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
+		opts['results'] = results
+		sig = type(task).si(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
 		full_name = f'{task.name}_{ix + 1}'
 		task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
 		info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
@@ -187,23 +185,17 @@ def break_task(task, task_opts, targets, results=[], chunk_size=1):
 		sigs.append(sig)
 
 	# Build Celery workflow
-	workflow = chain(
-		forward_results.s(results).set(queue='io'),
-		chord(
-			tuple(sigs),
-			forward_results.s().set(queue='io'),
-		)
+	workflow = chord(
+		tuple(sigs),
+		handle_runner_error.s(runner=task).set(queue='results')
 	)
-	if task.sync:
-		task.print_item = False
-		task.results = workflow.apply().get()
-	else:
-		result = workflow.apply_async()
-		task.celery_result = result
+	return workflow
 
 
 @app.task(bind=True)
 def run_task(self, args=[], kwargs={}):
+	print('run task')
+	console.print(Info(message=f'Running task {self.request.id}'))
 	kwargs['context']['celery_id'] = self.request.id
 	task = Task(*args, **kwargs)
 	task.run()
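
`break_task` now returns the chord instead of applying it itself: the header runs one immutable signature per chunk, and `handle_runner_error` runs once as the body with the collected chunk results. A minimal sketch of that chord shape, using hypothetical `add`/`collect` tasks on an existing Celery app:

```python
from celery import chord

@app.task
def add(x, y):
	return x + y

@app.task
def collect(results):
	return sum(results)

workflow = chord((add.s(2, 2), add.s(4, 4)), collect.s())
res = workflow.apply_async()
res.get()  # 12: collect() received [4, 8] once both header tasks finished
```
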
@@ -211,6 +203,7 @@ def run_task(self, args=[], kwargs={}):
 
 @app.task(bind=True)
 def run_workflow(self, args=[], kwargs={}):
+	console.print(Info(message=f'Running workflow {self.request.id}'))
 	kwargs['context']['celery_id'] = self.request.id
 	workflow = Workflow(*args, **kwargs)
 	workflow.run()
@@ -218,6 +211,7 @@ def run_workflow(self, args=[], kwargs={}):
 
 @app.task(bind=True)
 def run_scan(self, args=[], kwargs={}):
+	console.print(Info(message=f'Running scan {self.request.id}'))
 	if 'context' not in kwargs:
 		kwargs['context'] = {}
 	kwargs['context']['celery_id'] = self.request.id
@@ -227,94 +221,41 @@ def run_scan(self, args=[], kwargs={}):
 
 @app.task(bind=True)
 def run_command(self, results, name, targets, opts={}):
-	chunk = opts.get('chunk')
-	sync = opts.get('sync', True)
+	if IN_CELERY_WORKER_PROCESS:
+		opts.update({'print_item': True, 'print_line': True, 'print_cmd': True})
+		routing_key = self.request.delivery_info['routing_key']
+		console.print(Info(message=f'Task "{name}" running with routing key "{routing_key}"'))
+
+	# Flatten + dedupe + filter results
+	results = forward_results(results)
 
 	# Set Celery request id in context
 	context = opts.get('context', {})
 	context['celery_id'] = self.request.id
+	context['worker_name'] = os.environ.get('WORKER_NAME', 'unknown')
 	opts['context'] = context
-	opts['print_remote_info'] = False
 	opts['results'] = results
+	opts['sync'] = True
 
-	# If we are in a Celery worker, print everything, always
-	if IN_CELERY_WORKER_PROCESS:
-		opts.update({
-			'print_item': True,
-			'print_line': True,
-			'print_cmd': True
-		})
+	# Initialize task
+	sync = not IN_CELERY_WORKER_PROCESS
+	task_cls = Task.get_task_class(name)
+	task = task_cls(targets, **opts)
+	update_state(self, task, force=True)
 
-	# Flatten + dedupe results
-	results = flatten(results)
-	results = deduplicate(results, attr='_uuid')
+	# Chunk task if needed
+	if task_cls.needs_chunking(targets, sync):
+		console.print(Info(message=f'Task {name} requires chunking, breaking into {len(targets)} tasks'))
+		return self.replace(break_task(task, opts, targets, results=results))
+
+	# Update state live
+	[update_state(self, task) for _ in task]
+	update_state(self, task, force=True)
+
+	# Garbage collection to save RAM
+	gc.collect()
 
-	# Get expanded targets
-	if not chunk and results:
-		targets, opts = run_extractors(results, opts, targets)
-		debug('after extractors', obj={'targets': targets, 'opts': opts}, sub='celery.state')
-
-	try:
-		# Get task class
-		task_cls = Task.get_task_class(name)
-
-		# Check if chunkable
-		many_targets = len(targets) > 1
-		targets_over_chunk_size = task_cls.input_chunk_size and len(targets) > task_cls.input_chunk_size
-		has_file_flag = task_cls.file_flag is not None
-		chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
-		task_opts = opts.copy()
-		task_opts.update({
-			'print_remote_info': False,
-			'has_children': chunk_it,
-		})
-		if chunk_it:
-			task_opts['print_cmd'] = False
-		task = task_cls(targets, **task_opts)
-		debug(
-			'',
-			obj={
-				f'{task.unique_name}': 'CHUNK STATUS',
-				'chunk_it': chunk_it,
-				'sync': task.sync,
-				'many_targets': many_targets,
-				'targets_over_chunk_size': targets_over_chunk_size,
-			},
-			obj_after=False,
-			id=self.request.id,
-			sub='celery.state',
-			verbose=True
-		)
-
-		# Chunk task if needed
-		if chunk_it:
-			chunk_size = task_cls.input_chunk_size if has_file_flag else 1
-			break_task(
-				task,
-				opts,
-				targets,
-				results=results,
-				chunk_size=chunk_size)
-
-		# Update state before starting
-		update_state(self, task)
-
-		# Update state for each item found
-		for _ in task:
-			update_state(self, task)
-
-	except BaseException as e:
-		error = Error.from_exception(e)
-		error._source = task.unique_name
-		error._uuid = str(uuid.uuid4())
-		task.add_result(error, print=True)
-		task.stop_celery_tasks()
-
-	finally:
-		update_state(self, task, force=True)
-		gc.collect()
-		debug('', obj={task.unique_name: task.status, 'results': task.results}, sub='celery.results', verbose=True)
-		return task.results
+	return task.results
 
 
 @app.task
@@ -327,6 +268,7 @@ def forward_results(results):
 		results = results['results']
 	results = flatten(results)
 	results = deduplicate(results, attr='_uuid')
+	console.print(Info(message=f'Forwarding {len(results)} results ...'))
 	return results
 
 #--------------#
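
Note the control-flow change in `run_command`: the try/except/finally state machine is gone, and chunked execution is delegated through Celery's `Task.replace`, which swaps the currently executing task for the chord built by `break_task` (its `handle_runner_error` callback then owns logging and hooks). A sketch of the replace pattern, with hypothetical `fetch`/`merge` helpers:

```python
from celery import chord

@app.task(bind=True)
def fetch_all(self, urls):
	if len(urls) > 1:
		sigs = [fetch_all.si([u]) for u in urls]  # one immutable signature per chunk
		# End this task here; the chord inherits its task id and delivers the result
		return self.replace(chord(sigs, merge.s()))
	return fetch(urls[0])
```
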
secator/celery_signals.py ADDED
@@ -0,0 +1,141 @@
+import os
+import signal
+import threading
+from pathlib import Path
+
+from celery import signals
+
+from secator.config import CONFIG
+from secator.output_types import Info
+from secator.rich import console
+
+IDLE_TIMEOUT = CONFIG.celery.worker_kill_after_idle_seconds
+
+# File-based state management system
+STATE_DIR = Path("/tmp/celery_state")
+STATE_DIR.mkdir(exist_ok=True, parents=True)
+
+
+def get_lock_file_path():
+	worker_name = os.environ.get("WORKER_NAME", f"unknown_{os.getpid()}")
+	return Path(f"/tmp/celery_worker_{worker_name}.lock")
+
+
+def set_task_running(task_id):
+	"""Mark that a task is running in current worker"""
+	with open(get_lock_file_path(), "w") as f:
+		f.write(task_id)
+
+
+def clear_task_running():
+	"""Clear the task running state"""
+	lock_file = get_lock_file_path()
+	if lock_file.exists():
+		lock_file.unlink()
+
+
+def is_task_running():
+	"""Check if a task is currently running"""
+	return get_lock_file_path().exists()
+
+
+def kill_worker(parent=False):
+	"""Kill current worker using its pid by sending a SIGTERM to Celery master process."""
+	worker_name = os.environ.get('WORKER_NAME', 'unknown')
+
+	# Check if a task is running via the lock file
+	if not is_task_running():
+		pid = os.getppid() if parent else os.getpid()
+		console.print(Info(message=f'Sending SIGTERM to worker {worker_name} with pid {pid}'))
+		os.kill(pid, signal.SIGTERM)
+	else:
+		console.print(Info(message=f'Cancelling worker shutdown of {worker_name} since a task is running'))
+
+
+def setup_idle_timer(timeout):
+	"""Setup a timer to kill the worker after being idle"""
+	if timeout == -1:
+		return
+
+	console.print(Info(message=f'Starting inactivity timer for {timeout} seconds ...'))
+	timer = threading.Timer(timeout, kill_worker)
+	timer.daemon = True  # Make sure timer is killed when worker exits
+	timer.start()
+
+
+def maybe_override_logging():
+	def decorator(func):
+		if CONFIG.celery.override_default_logging:
+			return signals.setup_logging.connect(func)
+		else:
+			return func
+	return decorator
+
+
+@maybe_override_logging()
+def setup_logging(*args, **kwargs):
+	"""Override celery's logging setup to prevent it from altering our settings.
+	github.com/celery/celery/issues/1867
+	"""
+	pass
+
+
+def capture_worker_name(sender, instance, **kwargs):
+	os.environ["WORKER_NAME"] = '{0}'.format(sender)
+
+
+def worker_init_handler(**kwargs):
+	if IDLE_TIMEOUT != -1:
+		setup_idle_timer(IDLE_TIMEOUT)
+
+
+def task_prerun_handler(task_id, **kwargs):
+	# Mark that a task is running
+	set_task_running(task_id)
+
+
+def task_postrun_handler(**kwargs):
+	# Mark that no task is running
+	clear_task_running()
+
+	# Get sender name from kwargs
+	sender_name = kwargs['sender'].name
+
+	if CONFIG.celery.worker_kill_after_task and sender_name.startswith('secator.'):
+		worker_name = os.environ.get('WORKER_NAME', 'unknown')
+		console.print(Info(message=f'Shutdown worker {worker_name} since config celery.worker_kill_after_task is set.'))
+		kill_worker(parent=True)
+		return
+
+	# Set up a new idle timer
+	if IDLE_TIMEOUT != -1:
+		console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds'))
+		setup_idle_timer(IDLE_TIMEOUT)
+
+
+def task_revoked_handler(request=None, **kwargs):
+	"""Handle revoked tasks by clearing the task running state"""
+	console.print(Info(message='Task was revoked, clearing running state'))
+	clear_task_running()
+
+	# Set up a new idle timer
+	if IDLE_TIMEOUT != -1:
+		console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds after task revocation'))
+		setup_idle_timer(IDLE_TIMEOUT)
+
+
+def worker_shutdown_handler(**kwargs):
+	"""Cleanup lock files when worker shuts down"""
+	lock_file = get_lock_file_path()
+	if lock_file.exists():
+		lock_file.unlink()
+
+
+def setup_handlers():
+	signals.celeryd_after_setup.connect(capture_worker_name)
+	signals.setup_logging.connect(setup_logging)
+	signals.task_prerun.connect(task_prerun_handler)
+	signals.task_postrun.connect(task_postrun_handler)
+	signals.task_revoked.connect(task_revoked_handler)
+	signals.worker_ready.connect(worker_init_handler)
+	signals.worker_shutdown.connect(worker_shutdown_handler)
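
The running-task flag lives in a lock file rather than in memory because these handlers can fire across processes under Celery's prefork pool; the same file check in `kill_worker` also neutralizes stale idle timers, which are never cancelled, only superseded by new ones. Round trip of the helpers, as a minimal sketch:

```python
from secator.celery_signals import set_task_running, is_task_running, clear_task_running

set_task_running('abc123')    # done by task_prerun_handler
assert is_task_running()      # kill_worker() would refuse to SIGTERM here
clear_task_running()          # done by task_postrun / task_revoked / worker_shutdown
assert not is_task_running()  # an expired idle timer may now kill the worker
```
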
secator/celery_utils.py CHANGED
@@ -264,5 +264,5 @@ class CeleryData(object):
 			CeleryData.get_task_ids(result.parent, ids=ids)
 
 		except kombu.exceptions.DecodeError:
-			debug('kombu decode error', sub='celery.data.get_task_ids')
+			debug('kombu decode error', sub='celery.data')
 			return
secator/cli.py CHANGED
@@ -148,7 +148,7 @@ def worker(hostname, concurrency, reload, queue, pool, check, dev, stop, show):
 		return
 
 	if not queue:
-		queue = 'io,cpu,' + ','.join([r['queue'] for r in app.conf.task_routes.values()])
+		queue = 'io,cpu,poll,' + ','.join(set([r['queue'] for r in app.conf.task_routes.values()]))
 
 	app_str = 'secator.celery.app'
 	celery = f'{sys.executable} -m celery'
@@ -171,7 +171,8 @@ def worker(hostname, concurrency, reload, queue, pool, check, dev, stop, show):
 		patterns = "celery.py;tasks/*.py;runners/*.py;serializers/*.py;output_types/*.py;hooks/*.py;exporters/*.py"
 		cmd = f'watchmedo auto-restart --directory=./ --patterns="{patterns}" --recursive -- {cmd}'
 
-	Command.execute(cmd, name='secator_worker')
+	ret = Command.execute(cmd, name='secator_worker')
+	sys.exit(ret.return_code)
 
 
 #-------#
secator/config.py CHANGED
@@ -73,6 +73,8 @@ class Celery(StrictModel):
 	worker_max_tasks_per_child: int = 20
 	worker_prefetch_multiplier: int = 1
 	worker_send_task_events: bool = False
+	worker_kill_after_task: bool = False
+	worker_kill_after_idle_seconds: int = -1
 
 
 class Cli(StrictModel):
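
Both new worker lifecycle settings are opt-in. A quick illustrative check of the shipped defaults:

```python
from secator.config import CONFIG

assert CONFIG.celery.worker_kill_after_task is False       # opt-in: recycle worker after each task
assert CONFIG.celery.worker_kill_after_idle_seconds == -1  # -1 disables the idle-shutdown timer
```
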
@@ -499,8 +501,8 @@ class Config(DotMap):
 			self.set(path, value, set_partial=False)
 			if not self.validate(print_errors=False) and print_errors:
 				console.print(f'[bold red]{var} (override failed)[/]')
-		elif print_errors:
-			console.print(f'[bold red]{var} (override failed: key not found)[/]')
+		# elif print_errors:
+		# 	console.print(f'[bold red]{var} (override failed: key not found)[/]')
 
 
 def download_files(data: dict, target_folder: Path, offline_mode: bool, type: str):
secator/decorators.py CHANGED
@@ -28,6 +28,7 @@ RUNNER_OPTS = {
 RUNNER_GLOBAL_OPTS = {
 	'sync': {'is_flag': True, 'help': 'Run tasks synchronously (automatic if no worker is alive)'},
 	'worker': {'is_flag': True, 'default': False, 'help': 'Run tasks in worker'},
+	'no_poll': {'is_flag': True, 'default': False, 'help': 'Do not live poll for tasks results when running in worker'},
 	'proxy': {'type': str, 'help': 'HTTP proxy'},
 	'driver': {'type': str, 'help': 'Export real-time results. E.g: "mongodb"'}
 	# 'debug': {'type': int, 'default': 0, 'help': 'Debug mode'},
secator/hooks/mongodb.py CHANGED
@@ -20,11 +20,19 @@ MONGODB_MAX_POOL_SIZE = CONFIG.addons.mongodb.max_pool_size
 
 logger = logging.getLogger(__name__)
 
-client = pymongo.MongoClient(
-	escape_mongodb_url(MONGODB_URL),
-	maxPoolSize=MONGODB_MAX_POOL_SIZE,
-	serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
-)
+_mongodb_client = None
+
+
+def get_mongodb_client():
+	"""Get or create MongoDB client"""
+	global _mongodb_client
+	if _mongodb_client is None:
+		_mongodb_client = pymongo.MongoClient(
+			escape_mongodb_url(MONGODB_URL),
+			maxPoolSize=MONGODB_MAX_POOL_SIZE,
+			serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
+		)
+	return _mongodb_client
 
 
 def get_runner_dbg(runner):
@@ -39,6 +47,7 @@ def get_runner_dbg(runner):
 
 
 def update_runner(self):
+	client = get_mongodb_client()
 	db = client.main
 	type = self.config.type
 	collection = f'{type}s'
@@ -72,6 +81,7 @@ def update_finding(self, item):
 	if type(item) not in FINDING_TYPES:
 		return item
 	start_time = time.time()
+	client = get_mongodb_client()
 	db = client.main
 	update = item.toDict()
 	_type = item._type
@@ -97,15 +107,14 @@ def update_finding(self, item):
 
 
 def find_duplicates(self):
+	from secator.celery import IN_CELERY_WORKER_PROCESS
 	ws_id = self.toDict().get('context', {}).get('workspace_id')
 	if not ws_id:
 		return
-	if self.sync:
-		debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
+	if not IN_CELERY_WORKER_PROCESS:
 		tag_duplicates(ws_id)
 	else:
-		celery_id = tag_duplicates.delay(ws_id)
-		debug(f'running duplicate check on workspace {ws_id}', id=celery_id, sub='hooks.mongodb')
+		tag_duplicates.delay(ws_id)
 
 
 def load_finding(obj):
@@ -132,6 +141,8 @@ def tag_duplicates(ws_id: str = None):
 	Args:
 		ws_id (str): Workspace id.
 	"""
+	debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
+	client = get_mongodb_client()
 	db = client.main
 	workspace_query = list(
 		db.findings.find({'_context.workspace_id': str(ws_id), '_tagged': True}).sort('_timestamp', -1))
@@ -172,19 +183,19 @@ def tag_duplicates(ws_id: str = None):
 				'seen dupes': len(seen_dupes)
 			},
 			id=ws_id,
-			sub='hooks.mongodb.duplicates',
+			sub='hooks.mongodb',
 			verbose=True)
 		tmp_duplicates_ids = list(dict.fromkeys([i._uuid for i in tmp_duplicates]))
-		debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+		debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb', verbose=True)
 
 		# Update latest object as non-duplicate
 		if tmp_duplicates:
 			duplicates.extend([f for f in tmp_duplicates])
 			db.findings.update_one({'_id': ObjectId(item._uuid)}, {'$set': {'_related': tmp_duplicates_ids}})
-			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
 			non_duplicates.append(item)
 		else:
-			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
 			non_duplicates.append(item)
 
 	# debug(f'found {len(duplicates)} total duplicates')
@@ -208,7 +219,7 @@ def tag_duplicates(ws_id: str = None):
 			'duplicates': len(duplicates_ids),
 			'non-duplicates': len(non_duplicates_ids)
 		},
-		sub='hooks.mongodb.duplicates')
+		sub='hooks.mongodb')
 
 
 HOOKS = {
@@ -232,6 +243,6 @@ HOOKS = {
 		'on_item': [update_finding],
 		'on_duplicate': [update_finding],
 		'on_interval': [update_runner],
-		'on_end': [update_runner, find_duplicates]
+		'on_end': [update_runner]
 	}
 }
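
Creating the MongoClient lazily avoids opening a connection at import time in every process that merely imports this module, which matters under Celery's prefork pool since PyMongo clients are not fork-safe: each worker child now builds its own client on first use. A usage sketch (the workspace id is hypothetical):

```python
client = get_mongodb_client()  # built on first call, cached in _mongodb_client afterwards
db = client.main
db.findings.count_documents({'_context.workspace_id': 'ws1'})
```
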
secator/output_types/error.py CHANGED
@@ -21,10 +21,13 @@ class Error(OutputType):
 	_sort_by = ('_timestamp',)
 
 	def from_exception(e, **kwargs):
-		message = type(e).__name__
+		errtype = type(e).__name__
+		message = errtype
 		if str(e):
 			message += f': {str(e)}'
-		return Error(message=message, traceback=traceback_as_string(e), **kwargs)
+		traceback = traceback_as_string(e) if errtype not in ['KeyboardInterrupt', 'GreenletExit'] else ''
+		error = Error(message=message, traceback=traceback, **kwargs)
+		return error
 
 	def __str__(self):
 		return self.message
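
Interrupt-style exceptions now produce an `Error` without the noisy, uninformative traceback; regular exceptions keep it. Illustrative behavior:

```python
try:
	raise ValueError('bad input')
except ValueError as e:
	err = Error.from_exception(e)
# err.message == 'ValueError: bad input', err.traceback holds the full trace;
# a KeyboardInterrupt or GreenletExit would instead yield err.traceback == ''
```
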
secator/runners/_base.py CHANGED
@@ -15,7 +15,7 @@ from secator.config import CONFIG
 from secator.output_types import FINDING_TYPES, OutputType, Progress, Info, Warning, Error, Target
 from secator.report import Report
 from secator.rich import console, console_stdout
-from secator.runners._helpers import (get_task_folder_id, process_extractor)
+from secator.runners._helpers import (get_task_folder_id, process_extractor, run_extractors)
 from secator.utils import (debug, import_dynamic, merge_opts, rich_to_ansi, should_update)
 
 logger = logging.getLogger(__name__)
@@ -69,15 +69,14 @@ class Runner:
 	reports_folder = None
 
 	def __init__(self, config, inputs=[], results=[], run_opts={}, hooks={}, validators={}, context={}):
+		self.uuids = []
+		self.results = []
+		self.output = ''
+
+		# Runner config
 		self.config = config
 		self.name = run_opts.get('name', config.name)
 		self.description = run_opts.get('description', config.description)
-		if not isinstance(inputs, list):
-			inputs = [inputs]
-		self.inputs = inputs
-		self.uuids = []
-		self.output = ''
-		self.results = []
 		self.workspace_name = context.get('workspace_name', 'default')
 		self.run_opts = run_opts.copy()
 		self.sync = run_opts.get('sync', True)
@@ -96,6 +95,40 @@ class Runner:
 		self.celery_ids_map = {}
 		self.caller = self.run_opts.get('caller', None)
 		self.threads = []
+		self.no_poll = self.run_opts.get('no_poll', False)
+		self.quiet = self.run_opts.get('quiet', False)
+
+		# Runner process options
+		self.no_process = self.run_opts.get('no_process', False)
+		self.piped_input = self.run_opts.get('piped_input', False)
+		self.piped_output = self.run_opts.get('piped_output', False)
+		self.enable_duplicate_check = self.run_opts.get('enable_duplicate_check', True)
+
+		# Runner print opts
+		self.print_item = self.run_opts.get('print_item', False)
+		self.print_line = self.run_opts.get('print_line', False) and not self.quiet
+		self.print_remote_info = self.run_opts.get('print_remote_info', False) and not self.piped_input and not self.piped_output  # noqa: E501
+		self.print_json = self.run_opts.get('print_json', False)
+		self.print_raw = self.run_opts.get('print_raw', False) or self.piped_output
+		self.print_fmt = self.run_opts.get('fmt', '')
+		self.print_progress = self.run_opts.get('print_progress', False) and not self.quiet and not self.print_raw
+		self.print_target = self.run_opts.get('print_target', False) and not self.quiet and not self.print_raw
+		self.print_stat = self.run_opts.get('print_stat', False) and not self.quiet and not self.print_raw
+		self.raise_on_error = self.run_opts.get('raise_on_error', False)
+		self.print_opts = {k: v for k, v in self.__dict__.items() if k.startswith('print_') if v}
+
+		# Determine inputs
+		inputs = [inputs] if not isinstance(inputs, list) else inputs
+		if results:
+			inputs, run_opts, errors = run_extractors(results, run_opts, inputs)
+			for error in errors:
+				self.add_result(error, print=True)
+		self.inputs = inputs
+
+		# Debug
+		self.debug('Inputs', obj=self.inputs, sub='init')
+		self.debug('Run opts', obj={k: v for k, v in self.run_opts.items() if v is not None}, sub='init')
+		self.debug('Print opts', obj={k: v for k, v in self.print_opts.items() if v is not None}, sub='init')
 
 		# Determine exporters
 		exporters_str = self.run_opts.get('output') or self.default_exporters
@@ -122,31 +155,6 @@ class Runner:
 			self.enable_profiler = False
 			pass
 
-		# Process opts
-		self.quiet = self.run_opts.get('quiet', False)
-		self.no_process = self.run_opts.get('no_process', False)
-		self.piped_input = self.run_opts.get('piped_input', False)
-		self.piped_output = self.run_opts.get('piped_output', False)
-		self.enable_duplicate_check = self.run_opts.get('enable_duplicate_check', True)
-
-		# Print opts
-		self.print_item = self.run_opts.get('print_item', False)
-		self.print_line = self.run_opts.get('print_line', False) and not self.quiet
-		self.print_remote_info = self.run_opts.get('print_remote_info', False) and not self.piped_input and not self.piped_output  # noqa: E501
-		self.print_json = self.run_opts.get('print_json', False)
-		self.print_raw = self.run_opts.get('print_raw', False) or self.piped_output
-		self.print_fmt = self.run_opts.get('fmt', '')
-		self.print_progress = self.run_opts.get('print_progress', False) and not self.quiet and not self.print_raw
-		self.print_target = self.run_opts.get('print_target', False) and not self.quiet and not self.print_raw
-		self.print_stat = self.run_opts.get('print_stat', False) and not self.quiet and not self.print_raw
-		self.raise_on_error = self.run_opts.get('raise_on_error', not self.sync)
-		self.print_opts = {k: v for k, v in self.__dict__.items() if k.startswith('print_') if v}
-
-		# Debug
-		self.debug('Inputs', obj=self.inputs, sub='init')
-		self.debug('Run opts', obj={k: v for k, v in self.run_opts.items() if v is not None}, sub='init')
-		self.debug('Print opts', obj={k: v for k, v in self.print_opts.items() if v is not None}, sub='init')
-
 		# Hooks
 		self.hooks = {name: [] for name in HOOKS + getattr(self, 'hooks', [])}
 		self.register_hooks(hooks)
@@ -165,7 +173,7 @@ class Runner:
 
 		# Process prior results
 		for result in results:
-			list(self._process_item(result, print=False))
+			list(self._process_item(result, print=False, output=False))
 
 		# Input post-process
 		self.run_hooks('before_init')
@@ -218,6 +226,12 @@ class Runner:
 	def self_findings(self):
 		return [r for r in self.results if isinstance(r, tuple(FINDING_TYPES)) if r._source.startswith(self.unique_name)]
 
+	@property
+	def self_errors(self):
+		if self.config.type == 'task':
+			return [r for r in self.results if isinstance(r, Error) and r._source.startswith(self.unique_name)]
+		return [r for r in self.results if isinstance(r, Error)]
+
 	@property
 	def self_findings_count(self):
 		return len(self.self_findings)
@@ -226,7 +240,7 @@ class Runner:
 	def status(self):
 		if not self.done:
 			return 'RUNNING'
-		return 'FAILURE' if len(self.errors) > 0 else 'SUCCESS'
+		return 'FAILURE' if len(self.self_errors) > 0 else 'SUCCESS'
 
 	@property
 	def celery_state(self):
@@ -524,7 +538,7 @@ class Runner:
 			fun = self.get_func_path(hook)
 			try:
 				if hook_type == 'on_interval' and not should_update(CONFIG.runners.backend_update_frequency, self.last_updated_db):
-					self.debug('', obj={f'{name} [dim yellow]->[/] {fun}': '[dim gray11]skipped[/]'}, id=_id, sub='hooks.db', verbose=True)  # noqa: E501
+					self.debug('', obj={f'{name} [dim yellow]->[/] {fun}': '[dim gray11]skipped[/]'}, id=_id, sub='hooks', verbose=True)  # noqa: E501
 					return
 				if not self.enable_hooks or self.no_process:
 					self.debug('', obj={f'{name} [dim yellow]->[/] {fun}': '[dim gray11]skipped[/]'}, id=_id, sub='hooks', verbose=True)  # noqa: E501
@@ -635,6 +649,8 @@ class Runner:
 
 	def log_results(self):
 		"""Log runner results."""
+		if self.no_poll:
+			return
 		self.done = True
 		self.progress = 100
 		self.end_time = datetime.fromtimestamp(time())
@@ -780,19 +796,20 @@ class Runner:
 			count_map[name] = count
 		return count_map
 
-	def _process_item(self, item, print=True):
+	def _process_item(self, item, print=True, output=True):
 		"""Process an item yielded by the derived runner.
 
 		Args:
 			item (dict | str): Input item.
 			print (bool): Print item in console.
+			output (bool): Add to runner output.
 
 		Yields:
 			OutputType: Output type.
 		"""
 		# Item is a string, just print it
 		if isinstance(item, str):
-			self.output += item + '\n'
+			self.output += item + '\n' if output else ''
 			self._print_item(item) if item and print else ''
 			return
 
secator/runners/_helpers.py CHANGED
@@ -1,6 +1,7 @@
 import os
 
-from secator.utils import deduplicate
+from secator.output_types import Error
+from secator.utils import deduplicate, debug
 
 
 def run_extractors(results, opts, inputs=[]):
@@ -12,17 +13,19 @@ def run_extractors(results, opts, inputs=[]):
 		inputs (list): Original inputs.
 
 	Returns:
-		tuple: inputs, options.
+		tuple: inputs, options, errors.
 	"""
 	extractors = {k: v for k, v in opts.items() if k.endswith('_')}
+	errors = []
 	for key, val in extractors.items():
 		key = key.rstrip('_')
-		values = extract_from_results(results, val)
+		values, err = extract_from_results(results, val)
+		errors.extend(err)
 		if key == 'targets':
 			inputs = deduplicate(values)
 		else:
 			opts[key] = deduplicate(values)
-	return inputs, opts
+	return inputs, opts, errors
 
 
 def extract_from_results(results, extractors):
@@ -33,14 +36,19 @@ def extract_from_results(results, extractors):
 		extractors (list): List of extractors to extract from.
 
 	Returns:
-		list: List of extracted results (flat).
+		tuple: List of extracted results (flat), list of errors.
 	"""
-	extracted = []
+	extracted_results = []
+	errors = []
 	if not isinstance(extractors, list):
 		extractors = [extractors]
 	for extractor in extractors:
-		extracted.extend(process_extractor(results, extractor))
-	return extracted
+		try:
+			extracted_results.extend(process_extractor(results, extractor))
+		except Exception as e:
+			error = Error.from_exception(e)
+			errors.append(error)
+	return extracted_results, errors
 
 
 def process_extractor(results, extractor, ctx={}):
@@ -53,6 +61,7 @@ def process_extractor(results, extractor, ctx={}):
 	Returns:
 		list: List of extracted results.
 	"""
+	debug('before extract', obj={'results': results, 'extractor': extractor}, sub='extractor')
 	if isinstance(extractor, dict):
 		_type = extractor['type']
 		_field = extractor.get('field')
@@ -66,6 +75,7 @@ def process_extractor(results, extractor, ctx={}):
 	if _field:
 		_field = '{' + _field + '}' if not _field.startswith('{') else _field
 		items = [_field.format(**item.toDict()) for item in items]
+	debug('after extract', obj={'items': items}, sub='extractor')
 	return items
 
 
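Extractors are the run_opts whose keys end with an underscore; `run_extractors` now collects their failures instead of raising, which is what lets `Runner.__init__` surface them as Error results. A usage sketch, with hypothetical prior results:

```python
from secator.runners._helpers import run_extractors

# 'targets_' feeds the next runner's inputs; other keys like 'ports_' would set opts['ports']
inputs, opts, errors = run_extractors(
	results=previous_results,  # hypothetical list of output types, e.g. Port objects
	opts={'targets_': {'type': 'port', 'field': '{host}:{port}'}},
	inputs=['example.com'],
)
# inputs: deduplicated 'host:port' strings extracted from the Port results;
# errors: Error objects for any extractor that raised, instead of a hard crash
```
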
secator/runners/command.py CHANGED
@@ -110,7 +110,7 @@ class Command(Runner):
 	proxy_http = False
 
 	# Profile
-	profile = 'cpu'
+	profile = 'io'
 
 	def __init__(self, inputs=[], **run_opts):
 
@@ -193,6 +193,14 @@ class Command(Runner):
 		})
 		return res
 
+	@classmethod
+	def needs_chunking(cls, targets, sync):
+		many_targets = len(targets) > 1
+		targets_over_chunk_size = cls.input_chunk_size and len(targets) > cls.input_chunk_size
+		has_file_flag = cls.file_flag is not None
+		chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
+		return chunk_it
+
 	@classmethod
 	def delay(cls, *args, **kwargs):
 		# TODO: Move this to TaskBase
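
The chunking predicate that used to live inline in `run_command` is now a classmethod, so it can be queried before the task is instantiated. An illustrative check on a hypothetical minimal task class:

```python
from secator.runners import Command

class dummy(Command):
	file_flag = None        # no file flag: sync runs must fork one process per target
	input_chunk_size = 10

dummy.needs_chunking(['a', 'b'], sync=True)                    # True: many targets, no file flag
dummy.needs_chunking(['a', 'b'], sync=False)                   # False: 2 targets <= chunk size 10
dummy.needs_chunking([str(i) for i in range(11)], sync=False)  # True: 11 targets > chunk size 10
```
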
@@ -858,6 +866,8 @@ class Command(Runner):
 			cmd = f'cat {fpath} | {cmd}'
 		elif self.file_flag:
 			cmd += f' {self.file_flag} {fpath}'
+		else:
+			cmd += f' {fpath}'
 
 		self.inputs_path = fpath
 
secator/runners/task.py CHANGED
@@ -26,6 +26,7 @@ class Task(Runner):
 		# Run opts
 		run_opts = self.run_opts.copy()
 		run_opts.pop('output', None)
+		run_opts.pop('no_poll', False)
 
 		# Set task output types
 		self.output_types = task_cls.output_types
@@ -48,6 +49,8 @@ class Task(Runner):
 			message=f'Celery task created: {self.celery_result.id}',
 			task_id=self.celery_result.id
 		)
+		if self.no_poll:
+			return
 		results = CeleryData.iter_results(
 			self.celery_result,
 			ids_map=self.celery_ids_map,
secator/runners/workflow.py CHANGED
@@ -26,6 +26,7 @@ class Workflow(Runner):
 		# Task opts
 		run_opts = self.run_opts.copy()
 		run_opts['hooks'] = self._hooks.get(Task, {})
+		run_opts.pop('no_poll', False)
 
 		# Build Celery workflow
 		workflow = self.build_celery_workflow(
@@ -46,6 +47,8 @@ class Workflow(Runner):
 			message=f'Celery task created: {self.celery_result.id}',
 			task_id=self.celery_result.id
 		)
+		if self.no_poll:
+			return
 		results = CeleryData.iter_results(
 			self.celery_result,
 			ids_map=self.celery_ids_map,
@@ -70,7 +73,7 @@ class Workflow(Runner):
 			self.inputs,
 			self.config.options,
 			run_opts)
-		sigs = [forward_results.si(results).set(queue='io')] + sigs + [forward_results.s().set(queue='io')]
+		sigs = [forward_results.si(results).set(queue='results')] + sigs + [forward_results.s().set(queue='results')]
 		workflow = chain(*sigs)
 		return workflow
 
@@ -102,7 +105,7 @@ class Workflow(Runner):
 				workflow_opts,
 				run_opts
 			)
-			sig = chord((tasks), forward_results.s().set(queue='io'))
+			sig = chord((tasks), forward_results.s().set(queue='results'))
 		elif task_name == '_chain':
 			tasks = self.get_tasks(
 				task_opts,
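
With `no_poll` set, `Task.run` and `Workflow.run` submit the Celery workflow and return right after the 'Celery task created' message instead of live-streaming results; a driver such as mongodb can then be queried later. A usage sketch (hypothetical invocation; `config` is a loaded workflow template):

```python
wf = Workflow(config, inputs=['example.com'], run_opts={'sync': False, 'no_poll': True})
wf.run()  # returns immediately; results are collected later via the configured driver
```
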
secator/tasks/fping.py CHANGED
@@ -13,7 +13,6 @@ class fping(ReconIp):
 	cmd = 'fping -a'
 	file_flag = '-f'
 	input_flag = None
-	ignore_return_code = True
 	opt_prefix = '--'
 	opt_key_map = {
 		DELAY: 'period',
secator/tasks/gospider.py CHANGED
@@ -55,7 +55,6 @@ class gospider(HttpCrawler):
 	}
 	install_cmd = 'go install -v github.com/jaeles-project/gospider@latest'
 	install_github_handle = 'jaeles-project/gospider'
-	ignore_return_code = True
 	proxychains = False
 	proxy_socks5 = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
 	proxy_http = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
secator/tasks/katana.py CHANGED
@@ -30,7 +30,8 @@ class katana(HttpCrawler):
 		'jsluice': {'is_flag': True, 'short': 'jsl', 'default': True, 'help': 'Enable jsluice parsing in javascript file (memory intensive)'},  # noqa: E501
 		'known_files': {'type': str, 'short': 'kf', 'default': 'all', 'help': 'Enable crawling of known files (all, robotstxt, sitemapxml)'},  # noqa: E501
 		'omit_raw': {'is_flag': True, 'short': 'or', 'default': True, 'help': 'Omit raw requests/responses from jsonl output'},  # noqa: E501
-		'omit_body': {'is_flag': True, 'short': 'ob', 'default': True, 'help': 'Omit response body from jsonl output'}
+		'omit_body': {'is_flag': True, 'short': 'ob', 'default': True, 'help': 'Omit response body from jsonl output'},
+		'no_sandbox': {'is_flag': True, 'short': 'ns', 'default': False, 'help': 'Disable sandboxing'},
 	}
 	opt_key_map = {
 		HEADER: 'headers',
secator/tasks/naabu.py CHANGED
@@ -47,8 +47,8 @@ class naabu(ReconPort):
 		}
 	}
 	output_types = [Port]
-	install_cmd = 'go install -v github.com/projectdiscovery/naabu/v2/cmd/naabu@latest'
-	install_github_handle = 'projectdiscovery/naabu'
+	install_cmd = 'go install -v github.com/projectdiscovery/naabu/v2/cmd/naabu@v2.3.3'
+	# install_github_handle = 'projectdiscovery/naabu'
 	install_pre = {'apt': ['libpcap-dev'], 'apk': ['libpcap-dev', 'libc6-compat'], 'pacman|brew': ['libpcap']}
 	install_post = {'arch|alpine': 'sudo ln -sf /usr/lib/libpcap.so /usr/lib/libpcap.so.0.8'}
 	proxychains = False
secator/tasks/nuclei.py CHANGED
@@ -16,7 +16,6 @@ class nuclei(VulnMulti):
 	cmd = 'nuclei'
 	file_flag = '-l'
 	input_flag = '-u'
-	input_chunk_size = 1
 	json_flag = '-jsonl'
 	opts = {
 		'templates': {'type': str, 'short': 't', 'help': 'Templates'},
@@ -74,7 +73,6 @@ class nuclei(VulnMulti):
 			EXTRA_DATA: lambda x: {k: v for k, v in x.items() if k not in ['duration', 'errors', 'percent']}
 		}
 	}
-	ignore_return_code = True
 	install_pre = {
 		'*': ['git']
 	}
secator/tasks/wpscan.py CHANGED
@@ -82,7 +82,6 @@ class wpscan(VulnHttp):
 	proxychains = False
 	proxy_http = True
 	proxy_socks5 = False
-	ignore_return_code = True
 	profile = 'io'
 
 	@staticmethod
secator/template.py CHANGED
@@ -6,8 +6,10 @@ from pathlib import Path
 import yaml
 from dotmap import DotMap
 
-from secator.rich import console
 from secator.config import CONFIG, CONFIGS_FOLDER
+from secator.rich import console
+from secator.utils import convert_functions_to_strings
+
 
 TEMPLATES_DIR_KEYS = ['workflow', 'scan', 'profile']
 
@@ -106,7 +108,7 @@ class TemplateLoader(DotMap):
 		task_opts = task_class.get_supported_opts()
 		for name, conf in task_opts.items():
 			if name not in opts or not opts[name].get('supported', False):
-				opts[name] = conf
+				opts[name] = convert_functions_to_strings(conf)
 		return opts
 
 	def _extract_tasks(self):
secator/utils.py CHANGED
@@ -2,6 +2,7 @@ import fnmatch
 import inspect
 import importlib
 import itertools
+import json
 import logging
 import operator
 import os
@@ -771,6 +772,8 @@ def process_wordlist(val):
 	template_wordlist = getattr(CONFIG.wordlists.templates, val)
 	if template_wordlist:
 		return template_wordlist
+	elif Path(val).exists():
+		return val
 	else:
 		return download_file(
 			val,
@@ -778,3 +781,22 @@ def process_wordlist(val):
 			offline_mode=CONFIG.offline_mode,
 			type='wordlist'
 		)
+
+
+def convert_functions_to_strings(data):
+	"""Recursively convert functions to strings in a dict.
+
+	Args:
+		data (dict): Dictionary to convert.
+
+	Returns:
+		dict: Converted dictionary.
+	"""
+	if isinstance(data, dict):
+		return {k: convert_functions_to_strings(v) for k, v in data.items()}
+	elif isinstance(data, list):
+		return [convert_functions_to_strings(v) for v in data]
+	elif callable(data):
+		return json.dumps(data.__name__)  # or use inspect.getsource(data) if you want the actual function code
+	else:
+		return data
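
Note that `callable` covers classes too, so type objects like `int` are also stringified. Illustrative round trip (the opt dict is hypothetical):

```python
opts = {'rate_limit': {'type': int, 'process': lambda x: x * 2}}
convert_functions_to_strings(opts)
# {'rate_limit': {'type': '"int"', 'process': '"<lambda>"'}}
```
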
secator-0.9.4.dist-info/METADATA → secator-0.10.1a0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: secator
-Version: 0.9.4
+Version: 0.10.1a0
 Summary: The pentester's swiss knife.
 Project-URL: Homepage, https://github.com/freelabz/secator
 Project-URL: Issues, https://github.com/freelabz/secator/issues
secator-0.9.4.dist-info/RECORD → secator-0.10.1a0.dist-info/RECORD RENAMED
@@ -1,17 +1,18 @@
 secator/.gitignore,sha256=da8MUc3hdb6Mo0WjZu2upn5uZMbXcBGvhdhTQ1L89HI,3093
 secator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-secator/celery.py,sha256=dvXUuvkPaoSd0Blq5Abz7ncinVXzjnES8aOLxXxvmvo,9865
-secator/celery_utils.py,sha256=iIuCn_3YkPXCtpnbaYqpppU2TARzSDyTIYHkrRyt54s,7725
-secator/cli.py,sha256=SX_SNUA6LLdG7ICpUs5iSiNYOp_DkQLGE0uuB_KSrXE,43879
-secator/config.py,sha256=b5I4F2DO1WxxmHvnxii_lrelEefAB3ZMKBkSL-C0y4c,19569
-secator/decorators.py,sha256=tjH7WodxJEBIf2CCbegmvOe8H9DKSFh4iPLEhDNGPCA,13784
+secator/celery.py,sha256=9KXKv4EamJYJrHt_Ppn7aIp1AiFaTn2V0J_tZBwtWK0,8802
+secator/celery_signals.py,sha256=HobT7hCbVKPEHvCNwxCvQxFVUyocU1kkrTXi67b1DDw,4346
+secator/celery_utils.py,sha256=UWqLZpUaOXcztC_GD6uEDLiP8bGmD3WiTQN-u3lialg,7712
+secator/cli.py,sha256=3_tTTusW12MCejFgtOeYjiedjrJpyQj_gsCK8FkTMJA,43922
+secator/config.py,sha256=xItKM29yvMqzNZZygSNZXZ2V9vJbTdRuLTfIoRfP3XE,19653
+secator/decorators.py,sha256=3kYadCz6haIZtnjkFHSRfenTdc6Yu7bHd-0IVjhD72w,13902
 secator/definitions.py,sha256=gFtLT9fjNtX_1qkiCjNfQyCvYq07IhScsQzX4o20_SE,3084
 secator/installer.py,sha256=Q5qmGbxGmuhysEA9YovTpy-YY2TxxFskhrzSX44c42E,17971
 secator/report.py,sha256=qJkEdCFttDBXIwUNUzZqFU_sG8l0PvyTSTogZVBv1Rs,3628
 secator/rich.py,sha256=owmuLcTTUt8xYBTE3_SqWTkPeAomcU_8bPdW_V-U8VM,3264
-secator/template.py,sha256=Qy4RjcmlifeSA8CleWUBb9fluxuYHzxgEH0H-8qs8R4,4323
+secator/template.py,sha256=Sb6PjCTGIkZ7I0OGWFp5CaXmjt-6VPe_xpcRhWhjGpU,4409
 secator/thread.py,sha256=rgRgEtcMgs2wyfLWVlCTUCLWeg6jsMo5iKpyyrON5rY,655
-secator/utils.py,sha256=HMw0Q4omL-a5VcbvUhATC30oOSEKxTVLANgVRfWKnkc,21211
+secator/utils.py,sha256=zlG3-f3KEN9DdiT5kCqHhIASdEudYDgSYPkB76DTLLk,21787
 secator/utils_test.py,sha256=ArHwkWW89t0IDqxO4HjJWd_tm7tp1illP4pu3nLq5yo,6559
 secator/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 secator/configs/profiles/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -48,10 +49,10 @@ secator/exporters/table.py,sha256=RY7Tct5kowEx8Oza8QMXFx6fKBARYfLxEbbvjKiE3eQ,11
 secator/exporters/txt.py,sha256=oMtr22di6cqyE_5yJoiWP-KElrI5QgvK1cOUrj7H7js,730
 secator/hooks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 secator/hooks/gcs.py,sha256=MIhntyWYz9BZdTXhWl5JznaczSq1_7fl3TVqPufuTSo,1490
-secator/hooks/mongodb.py,sha256=HyjtpJSoxvSZ6aG8uBf1RFLNKGXCBDQL5eEh4xzDonA,7545
+secator/hooks/mongodb.py,sha256=XKbm_SrcSbQ2koILWvhzSg4tqdvHXgX5aU5x46Edu1s,7716
 secator/output_types/__init__.py,sha256=LxCW0K1f2vdgUapc4pIEsUpBfC0TQVvqo7T57rGuZGk,1159
 secator/output_types/_base.py,sha256=OgS6ICt66TzPsqo1JZwRIIwbng2HRX1i_u5qbUECgNk,2820
-secator/output_types/error.py,sha256=QjiJ5RoN3-utHqAyvgL2jlmZp7-u7emgUQpvLpYammU,1405
+secator/output_types/error.py,sha256=39gpEJfKM2EuyOhD9lSkjjna2QicMvnLdFav6kHmhlg,1529
 secator/output_types/exploit.py,sha256=-BKTqPBg94rVgjw8YSmcYuBCI2x-73WwMd9ITP9qr3Y,1750
 secator/output_types/info.py,sha256=R8xeiD3ocNOsvkJPhrQgsx6q-Ea1G0eTAqyuh5JrAR0,843
 secator/output_types/ip.py,sha256=CyE3qkp55Kmj5YRl0CZGS4XrHX8N5apWrLN3OMzaK0U,1127
@@ -67,13 +68,13 @@ secator/output_types/user_account.py,sha256=rm10somxyu30JHjj629IkR15Nhahylud_fVO
 secator/output_types/vulnerability.py,sha256=nF7OT9zGez8sZvLrkhjBOORjVi8hCqfCYUFq3eZ_ywo,2870
 secator/output_types/warning.py,sha256=47GtmG083GqGPb_R5JDFmARJ9Mqrme58UxwJhgdGPuI,853
 secator/runners/__init__.py,sha256=EBbOk37vkBy9p8Hhrbi-2VtM_rTwQ3b-0ggTyiD22cE,290
-secator/runners/_base.py,sha256=tcTsL35dAHsIMfgcclTtvDk2kQM4Hhu-8IZTyHJgqTs,28973
-secator/runners/_helpers.py,sha256=FGogmmdHfCWmIyq7wRprwU1oOSxesOu3Y0N4GyAgiGw,2000
+secator/runners/_base.py,sha256=T9gjOqe-UPDHe5ZdVRBtUtxTefRgDcq9JV08F6UV5ZU,29596
+secator/runners/_helpers.py,sha256=QhJmdmFdu5XSx3LBFf4Q4Hy2EXS6bLGnJUq8G7C6f68,2410
 secator/runners/celery.py,sha256=bqvDTTdoHiGRCt0FRvlgFHQ_nsjKMP5P0PzGbwfCj_0,425
-secator/runners/command.py,sha256=xjNTecsdtu94-3Gb7SoXDZLvN91wGPhYakMAw7d3R4o,25090
+secator/runners/command.py,sha256=PqCOHDKJXvG4weB8mXDTElGxc8i8pK2RoyTKUBpHASU,25480
 secator/runners/scan.py,sha256=tuPuqwL6fIS4UbCoy5WPKthYWm_LL-vCPRD2qK58HZE,1232
-secator/runners/task.py,sha256=JXlwo3DyQnu69RbQ8xvJnXu6y0rDYN-3iT4q4gy39tI,2004
-secator/runners/workflow.py,sha256=vry_MZFx6dRrorTrdsUqvhMZGOLPCdzpxkvN6fnt62w,3783
+secator/runners/task.py,sha256=f2AduWpIy8JHK-Qitl_2Kh0fia573_YHAyAlV6MsJ50,2068
+secator/runners/workflow.py,sha256=XEhBfL-f3vGH0HgEPnj62d8ITxjH_tPXiNSVkaonuwQ,3862
 secator/scans/__init__.py,sha256=nlNLiRl7Vu--c_iXClFFcagMd_b_OWKitq8tX1-1krQ,641
 secator/serializers/__init__.py,sha256=OP5cmFl77ovgSCW_IDcZ21St2mUt5UK4QHfrsK2KvH8,248
 secator/serializers/dataclass.py,sha256=RqICpfsYWGjHAACAA2h2jZ_69CFHim4VZwcBqowGMcQ,1010
@@ -90,26 +91,26 @@ secator/tasks/dnsx.py,sha256=nK14_DeyX0QTpAMdIP0LSSEOEu5_tQemyFW0XPjA2f8,2266
 secator/tasks/dnsxbrute.py,sha256=5VnSpd5ken7vWxFX1bcsGcUN8LpaVhcjafnuETzkMGs,1422
 secator/tasks/feroxbuster.py,sha256=3bKolPIdDBhdJ2fu4BP3w1cOlxDyI8WmtM-_2pDQ0AM,2773
 secator/tasks/ffuf.py,sha256=VGrtjFgSX6Q1I8h1wjPO5XwBFCfZXmn0DQsn9gxEUXc,2468
-secator/tasks/fping.py,sha256=m7eSXFU5yIeDD_kWh-h208ufSZAm4SpQzD34Ko0yCu8,1116
+secator/tasks/fping.py,sha256=9nMIahBMizRwsos9py-ltXMEffIiyx1reVytj9sTyKU,1089
 secator/tasks/gau.py,sha256=1Qt0I_FqTh-QyJ0YR8I7i_T80HehWSvem_SS-TQKVm0,1648
 secator/tasks/gf.py,sha256=y8Fc0sRLGqNuwUjTBgLk3HEw3ZOnh09nB_GTufGErNA,962
-secator/tasks/gospider.py,sha256=XKLus6GnwN9MYU_ZFmNED-JeRn6n1Eg0CPgul8g1zLs,2302
+secator/tasks/gospider.py,sha256=mpoBq2VQXUqgwWPLz41fzdW85hJeo8mn9FUUJj_DrUw,2275
 secator/tasks/grype.py,sha256=xoOuldnHCrS0O1Y4IzjbSVvoX5eX-fLSZ74THdRC2so,2447
 secator/tasks/h8mail.py,sha256=wNukV-aB-bXPZNq7WL8n1nFgH5b5tGh6vOF80Yna33I,1934
 secator/tasks/httpx.py,sha256=ONfCdAOV7ARCM9tSnlucIAM3UQeWcMUm8QZX8F7u9Pg,5895
-secator/tasks/katana.py,sha256=A0nnjKKT-A34LBtEuG25lWh5Ria4nwgo4Ti31403E-Q,5256
+secator/tasks/katana.py,sha256=J0HKPT4QIrDj4uW2gZe7ByW6iEwPortSszqaHDvziwY,5355
 secator/tasks/maigret.py,sha256=6anhBzB4lEM90Lk23cAD_ku7I_ghTpj0W0i3h6HARD8,2088
 secator/tasks/mapcidr.py,sha256=56ocbaDmB5_C_ns-773CgZXGOKOtkI9q9xJs2Rlfqio,990
 secator/tasks/msfconsole.py,sha256=TXVrvzSWw9Ncv2h9QJtaEinTMbps_z0zX1PFirERVho,6430
-secator/tasks/naabu.py,sha256=aAEkQ10ma3Log8OVj8wHY1zUWmjpVQ5pehAMQLJQEV0,2089
+secator/tasks/naabu.py,sha256=90WORQhwFwy71OGNaFe10pCkIG8IJP1XwWQ24OMgSc4,2091
 secator/tasks/nmap.py,sha256=Zu24sJHnlOf3NXLj3Ohi07-x7m-5Ajr5ULpNsUF-QT0,12546
-secator/tasks/nuclei.py,sha256=GesggkDJgoWNZwFfo2xno_a4pRBKEUtC33M_LxKDp9o,4292
+secator/tasks/nuclei.py,sha256=bMXCRU5VWyrwI7Cv6BCj84NTpfjuALFumPqUSZ4Y6Ug,4243
 secator/tasks/searchsploit.py,sha256=gvtLZbL2hzAZ07Cf0cSj2Qs0GvWK94XyHvoPFsetXu8,3321
 secator/tasks/subfinder.py,sha256=C6W5NnXT92OUB1aSS9IYseqdI3wDMAz70TOEl8X-o3U,1213
-secator/tasks/wpscan.py,sha256=C8eW3vWfbSFrxm5iPzs3MgcagIfSs7u51QZiecYbT2Q,5577
+secator/tasks/wpscan.py,sha256=036ywiEqZfX_Bt071U7qIm7bi6pNk7vodflmuslJurA,5550
 secator/workflows/__init__.py,sha256=ivpZHiYYlj4JqlXLRmB9cmAPUGdk8QcUrCRL34hIqEA,665
-secator-0.9.4.dist-info/METADATA,sha256=5kEoLPx6LoijaPeHIJ9kDsgjDaHGUMt1dhp-VbxCppc,14723
-secator-0.9.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-secator-0.9.4.dist-info/entry_points.txt,sha256=lPgsqqUXWgiuGSfKy-se5gHdQlAXIwS_A46NYq7Acic,44
-secator-0.9.4.dist-info/licenses/LICENSE,sha256=19W5Jsy4WTctNkqmZIqLRV1gTDOp01S3LDj9iSgWaJ0,2867
-secator-0.9.4.dist-info/RECORD,,
+secator-0.10.1a0.dist-info/METADATA,sha256=c2JLeTa-Pv7TzcWFDPQlfuR1XLU6YnVegdnb1d5_-gc,14726
+secator-0.10.1a0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+secator-0.10.1a0.dist-info/entry_points.txt,sha256=lPgsqqUXWgiuGSfKy-se5gHdQlAXIwS_A46NYq7Acic,44
+secator-0.10.1a0.dist-info/licenses/LICENSE,sha256=19W5Jsy4WTctNkqmZIqLRV1gTDOp01S3LDj9iSgWaJ0,2867
+secator-0.10.1a0.dist-info/RECORD,,