secator 0.10.0__py3-none-any.whl → 0.10.1a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


secator/celery.py CHANGED
@@ -7,7 +7,7 @@ import uuid
 
 from time import time
 
-from celery import Celery, chain, chord
+from celery import Celery, chord
 from celery.app import trace
 
 from rich.logging import RichHandler
@@ -15,10 +15,9 @@ from retry import retry
 
 from secator.celery_signals import setup_handlers
 from secator.config import CONFIG
-from secator.output_types import Info, Error
+from secator.output_types import Info
 from secator.rich import console
 from secator.runners import Scan, Task, Workflow
-from secator.runners._helpers import run_extractors
 from secator.utils import (debug, deduplicate, flatten, should_update)
 
 IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)
@@ -103,7 +102,7 @@ if IN_CELERY_WORKER_PROCESS:
 @retry(Exception, tries=3, delay=2)
 def update_state(celery_task, task, force=False):
 	"""Update task state to add metadata information."""
-	if task.sync:
+	if not IN_CELERY_WORKER_PROCESS:
 		return
 	if not force and not should_update(CONFIG.runners.backend_update_frequency, task.last_updated_celery):
 		return
@@ -139,6 +138,16 @@ def chunker(seq, size):
 	return (seq[pos:pos + size] for pos in range(0, len(seq), size))
 
 
+@app.task(bind=True)
+def handle_runner_error(self, results, runner):
+	"""Handle errors in Celery workflows (chunked tasks or runners)."""
+	results = forward_results(results)
+	runner.results = results
+	runner.log_results()
+	runner.run_hooks('on_end')
+	return runner.results
+
+
 def break_task(task, task_opts, targets, results=[], chunk_size=1):
 	"""Break a task into multiple of the same type."""
 	chunks = targets
@@ -167,7 +176,8 @@ def break_task(task, task_opts, targets, results=[], chunk_size=1):
 		task_id = str(uuid.uuid4())
 		opts['has_parent'] = True
 		opts['enable_duplicate_check'] = False
-		sig = type(task).s(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
+		opts['results'] = results
+		sig = type(task).si(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
 		full_name = f'{task.name}_{ix + 1}'
 		task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
 		info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
@@ -175,23 +185,16 @@ def break_task(task, task_opts, targets, results=[], chunk_size=1):
 		sigs.append(sig)
 
 	# Build Celery workflow
-	workflow = chain(
-		forward_results.s(results).set(queue='results'),
-		chord(
-			tuple(sigs),
-			forward_results.s().set(queue='results'),
-		)
+	workflow = chord(
+		tuple(sigs),
+		handle_runner_error.s(runner=task).set(queue='results')
 	)
-	if task.sync:
-		task.print_item = False
-		task.results = workflow.apply().get()
-	else:
-		result = workflow.apply_async()
-		task.celery_result = result
+	return workflow
 
 
 @app.task(bind=True)
 def run_task(self, args=[], kwargs={}):
+	print('run task')
 	console.print(Info(message=f'Running task {self.request.id}'))
 	kwargs['context']['celery_id'] = self.request.id
 	task = Task(*args, **kwargs)
@@ -218,111 +221,41 @@ def run_scan(self, args=[], kwargs={}):
 
 @app.task(bind=True)
 def run_command(self, results, name, targets, opts={}):
-	chunk = opts.get('chunk')
-	sync = opts.get('sync', True)
+	if IN_CELERY_WORKER_PROCESS:
+		opts.update({'print_item': True, 'print_line': True, 'print_cmd': True})
+		routing_key = self.request.delivery_info['routing_key']
+		console.print(Info(message=f'Task "{name}" running with routing key "{routing_key}"'))
+
+	# Flatten + dedupe + filter results
+	results = forward_results(results)
 
 	# Set Celery request id in context
 	context = opts.get('context', {})
 	context['celery_id'] = self.request.id
 	context['worker_name'] = os.environ.get('WORKER_NAME', 'unknown')
 	opts['context'] = context
-	opts['print_remote_info'] = False
 	opts['results'] = results
+	opts['sync'] = True
 
-	# If we are in a Celery worker, print everything, always
-	if IN_CELERY_WORKER_PROCESS:
-		opts.update({
-			'print_item': True,
-			'print_line': True,
-			'print_cmd': True
-		})
-		routing_key = self.request.delivery_info['routing_key']
-		console.print(Info(message=f'Task "{name}" running with routing key "{routing_key}"'))
+	# Initialize task
+	sync = not IN_CELERY_WORKER_PROCESS
+	task_cls = Task.get_task_class(name)
+	task = task_cls(targets, **opts)
+	update_state(self, task, force=True)
 
-	# Flatten + dedupe results
-	results = flatten(results)
-	results = deduplicate(results, attr='_uuid')
+	# Chunk task if needed
+	if task_cls.needs_chunking(targets, sync):
+		console.print(Info(message=f'Task {name} requires chunking, breaking into {len(targets)} tasks'))
+		return self.replace(break_task(task, opts, targets, results=results))
+
+	# Update state live
+	[update_state(self, task) for _ in task]
+	update_state(self, task, force=True)
+
+	# Garbage collection to save RAM
+	gc.collect()
 
-	# Get expanded targets
-	if not chunk and results:
-		targets, opts = run_extractors(results, opts, targets)
-		debug('after extractors', obj={'targets': targets, 'opts': opts}, sub='celery.state')
-
-	task = None
-
-	try:
-
-		# Get task class
-		task_cls = Task.get_task_class(name)
-
-		# Check if chunkable
-		many_targets = len(targets) > 1
-		targets_over_chunk_size = task_cls.input_chunk_size and len(targets) > task_cls.input_chunk_size
-		has_file_flag = task_cls.file_flag is not None
-		chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
-		task_opts = opts.copy()
-		task_opts.update({
-			'print_remote_info': False,
-			'has_children': chunk_it,
-		})
-
-		if IN_CELERY_WORKER_PROCESS and chunk_it and routing_key != 'poll':
-			console.print(Info(message=f'Task {name} is chunkable but not running on "poll" queue, re-routing to "poll" queue'))
-			raise self.replace(run_command.si(results, name, targets, opts=opts).set(queue='poll', task_id=self.request.id))
-
-		if chunk_it:
-			task_opts['print_cmd'] = False
-
-		task = task_cls(targets, **task_opts)
-		debug(
-			'',
-			obj={
-				f'{task.unique_name}': 'CHUNK STATUS',
-				'chunk_it': chunk_it,
-				'sync': task.sync,
-				'many_targets': many_targets,
-				'targets_over_chunk_size': targets_over_chunk_size,
-			},
-			obj_after=False,
-			id=self.request.id,
-			sub='celery.state',
-			verbose=True
-		)
-
-		# Chunk task if needed
-		if chunk_it:
-			chunk_size = task_cls.input_chunk_size if has_file_flag else 1
-			break_task(
-				task,
-				opts,
-				targets,
-				results=results,
-				chunk_size=chunk_size)
-			console.print(Info(message=f'Task "{name}" starts polling for chunked results'))
-
-		# Update state before starting
-		update_state(self, task)
-
-		# Update state for each item found
-		for _ in task:
-			update_state(self, task)
-
-	except BaseException as e:
-		if not task:
-			raise e
-		error = Error.from_exception(e)
-		error._source = task.unique_name
-		error._uuid = str(uuid.uuid4())
-		task.add_result(error, print=True)
-		task.stop_celery_tasks()
-
-	finally:
-		if not task:
-			raise
-		update_state(self, task, force=True)
-		gc.collect()
-		debug('', obj={task.unique_name: task.status, 'results': task.results}, sub='celery.results', verbose=True)
-		return task.results
+	return task.results
 
 
 @app.task
@@ -335,6 +268,7 @@ def forward_results(results):
 		results = results['results']
 	results = flatten(results)
 	results = deduplicate(results, attr='_uuid')
+	console.print(Info(message=f'Forwarding {len(results)} results ...'))
 	return results
 
 #--------------#
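Note on this change: break_task() now returns a chord whose callback is handle_runner_error, and run_command() swaps itself for that workflow via self.replace() instead of applying a chain inline. A minimal runnable sketch of the chord-and-replace pattern this relies on, assuming a local Redis broker; the task names here are illustrative, not secator code:

    from celery import Celery, chord

    app = Celery('demo', broker='redis://localhost:6379/0', backend='redis://localhost:6379/0')

    @app.task
    def scan_chunk(targets):
        # Stand-in for one chunked task signature built in break_task().
        return [f'result:{t}' for t in targets]

    @app.task
    def collect(results):
        # Chord callback: receives one result list per chunk, much like
        # handle_runner_error() does before flattening/deduplicating them.
        return [r for chunk in results for r in chunk]

    @app.task(bind=True)
    def run_all(self, targets, chunk_size=2):
        chunks = [targets[i:i + chunk_size] for i in range(0, len(targets), chunk_size)]
        workflow = chord([scan_chunk.s(c) for c in chunks], collect.s())
        # replace() ends this task and hands its task id to the workflow, so the
        # chord callback's return value becomes this task's result (as in run_command).
        raise self.replace(workflow)

    # Usage (requires a running worker): run_all.delay(['t1', 't2', 't3'])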
secator/celery_signals.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import signal
 import threading
+from pathlib import Path
 
 from celery import signals
 
@@ -8,57 +9,75 @@ from secator.config import CONFIG
 from secator.output_types import Info
 from secator.rich import console
 
-
 IDLE_TIMEOUT = CONFIG.celery.worker_kill_after_idle_seconds
-TASK_IN_PROGRESS = False
 
+# File-based state management system
+STATE_DIR = Path("/tmp/celery_state")
+STATE_DIR.mkdir(exist_ok=True, parents=True)
+
+
+def get_lock_file_path():
+	worker_name = os.environ.get("WORKER_NAME", f"unknown_{os.getpid()}")
+	return Path(f"/tmp/celery_worker_{worker_name}.lock")
+
+
+def set_task_running(task_id):
+	"""Mark that a task is running in current worker"""
+	with open(get_lock_file_path(), "w") as f:
+		f.write(task_id)
+
+
+def clear_task_running():
+	"""Clear the task running state"""
+	lock_file = get_lock_file_path()
+	if lock_file.exists():
+		lock_file.unlink()
 
-def kill_worker():
-	""""Kill current worker using it's pid by sending a SIGTERM to Celery master process."""
-	worker_name = os.environ['WORKER_NAME']
-	if not TASK_IN_PROGRESS:
-		pid = os.getpid()
-		console.print(Info(message=f'Sending SIGTERM to worker {worker_name} with pid {pid}'))
-		os.kill(pid, signal.SIGTERM)
-	else:
-		console.print(Info(message=f'Cancelling worker shutdown of {worker_name} since a task is currently in progress'))
 
+def is_task_running():
+	"""Check if a task is currently running"""
+	return get_lock_file_path().exists()
 
-class IdleTimer:
-	def __init__(self, timeout):
-		self.thread = None
-		self.is_started = False
-		self.thread = threading.Timer(timeout, kill_worker)
 
-	def start(self):
-		if self.is_started:
-			self.cancel()
-		self.thread.start()
-		self.is_started = True
+def kill_worker(parent=False):
+	"""Kill current worker using its pid by sending a SIGTERM to Celery master process."""
+	worker_name = os.environ.get('WORKER_NAME', 'unknown')
 
-	def cancel(self):
-		self.thread.cancel()
-		self.s_started = False
+	# Check if a task is running via the lock file
+	if not is_task_running():
+		pid = os.getppid() if parent else os.getpid()
+		console.print(Info(message=f'Sending SIGTERM to worker {worker_name} with pid {pid}'))
+		os.kill(pid, signal.SIGTERM)
+	else:
+		console.print(Info(message=f'Cancelling worker shutdown of {worker_name} since a task is running'))
 
 
-IDLE_TIMER = IdleTimer(IDLE_TIMEOUT)
+def setup_idle_timer(timeout):
+	"""Setup a timer to kill the worker after being idle"""
+	if timeout == -1:
+		return
+
+	console.print(Info(message=f'Starting inactivity timer for {timeout} seconds ...'))
+	timer = threading.Timer(timeout, kill_worker)
+	timer.daemon = True  # Make sure timer is killed when worker exits
+	timer.start()
 
 
 def maybe_override_logging():
-	def decorator(func):
-		if CONFIG.celery.override_default_logging:
-			return signals.setup_logging.connect(func)
-		else:
-			return func
-	return decorator
+	def decorator(func):
+		if CONFIG.celery.override_default_logging:
+			return signals.setup_logging.connect(func)
+		else:
+			return func
+	return decorator
 
 
 @maybe_override_logging()
 def setup_logging(*args, **kwargs):
-	"""Override celery's logging setup to prevent it from altering our settings.
-	github.com/celery/celery/issues/1867
-	"""
-	pass
+	"""Override celery's logging setup to prevent it from altering our settings.
+	github.com/celery/celery/issues/1867
+	"""
+	pass
 
 
 def capture_worker_name(sender, instance, **kwargs):
@@ -66,38 +85,57 @@ def capture_worker_name(sender, instance, **kwargs):
 
 
 def worker_init_handler(**kwargs):
-	if IDLE_TIMEOUT != -1:
-		console.print(Info(message=f'Starting inactivity timer for {IDLE_TIMEOUT} seconds ...'))
-		IDLE_TIMER.start()
+	if IDLE_TIMEOUT != -1:
+		setup_idle_timer(IDLE_TIMEOUT)
 
 
-def task_prerun_handler(**kwargs):
-	global TASK_IN_PROGRESS, IDLE_TIMER
-	TASK_IN_PROGRESS = True
-	if IDLE_TIMEOUT != -1:
-		IDLE_TIMER.cancel()
+def task_prerun_handler(task_id, **kwargs):
+	# Mark that a task is running
+	set_task_running(task_id)
 
 
 def task_postrun_handler(**kwargs):
-	global TASK_IN_PROGRESS, IDLE_TIMER
-	TASK_IN_PROGRESS = False
-	sender_name = kwargs['sender'].name
+	# Mark that no task is running
+	clear_task_running()
+
+	# Get sender name from kwargs
+	sender_name = kwargs['sender'].name
+
+	if CONFIG.celery.worker_kill_after_task and sender_name.startswith('secator.'):
+		worker_name = os.environ.get('WORKER_NAME', 'unknown')
+		console.print(Info(message=f'Shutdown worker {worker_name} since config celery.worker_kill_after_task is set.'))
+		kill_worker(parent=True)
+		return
+
+	# Set up a new idle timer
+	if IDLE_TIMEOUT != -1:
+		console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds'))
+		setup_idle_timer(IDLE_TIMEOUT)
+
+
+def task_revoked_handler(request=None, **kwargs):
+	"""Handle revoked tasks by clearing the task running state"""
+	console.print(Info(message='Task was revoked, clearing running state'))
+	clear_task_running()
+
+	# Set up a new idle timer
+	if IDLE_TIMEOUT != -1:
+		console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds after task revocation'))
+		setup_idle_timer(IDLE_TIMEOUT)
 
-	if CONFIG.celery.worker_kill_after_task and sender_name.startswith('secator.'):
-		worker_name = os.environ['WORKER_NAME']
-		console.print(Info(message=f'Shutdown worker {worker_name} since config celery.worker_kill_after_task is set.'))
-		IDLE_TIMER.cancel()
-		kill_worker()
-		return
 
-	if IDLE_TIMEOUT != -1:  # restart timer
-		console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds'))
-		IDLE_TIMER.start()
+def worker_shutdown_handler(**kwargs):
+	"""Cleanup lock files when worker shuts down"""
+	lock_file = get_lock_file_path()
+	if lock_file.exists():
+		lock_file.unlink()
 
 
 def setup_handlers():
-	signals.celeryd_after_setup.connect(capture_worker_name)
-	signals.setup_logging.connect(setup_logging)
-	signals.task_prerun.connect(task_prerun_handler)
-	signals.task_postrun.connect(task_postrun_handler)
-	signals.worker_ready.connect(worker_init_handler)
+	signals.celeryd_after_setup.connect(capture_worker_name)
+	signals.setup_logging.connect(setup_logging)
+	signals.task_prerun.connect(task_prerun_handler)
+	signals.task_postrun.connect(task_postrun_handler)
+	signals.task_revoked.connect(task_revoked_handler)
+	signals.worker_ready.connect(worker_init_handler)
+	signals.worker_shutdown.connect(worker_shutdown_handler)
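Note on this change: the in-memory TASK_IN_PROGRESS flag is replaced by a per-worker lock file, so the idle-kill decision survives across processes. A short sketch of the lock lifecycle using the helpers added above (assumes secator is installed; WORKER_NAME is normally captured by capture_worker_name):

    import os

    from secator.celery_signals import (
        clear_task_running, get_lock_file_path, is_task_running, set_task_running)

    os.environ['WORKER_NAME'] = 'worker1'  # normally set from the Celery worker name
    clear_task_running()                   # start from a clean state
    print(get_lock_file_path())            # /tmp/celery_worker_worker1.lock
    set_task_running('task-uuid-1234')     # what task_prerun_handler() does
    print(is_task_running())               # True -> kill_worker() cancels the shutdown
    clear_task_running()                   # what task_postrun/task_revoked handlers do
    print(is_task_running())               # False -> the next idle timeout may SIGTERM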
secator/celery_utils.py CHANGED
@@ -264,5 +264,5 @@ class CeleryData(object):
 			CeleryData.get_task_ids(result.parent, ids=ids)
 
 	except kombu.exceptions.DecodeError:
-		debug('kombu decode error', sub='celery.data.get_task_ids')
+		debug('kombu decode error', sub='celery.data')
 		return
secator/cli.py CHANGED
@@ -171,7 +171,8 @@ def worker(hostname, concurrency, reload, queue, pool, check, dev, stop, show):
 		patterns = "celery.py;tasks/*.py;runners/*.py;serializers/*.py;output_types/*.py;hooks/*.py;exporters/*.py"
 		cmd = f'watchmedo auto-restart --directory=./ --patterns="{patterns}" --recursive -- {cmd}'
 
-	Command.execute(cmd, name='secator_worker')
+	ret = Command.execute(cmd, name='secator_worker')
+	sys.exit(ret.return_code)
 
 
 #-------#
secator/hooks/mongodb.py CHANGED
@@ -20,11 +20,19 @@ MONGODB_MAX_POOL_SIZE = CONFIG.addons.mongodb.max_pool_size
 
 logger = logging.getLogger(__name__)
 
-client = pymongo.MongoClient(
-	escape_mongodb_url(MONGODB_URL),
-	maxPoolSize=MONGODB_MAX_POOL_SIZE,
-	serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
-)
+_mongodb_client = None
+
+
+def get_mongodb_client():
+	"""Get or create MongoDB client"""
+	global _mongodb_client
+	if _mongodb_client is None:
+		_mongodb_client = pymongo.MongoClient(
+			escape_mongodb_url(MONGODB_URL),
+			maxPoolSize=MONGODB_MAX_POOL_SIZE,
+			serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
+		)
+	return _mongodb_client
 
 
 def get_runner_dbg(runner):
@@ -39,6 +47,7 @@ def get_runner_dbg(runner):
 
 
 def update_runner(self):
+	client = get_mongodb_client()
 	db = client.main
 	type = self.config.type
 	collection = f'{type}s'
@@ -72,6 +81,7 @@ def update_finding(self, item):
 	if type(item) not in FINDING_TYPES:
 		return item
 	start_time = time.time()
+	client = get_mongodb_client()
 	db = client.main
 	update = item.toDict()
 	_type = item._type
@@ -97,15 +107,14 @@ def update_finding(self, item):
 
 
 def find_duplicates(self):
+	from secator.celery import IN_CELERY_WORKER_PROCESS
 	ws_id = self.toDict().get('context', {}).get('workspace_id')
 	if not ws_id:
 		return
-	if self.sync:
-		debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
+	if not IN_CELERY_WORKER_PROCESS:
 		tag_duplicates(ws_id)
 	else:
-		celery_id = tag_duplicates.delay(ws_id)
-		debug(f'running duplicate check on workspace {ws_id}', id=celery_id, sub='hooks.mongodb')
+		tag_duplicates.delay(ws_id)
 
 
 def load_finding(obj):
@@ -132,6 +141,8 @@ def tag_duplicates(ws_id: str = None):
 	Args:
 		ws_id (str): Workspace id.
 	"""
+	debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
+	client = get_mongodb_client()
 	db = client.main
 	workspace_query = list(
 		db.findings.find({'_context.workspace_id': str(ws_id), '_tagged': True}).sort('_timestamp', -1))
@@ -172,19 +183,19 @@ def tag_duplicates(ws_id: str = None):
 			'seen dupes': len(seen_dupes)
 		},
 		id=ws_id,
-		sub='hooks.mongodb.duplicates',
+		sub='hooks.mongodb',
 		verbose=True)
 	tmp_duplicates_ids = list(dict.fromkeys([i._uuid for i in tmp_duplicates]))
-	debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+	debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb', verbose=True)
 
 	# Update latest object as non-duplicate
 	if tmp_duplicates:
 		duplicates.extend([f for f in tmp_duplicates])
 		db.findings.update_one({'_id': ObjectId(item._uuid)}, {'$set': {'_related': tmp_duplicates_ids}})
-		debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+		debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
 		non_duplicates.append(item)
 	else:
-		debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+		debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
 		non_duplicates.append(item)
 
 	# debug(f'found {len(duplicates)} total duplicates')
@@ -208,7 +219,7 @@ def tag_duplicates(ws_id: str = None):
 			'duplicates': len(duplicates_ids),
 			'non-duplicates': len(non_duplicates_ids)
 		},
-		sub='hooks.mongodb.duplicates')
+		sub='hooks.mongodb')
 
 
 HOOKS = {
@@ -232,6 +243,6 @@ HOOKS = {
 		'on_item': [update_finding],
 		'on_duplicate': [update_finding],
 		'on_interval': [update_runner],
-		'on_end': [update_runner, find_duplicates]
+		'on_end': [update_runner]
 	}
 }
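Note on this change: the module-level pymongo.MongoClient is replaced by a lazily created singleton, so importing secator.hooks.mongodb no longer opens a connection at import time (this also plays nicer with forked Celery workers, since pymongo clients should not be shared across fork). The general shape of the pattern, with an illustrative URL:

    import pymongo

    _client = None

    def get_client(url='mongodb://localhost:27017'):
        """Create the MongoDB client on first use, then reuse it."""
        global _client
        if _client is None:
            _client = pymongo.MongoClient(url, maxPoolSize=10, serverSelectionTimeoutMS=5000)
        return _client

    db = get_client().main  # each caller asks for the client instead of using a module global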
secator/output_types/error.py CHANGED
@@ -21,10 +21,13 @@ class Error(OutputType):
 	_sort_by = ('_timestamp',)
 
 	def from_exception(e, **kwargs):
-		message = type(e).__name__
+		errtype = type(e).__name__
+		message = errtype
 		if str(e):
 			message += f': {str(e)}'
-		return Error(message=message, traceback=traceback_as_string(e), **kwargs)
+		traceback = traceback_as_string(e) if errtype not in ['KeyboardInterrupt', 'GreenletExit'] else ''
+		error = Error(message=message, traceback=traceback, **kwargs)
+		return error
 
 	def __str__(self):
 		return self.message
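Note on this change: Error.from_exception() now skips traceback capture for expected interrupts. A short usage sketch, assuming secator is installed (the printed values follow from the code above):

    from secator.output_types import Error

    try:
        raise ValueError('boom')
    except ValueError as e:
        err = Error.from_exception(e)
        print(err.message)          # 'ValueError: boom'
        print(bool(err.traceback))  # True: real errors keep their traceback

    err = Error.from_exception(KeyboardInterrupt())
    print(err.message)    # 'KeyboardInterrupt'
    print(err.traceback)  # '': suppressed for KeyboardInterrupt / GreenletExit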
secator/runners/_base.py CHANGED
@@ -15,7 +15,7 @@ from secator.config import CONFIG
 from secator.output_types import FINDING_TYPES, OutputType, Progress, Info, Warning, Error, Target
 from secator.report import Report
 from secator.rich import console, console_stdout
-from secator.runners._helpers import (get_task_folder_id, process_extractor)
+from secator.runners._helpers import (get_task_folder_id, process_extractor, run_extractors)
 from secator.utils import (debug, import_dynamic, merge_opts, rich_to_ansi, should_update)
 
 logger = logging.getLogger(__name__)
@@ -69,15 +69,14 @@ class Runner:
 	reports_folder = None
 
 	def __init__(self, config, inputs=[], results=[], run_opts={}, hooks={}, validators={}, context={}):
+		self.uuids = []
+		self.results = []
+		self.output = ''
+
+		# Runner config
 		self.config = config
 		self.name = run_opts.get('name', config.name)
 		self.description = run_opts.get('description', config.description)
-		if not isinstance(inputs, list):
-			inputs = [inputs]
-		self.inputs = inputs
-		self.uuids = []
-		self.output = ''
-		self.results = []
 		self.workspace_name = context.get('workspace_name', 'default')
 		self.run_opts = run_opts.copy()
 		self.sync = run_opts.get('sync', True)
@@ -97,6 +96,39 @@ class Runner:
 		self.caller = self.run_opts.get('caller', None)
 		self.threads = []
 		self.no_poll = self.run_opts.get('no_poll', False)
+		self.quiet = self.run_opts.get('quiet', False)
+
+		# Runner process options
+		self.no_process = self.run_opts.get('no_process', False)
+		self.piped_input = self.run_opts.get('piped_input', False)
+		self.piped_output = self.run_opts.get('piped_output', False)
+		self.enable_duplicate_check = self.run_opts.get('enable_duplicate_check', True)
+
+		# Runner print opts
+		self.print_item = self.run_opts.get('print_item', False)
+		self.print_line = self.run_opts.get('print_line', False) and not self.quiet
+		self.print_remote_info = self.run_opts.get('print_remote_info', False) and not self.piped_input and not self.piped_output  # noqa: E501
+		self.print_json = self.run_opts.get('print_json', False)
+		self.print_raw = self.run_opts.get('print_raw', False) or self.piped_output
+		self.print_fmt = self.run_opts.get('fmt', '')
+		self.print_progress = self.run_opts.get('print_progress', False) and not self.quiet and not self.print_raw
+		self.print_target = self.run_opts.get('print_target', False) and not self.quiet and not self.print_raw
+		self.print_stat = self.run_opts.get('print_stat', False) and not self.quiet and not self.print_raw
+		self.raise_on_error = self.run_opts.get('raise_on_error', False)
+		self.print_opts = {k: v for k, v in self.__dict__.items() if k.startswith('print_') if v}
+
+		# Determine inputs
+		inputs = [inputs] if not isinstance(inputs, list) else inputs
+		if results:
+			inputs, run_opts, errors = run_extractors(results, run_opts, inputs)
+			for error in errors:
+				self.add_result(error, print=True)
+		self.inputs = inputs
+
+		# Debug
+		self.debug('Inputs', obj=self.inputs, sub='init')
+		self.debug('Run opts', obj={k: v for k, v in self.run_opts.items() if v is not None}, sub='init')
+		self.debug('Print opts', obj={k: v for k, v in self.print_opts.items() if v is not None}, sub='init')
 
 		# Determine exporters
 		exporters_str = self.run_opts.get('output') or self.default_exporters
@@ -123,31 +155,6 @@ class Runner:
 			self.enable_profiler = False
 			pass
 
-		# Process opts
-		self.quiet = self.run_opts.get('quiet', False)
-		self.no_process = self.run_opts.get('no_process', False)
-		self.piped_input = self.run_opts.get('piped_input', False)
-		self.piped_output = self.run_opts.get('piped_output', False)
-		self.enable_duplicate_check = self.run_opts.get('enable_duplicate_check', True)
-
-		# Print opts
-		self.print_item = self.run_opts.get('print_item', False)
-		self.print_line = self.run_opts.get('print_line', False) and not self.quiet
-		self.print_remote_info = self.run_opts.get('print_remote_info', False) and not self.piped_input and not self.piped_output  # noqa: E501
-		self.print_json = self.run_opts.get('print_json', False)
-		self.print_raw = self.run_opts.get('print_raw', False) or self.piped_output
-		self.print_fmt = self.run_opts.get('fmt', '')
-		self.print_progress = self.run_opts.get('print_progress', False) and not self.quiet and not self.print_raw
-		self.print_target = self.run_opts.get('print_target', False) and not self.quiet and not self.print_raw
-		self.print_stat = self.run_opts.get('print_stat', False) and not self.quiet and not self.print_raw
-		self.raise_on_error = self.run_opts.get('raise_on_error', False)
-		self.print_opts = {k: v for k, v in self.__dict__.items() if k.startswith('print_') if v}
-
-		# Debug
-		self.debug('Inputs', obj=self.inputs, sub='init')
-		self.debug('Run opts', obj={k: v for k, v in self.run_opts.items() if v is not None}, sub='init')
-		self.debug('Print opts', obj={k: v for k, v in self.print_opts.items() if v is not None}, sub='init')
-
 		# Hooks
 		self.hooks = {name: [] for name in HOOKS + getattr(self, 'hooks', [])}
 		self.register_hooks(hooks)
@@ -166,7 +173,7 @@ class Runner:
 
 		# Process prior results
 		for result in results:
-			list(self._process_item(result, print=False))
+			list(self._process_item(result, print=False, output=False))
 
 		# Input post-process
 		self.run_hooks('before_init')
@@ -219,6 +226,12 @@ class Runner:
 	def self_findings(self):
 		return [r for r in self.results if isinstance(r, tuple(FINDING_TYPES)) if r._source.startswith(self.unique_name)]
 
+	@property
+	def self_errors(self):
+		if self.config.type == 'task':
+			return [r for r in self.results if isinstance(r, Error) and r._source.startswith(self.unique_name)]
+		return [r for r in self.results if isinstance(r, Error)]
+
 	@property
 	def self_findings_count(self):
 		return len(self.self_findings)
@@ -227,7 +240,7 @@ class Runner:
 	def status(self):
 		if not self.done:
 			return 'RUNNING'
-		return 'FAILURE' if len(self.errors) > 0 else 'SUCCESS'
+		return 'FAILURE' if len(self.self_errors) > 0 else 'SUCCESS'
 
 	@property
 	def celery_state(self):
@@ -525,7 +538,7 @@ class Runner:
 			fun = self.get_func_path(hook)
 			try:
 				if hook_type == 'on_interval' and not should_update(CONFIG.runners.backend_update_frequency, self.last_updated_db):
-					self.debug('', obj={f'{name} [dim yellow]->[/] {fun}': '[dim gray11]skipped[/]'}, id=_id, sub='hooks.db', verbose=True)  # noqa: E501
+					self.debug('', obj={f'{name} [dim yellow]->[/] {fun}': '[dim gray11]skipped[/]'}, id=_id, sub='hooks', verbose=True)  # noqa: E501
 					return
 				if not self.enable_hooks or self.no_process:
 					self.debug('', obj={f'{name} [dim yellow]->[/] {fun}': '[dim gray11]skipped[/]'}, id=_id, sub='hooks', verbose=True)  # noqa: E501
@@ -783,19 +796,20 @@ class Runner:
 			count_map[name] = count
 		return count_map
 
-	def _process_item(self, item, print=True):
+	def _process_item(self, item, print=True, output=True):
 		"""Process an item yielded by the derived runner.
 
 		Args:
 			item (dict | str): Input item.
 			print (bool): Print item in console.
+			output (bool): Add to runner output.
 
 		Yields:
 			OutputType: Output type.
 		"""
 		# Item is a string, just print it
 		if isinstance(item, str):
-			self.output += item + '\n'
+			self.output += item + '\n' if output else ''
 			self._print_item(item) if item and print else ''
 			return
secator/runners/_helpers.py CHANGED
@@ -1,6 +1,7 @@
 import os
 
-from secator.utils import deduplicate
+from secator.output_types import Error
+from secator.utils import deduplicate, debug
 
 
 def run_extractors(results, opts, inputs=[]):
@@ -12,17 +13,19 @@ def run_extractors(results, opts, inputs=[]):
 		inputs (list): Original inputs.
 
 	Returns:
-		tuple: inputs, options.
+		tuple: inputs, options, errors.
 	"""
 	extractors = {k: v for k, v in opts.items() if k.endswith('_')}
+	errors = []
 	for key, val in extractors.items():
 		key = key.rstrip('_')
-		values = extract_from_results(results, val)
+		values, err = extract_from_results(results, val)
+		errors.extend(err)
 		if key == 'targets':
 			inputs = deduplicate(values)
 		else:
 			opts[key] = deduplicate(values)
-	return inputs, opts
+	return inputs, opts, errors
 
 
 def extract_from_results(results, extractors):
@@ -33,14 +36,19 @@ def extract_from_results(results, extractors):
 		extractors (list): List of extractors to extract from.
 
 	Returns:
-		list: List of extracted results (flat).
+		tuple: List of extracted results (flat), list of errors.
 	"""
-	extracted = []
+	extracted_results = []
+	errors = []
 	if not isinstance(extractors, list):
 		extractors = [extractors]
 	for extractor in extractors:
-		extracted.extend(process_extractor(results, extractor))
-	return extracted
+		try:
+			extracted_results.extend(process_extractor(results, extractor))
+		except Exception as e:
+			error = Error.from_exception(e)
+			errors.append(error)
+	return extracted_results, errors
 
 
 def process_extractor(results, extractor, ctx={}):
@@ -53,6 +61,7 @@ def process_extractor(results, extractor, ctx={}):
 	Returns:
 		list: List of extracted results.
 	"""
+	debug('before extract', obj={'results': results, 'extractor': extractor}, sub='extractor')
 	if isinstance(extractor, dict):
 		_type = extractor['type']
 		_field = extractor.get('field')
@@ -66,6 +75,7 @@ def process_extractor(results, extractor, ctx={}):
 	if _field:
 		_field = '{' + _field + '}' if not _field.startswith('{') else _field
 		items = [_field.format(**item.toDict()) for item in items]
+	debug('after extract', obj={'items': items}, sub='extractor')
 	return items
 
 
secator/runners/command.py CHANGED
@@ -193,6 +193,14 @@ class Command(Runner):
 		})
 		return res
 
+	@classmethod
+	def needs_chunking(cls, targets, sync):
+		many_targets = len(targets) > 1
+		targets_over_chunk_size = cls.input_chunk_size and len(targets) > cls.input_chunk_size
+		has_file_flag = cls.file_flag is not None
+		chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
+		return chunk_it
+
 	@classmethod
 	def delay(cls, *args, **kwargs):
 		# TODO: Move this to TaskBase
@@ -858,6 +866,8 @@ class Command(Runner):
 			cmd = f'cat {fpath} | {cmd}'
 		elif self.file_flag:
 			cmd += f' {self.file_flag} {fpath}'
+		else:
+			cmd += f' {fpath}'
 
 		self.inputs_path = fpath
 
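Note on this change: the chunking predicate moved out of run_command() into Command.needs_chunking(), so the Celery layer and the runner share one rule. A worked example with illustrative class attributes (not a real secator task):

    class DemoTask:
        input_chunk_size = 3   # max targets per async chunk
        file_flag = None       # no flag to pass a targets file to the binary

        @classmethod
        def needs_chunking(cls, targets, sync):
            many_targets = len(targets) > 1
            targets_over_chunk_size = cls.input_chunk_size and len(targets) > cls.input_chunk_size
            has_file_flag = cls.file_flag is not None
            return (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)

    print(DemoTask.needs_chunking(['a', 'b'], sync=True))             # True: >1 target, no file flag
    print(DemoTask.needs_chunking(['a', 'b'], sync=False))            # False: 2 <= input_chunk_size
    print(DemoTask.needs_chunking(['a', 'b', 'c', 'd'], sync=False))  # True: over chunk size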
secator/tasks/fping.py CHANGED
@@ -13,7 +13,6 @@ class fping(ReconIp):
 	cmd = 'fping -a'
 	file_flag = '-f'
 	input_flag = None
-	ignore_return_code = True
 	opt_prefix = '--'
 	opt_key_map = {
 		DELAY: 'period',
secator/tasks/gospider.py CHANGED
@@ -55,7 +55,6 @@ class gospider(HttpCrawler):
 	}
 	install_cmd = 'go install -v github.com/jaeles-project/gospider@latest'
 	install_github_handle = 'jaeles-project/gospider'
-	ignore_return_code = True
 	proxychains = False
 	proxy_socks5 = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
 	proxy_http = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
secator/tasks/naabu.py CHANGED
@@ -47,8 +47,8 @@ class naabu(ReconPort):
 		}
 	}
 	output_types = [Port]
-	install_cmd = 'go install -v github.com/projectdiscovery/naabu/v2/cmd/naabu@latest'
-	install_github_handle = 'projectdiscovery/naabu'
+	install_cmd = 'go install -v github.com/projectdiscovery/naabu/v2/cmd/naabu@v2.3.3'
+	# install_github_handle = 'projectdiscovery/naabu'
 	install_pre = {'apt': ['libpcap-dev'], 'apk': ['libpcap-dev', 'libc6-compat'], 'pacman|brew': ['libpcap']}
 	install_post = {'arch|alpine': 'sudo ln -sf /usr/lib/libpcap.so /usr/lib/libpcap.so.0.8'}
 	proxychains = False
secator/tasks/nuclei.py CHANGED
@@ -73,7 +73,6 @@ class nuclei(VulnMulti):
 			EXTRA_DATA: lambda x: {k: v for k, v in x.items() if k not in ['duration', 'errors', 'percent']}
 		}
 	}
-	ignore_return_code = True
 	install_pre = {
 		'*': ['git']
 	}
secator/tasks/wpscan.py CHANGED
@@ -82,7 +82,6 @@ class wpscan(VulnHttp):
 	proxychains = False
 	proxy_http = True
 	proxy_socks5 = False
-	ignore_return_code = True
 	profile = 'io'
 
 	@staticmethod
secator/utils.py CHANGED
@@ -772,6 +772,8 @@ def process_wordlist(val):
 	template_wordlist = getattr(CONFIG.wordlists.templates, val)
 	if template_wordlist:
 		return template_wordlist
+	elif Path(val).exists():
+		return val
 	else:
 		return download_file(
 			val,
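Note on this change: process_wordlist() gains a local-path branch between the template lookup and the download fallback. A hedged sketch of the resolution order (the template name and paths below are illustrative):

    from secator.utils import process_wordlist

    process_wordlist('fuzz')                       # configured template name -> its wordlist path
    process_wordlist('/tmp/custom_words.txt')      # existing local file -> returned as-is (new branch)
    process_wordlist('https://example.com/w.txt')  # anything else -> fetched via download_file()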
secator-0.10.0.dist-info/METADATA → secator-0.10.1a0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: secator
-Version: 0.10.0
+Version: 0.10.1a0
 Summary: The pentester's swiss knife.
 Project-URL: Homepage, https://github.com/freelabz/secator
 Project-URL: Issues, https://github.com/freelabz/secator/issues
secator-0.10.0.dist-info/RECORD → secator-0.10.1a0.dist-info/RECORD RENAMED
@@ -1,9 +1,9 @@
 secator/.gitignore,sha256=da8MUc3hdb6Mo0WjZu2upn5uZMbXcBGvhdhTQ1L89HI,3093
 secator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-secator/celery.py,sha256=cc7swFNPAuYMGRdL55Drwyo5RO4sK1l9UxDO-ojLM4Y,10518
-secator/celery_signals.py,sha256=WG9d41CoRIPjHpauIiAE41ekI7j7DPQ01uWU6RHtcL0,2719
-secator/celery_utils.py,sha256=iIuCn_3YkPXCtpnbaYqpppU2TARzSDyTIYHkrRyt54s,7725
-secator/cli.py,sha256=b-Oo_fACToy3pGfIo2Bzci_6rcWR4fONfOP01vnfVt4,43889
+secator/celery.py,sha256=9KXKv4EamJYJrHt_Ppn7aIp1AiFaTn2V0J_tZBwtWK0,8802
+secator/celery_signals.py,sha256=HobT7hCbVKPEHvCNwxCvQxFVUyocU1kkrTXi67b1DDw,4346
+secator/celery_utils.py,sha256=UWqLZpUaOXcztC_GD6uEDLiP8bGmD3WiTQN-u3lialg,7712
+secator/cli.py,sha256=3_tTTusW12MCejFgtOeYjiedjrJpyQj_gsCK8FkTMJA,43922
 secator/config.py,sha256=xItKM29yvMqzNZZygSNZXZ2V9vJbTdRuLTfIoRfP3XE,19653
 secator/decorators.py,sha256=3kYadCz6haIZtnjkFHSRfenTdc6Yu7bHd-0IVjhD72w,13902
 secator/definitions.py,sha256=gFtLT9fjNtX_1qkiCjNfQyCvYq07IhScsQzX4o20_SE,3084
@@ -12,7 +12,7 @@ secator/report.py,sha256=qJkEdCFttDBXIwUNUzZqFU_sG8l0PvyTSTogZVBv1Rs,3628
 secator/rich.py,sha256=owmuLcTTUt8xYBTE3_SqWTkPeAomcU_8bPdW_V-U8VM,3264
 secator/template.py,sha256=Sb6PjCTGIkZ7I0OGWFp5CaXmjt-6VPe_xpcRhWhjGpU,4409
 secator/thread.py,sha256=rgRgEtcMgs2wyfLWVlCTUCLWeg6jsMo5iKpyyrON5rY,655
-secator/utils.py,sha256=FBDa0BWPFLDLXKD_3FwFd8Bmz-fP0inKX8kG8LoivjU,21748
+secator/utils.py,sha256=zlG3-f3KEN9DdiT5kCqHhIASdEudYDgSYPkB76DTLLk,21787
 secator/utils_test.py,sha256=ArHwkWW89t0IDqxO4HjJWd_tm7tp1illP4pu3nLq5yo,6559
 secator/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 secator/configs/profiles/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -49,10 +49,10 @@ secator/exporters/table.py,sha256=RY7Tct5kowEx8Oza8QMXFx6fKBARYfLxEbbvjKiE3eQ,11
 secator/exporters/txt.py,sha256=oMtr22di6cqyE_5yJoiWP-KElrI5QgvK1cOUrj7H7js,730
 secator/hooks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 secator/hooks/gcs.py,sha256=MIhntyWYz9BZdTXhWl5JznaczSq1_7fl3TVqPufuTSo,1490
-secator/hooks/mongodb.py,sha256=HyjtpJSoxvSZ6aG8uBf1RFLNKGXCBDQL5eEh4xzDonA,7545
+secator/hooks/mongodb.py,sha256=XKbm_SrcSbQ2koILWvhzSg4tqdvHXgX5aU5x46Edu1s,7716
 secator/output_types/__init__.py,sha256=LxCW0K1f2vdgUapc4pIEsUpBfC0TQVvqo7T57rGuZGk,1159
 secator/output_types/_base.py,sha256=OgS6ICt66TzPsqo1JZwRIIwbng2HRX1i_u5qbUECgNk,2820
-secator/output_types/error.py,sha256=QjiJ5RoN3-utHqAyvgL2jlmZp7-u7emgUQpvLpYammU,1405
+secator/output_types/error.py,sha256=39gpEJfKM2EuyOhD9lSkjjna2QicMvnLdFav6kHmhlg,1529
 secator/output_types/exploit.py,sha256=-BKTqPBg94rVgjw8YSmcYuBCI2x-73WwMd9ITP9qr3Y,1750
 secator/output_types/info.py,sha256=R8xeiD3ocNOsvkJPhrQgsx6q-Ea1G0eTAqyuh5JrAR0,843
 secator/output_types/ip.py,sha256=CyE3qkp55Kmj5YRl0CZGS4XrHX8N5apWrLN3OMzaK0U,1127
@@ -68,10 +68,10 @@ secator/output_types/user_account.py,sha256=rm10somxyu30JHjj629IkR15Nhahylud_fVO
 secator/output_types/vulnerability.py,sha256=nF7OT9zGez8sZvLrkhjBOORjVi8hCqfCYUFq3eZ_ywo,2870
 secator/output_types/warning.py,sha256=47GtmG083GqGPb_R5JDFmARJ9Mqrme58UxwJhgdGPuI,853
 secator/runners/__init__.py,sha256=EBbOk37vkBy9p8Hhrbi-2VtM_rTwQ3b-0ggTyiD22cE,290
-secator/runners/_base.py,sha256=QBYyrYCPMJz0YPP6lE8vkgIHmDLplO6byrsisqVjV5g,29047
-secator/runners/_helpers.py,sha256=FGogmmdHfCWmIyq7wRprwU1oOSxesOu3Y0N4GyAgiGw,2000
+secator/runners/_base.py,sha256=T9gjOqe-UPDHe5ZdVRBtUtxTefRgDcq9JV08F6UV5ZU,29596
+secator/runners/_helpers.py,sha256=QhJmdmFdu5XSx3LBFf4Q4Hy2EXS6bLGnJUq8G7C6f68,2410
 secator/runners/celery.py,sha256=bqvDTTdoHiGRCt0FRvlgFHQ_nsjKMP5P0PzGbwfCj_0,425
-secator/runners/command.py,sha256=x7ktQLwIy7CWV-AEL6n5xY2sRzAijGCURuB17hJWRpY,25089
+secator/runners/command.py,sha256=PqCOHDKJXvG4weB8mXDTElGxc8i8pK2RoyTKUBpHASU,25480
 secator/runners/scan.py,sha256=tuPuqwL6fIS4UbCoy5WPKthYWm_LL-vCPRD2qK58HZE,1232
 secator/runners/task.py,sha256=f2AduWpIy8JHK-Qitl_2Kh0fia573_YHAyAlV6MsJ50,2068
 secator/runners/workflow.py,sha256=XEhBfL-f3vGH0HgEPnj62d8ITxjH_tPXiNSVkaonuwQ,3862
@@ -91,10 +91,10 @@ secator/tasks/dnsx.py,sha256=nK14_DeyX0QTpAMdIP0LSSEOEu5_tQemyFW0XPjA2f8,2266
 secator/tasks/dnsxbrute.py,sha256=5VnSpd5ken7vWxFX1bcsGcUN8LpaVhcjafnuETzkMGs,1422
 secator/tasks/feroxbuster.py,sha256=3bKolPIdDBhdJ2fu4BP3w1cOlxDyI8WmtM-_2pDQ0AM,2773
 secator/tasks/ffuf.py,sha256=VGrtjFgSX6Q1I8h1wjPO5XwBFCfZXmn0DQsn9gxEUXc,2468
-secator/tasks/fping.py,sha256=m7eSXFU5yIeDD_kWh-h208ufSZAm4SpQzD34Ko0yCu8,1116
+secator/tasks/fping.py,sha256=9nMIahBMizRwsos9py-ltXMEffIiyx1reVytj9sTyKU,1089
 secator/tasks/gau.py,sha256=1Qt0I_FqTh-QyJ0YR8I7i_T80HehWSvem_SS-TQKVm0,1648
 secator/tasks/gf.py,sha256=y8Fc0sRLGqNuwUjTBgLk3HEw3ZOnh09nB_GTufGErNA,962
-secator/tasks/gospider.py,sha256=XKLus6GnwN9MYU_ZFmNED-JeRn6n1Eg0CPgul8g1zLs,2302
+secator/tasks/gospider.py,sha256=mpoBq2VQXUqgwWPLz41fzdW85hJeo8mn9FUUJj_DrUw,2275
 secator/tasks/grype.py,sha256=xoOuldnHCrS0O1Y4IzjbSVvoX5eX-fLSZ74THdRC2so,2447
 secator/tasks/h8mail.py,sha256=wNukV-aB-bXPZNq7WL8n1nFgH5b5tGh6vOF80Yna33I,1934
 secator/tasks/httpx.py,sha256=ONfCdAOV7ARCM9tSnlucIAM3UQeWcMUm8QZX8F7u9Pg,5895
@@ -102,15 +102,15 @@ secator/tasks/katana.py,sha256=J0HKPT4QIrDj4uW2gZe7ByW6iEwPortSszqaHDvziwY,5355
 secator/tasks/maigret.py,sha256=6anhBzB4lEM90Lk23cAD_ku7I_ghTpj0W0i3h6HARD8,2088
 secator/tasks/mapcidr.py,sha256=56ocbaDmB5_C_ns-773CgZXGOKOtkI9q9xJs2Rlfqio,990
 secator/tasks/msfconsole.py,sha256=TXVrvzSWw9Ncv2h9QJtaEinTMbps_z0zX1PFirERVho,6430
-secator/tasks/naabu.py,sha256=aAEkQ10ma3Log8OVj8wHY1zUWmjpVQ5pehAMQLJQEV0,2089
+secator/tasks/naabu.py,sha256=90WORQhwFwy71OGNaFe10pCkIG8IJP1XwWQ24OMgSc4,2091
 secator/tasks/nmap.py,sha256=Zu24sJHnlOf3NXLj3Ohi07-x7m-5Ajr5ULpNsUF-QT0,12546
-secator/tasks/nuclei.py,sha256=o677F5yv3mfIlYEpKY5_W6CT2Dlt315DuFOsCjHLE5c,4270
+secator/tasks/nuclei.py,sha256=bMXCRU5VWyrwI7Cv6BCj84NTpfjuALFumPqUSZ4Y6Ug,4243
 secator/tasks/searchsploit.py,sha256=gvtLZbL2hzAZ07Cf0cSj2Qs0GvWK94XyHvoPFsetXu8,3321
 secator/tasks/subfinder.py,sha256=C6W5NnXT92OUB1aSS9IYseqdI3wDMAz70TOEl8X-o3U,1213
-secator/tasks/wpscan.py,sha256=C8eW3vWfbSFrxm5iPzs3MgcagIfSs7u51QZiecYbT2Q,5577
+secator/tasks/wpscan.py,sha256=036ywiEqZfX_Bt071U7qIm7bi6pNk7vodflmuslJurA,5550
 secator/workflows/__init__.py,sha256=ivpZHiYYlj4JqlXLRmB9cmAPUGdk8QcUrCRL34hIqEA,665
-secator-0.10.0.dist-info/METADATA,sha256=TzYxMIn1BO_mo52EkeS5pyg7Ei13hf3Y787EsSa45GE,14724
-secator-0.10.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-secator-0.10.0.dist-info/entry_points.txt,sha256=lPgsqqUXWgiuGSfKy-se5gHdQlAXIwS_A46NYq7Acic,44
-secator-0.10.0.dist-info/licenses/LICENSE,sha256=19W5Jsy4WTctNkqmZIqLRV1gTDOp01S3LDj9iSgWaJ0,2867
-secator-0.10.0.dist-info/RECORD,,
+secator-0.10.1a0.dist-info/METADATA,sha256=c2JLeTa-Pv7TzcWFDPQlfuR1XLU6YnVegdnb1d5_-gc,14726
+secator-0.10.1a0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+secator-0.10.1a0.dist-info/entry_points.txt,sha256=lPgsqqUXWgiuGSfKy-se5gHdQlAXIwS_A46NYq7Acic,44
+secator-0.10.1a0.dist-info/licenses/LICENSE,sha256=19W5Jsy4WTctNkqmZIqLRV1gTDOp01S3LDj9iSgWaJ0,2867
+secator-0.10.1a0.dist-info/RECORD,,