secator 0.10.1a2__tar.gz → 0.10.1a4__tar.gz
This diff compares the contents of the two package versions as published to their public registry, and is provided for informational purposes only.
- {secator-0.10.1a2 → secator-0.10.1a4}/PKG-INFO +1 -1
- {secator-0.10.1a2 → secator-0.10.1a4}/pyproject.toml +1 -1
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/celery.py +108 -62
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/celery_signals.py +2 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/celery_utils.py +38 -2
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/config.py +1 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/__init__.py +3 -1
- secator-0.10.1a4/secator/output_types/state.py +29 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/_base.py +85 -35
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/command.py +4 -5
- secator-0.10.1a4/secator/runners/scan.py +55 -0
- secator-0.10.1a4/secator/runners/task.py +71 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/workflow.py +23 -52
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/scans/__init__.py +5 -5
- secator-0.10.1a2/secator/runners/scan.py +0 -43
- secator-0.10.1a2/secator/runners/task.py +0 -77
- {secator-0.10.1a2 → secator-0.10.1a4}/.docker/Dockerfile.alpine +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.docker/Dockerfile.arch +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.docker/Dockerfile.debian +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.docker/Dockerfile.kali +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.docker/Dockerfile.osx +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.docker/Dockerfile.ubuntu +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.docker/build_all.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.dockerignore +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.flake8 +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/.gitignore +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/CHANGELOG.md +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/CONTRIBUTING.md +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/Dockerfile +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/LICENSE +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/README.md +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/SECURITY.md +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/cloudbuild.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/helm/.helmignore +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/helm/Chart.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/helm/templates/redis-service.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/helm/templates/redis.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/helm/templates/secator-manager.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/helm/templates/secator-worker.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/helm/values.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/download_cves.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/install.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/install_asciinema.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/install_go.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/install_ruby.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/msf/exploit_cve.rc +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/msf/ftp_anonymous.rc +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/msf/ftp_version.rc +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/msf/ftp_vsftpd_234_backdoor.rc +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/msf/redis.rc +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/stories/STORY.md +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/stories/aliases.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/stories/demo.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/stories/fmt.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/stories/input.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/stories/pipe.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/scripts/stories/short_demo.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/.gitignore +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/cli.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/profiles/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/profiles/aggressive.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/profiles/default.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/profiles/stealth.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/scans/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/scans/domain.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/scans/host.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/scans/network.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/scans/subdomain.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/scans/url.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/cidr_recon.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/code_scan.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/host_recon.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/port_scan.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/subdomain_recon.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/url_bypass.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/url_crawl.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/url_dirsearch.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/url_fuzz.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/url_nuclei.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/url_vuln.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/user_hunt.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/configs/workflows/wordpress.yaml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/decorators.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/definitions.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/_base.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/console.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/csv.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/gdrive.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/json.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/table.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/exporters/txt.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/hooks/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/hooks/gcs.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/hooks/mongodb.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/installer.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/_base.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/error.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/exploit.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/info.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/ip.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/port.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/progress.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/record.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/stat.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/subdomain.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/tag.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/target.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/url.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/user_account.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/vulnerability.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/warning.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/report.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/rich.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/_helpers.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/celery.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/serializers/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/serializers/dataclass.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/serializers/json.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/serializers/regex.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/_categories.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/bbot.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/bup.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/cariddi.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/dalfox.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/dirsearch.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/dnsx.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/dnsxbrute.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/feroxbuster.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/ffuf.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/fping.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/gau.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/gf.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/gospider.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/grype.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/h8mail.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/httpx.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/katana.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/maigret.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/mapcidr.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/msfconsole.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/naabu.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/nmap.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/nuclei.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/searchsploit.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/subfinder.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/tasks/wpscan.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/template.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/thread.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/utils.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/utils_test.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/secator/workflows/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/fixtures/h8mail_breach.txt +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/fixtures/ls.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/fixtures/msfconsole_input.rc +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/fixtures/nmap_output.xml +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/inputs.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/outputs.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/setup.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/teardown.sh +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/test_addons.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/test_celery.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/test_scans.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/test_tasks.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/test_tasks_categories.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/test_worker.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/test_workflows.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/wordlist.txt +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/wordlist_dns.txt +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/wordpress_toolbox/Dockerfile +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/integration/wordpress_toolbox/Makefile +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/performance/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/performance/loadtester.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/performance/test_worker.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/__init__.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_celery.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_cli.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_config.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_offline.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_runners.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_scans.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_serializers.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_tasks.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_tasks_categories.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_template.py +0 -0
- {secator-0.10.1a2 → secator-0.10.1a4}/tests/unit/test_utils.py +0 -0
{secator-0.10.1a2 → secator-0.10.1a4}/secator/celery.py

@@ -2,7 +2,6 @@ import gc
 import json
 import logging
 import os
-import sys
 import uuid

 from time import time
@@ -13,14 +12,13 @@ from celery.app import trace
 from rich.logging import RichHandler
 from retry import retry

-from secator.celery_signals import setup_handlers
+from secator.celery_signals import IN_CELERY_WORKER_PROCESS, setup_handlers
 from secator.config import CONFIG
 from secator.output_types import Info
 from secator.rich import console
 from secator.runners import Scan, Task, Workflow
 from secator.utils import (debug, deduplicate, flatten, should_update)

-IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)

 #---------#
 # Logging #
@@ -138,64 +136,11 @@ def chunker(seq, size):
 	return (seq[pos:pos + size] for pos in range(0, len(seq), size))


-@app.task(bind=True)
-def handle_runner_error(self, results, runner):
-	"""Handle errors in Celery workflows (chunked tasks or runners)."""
-	results = forward_results(results)
-	runner.results = results
-	runner.log_results()
-	runner.run_hooks('on_end')
-	return runner.results
-
-
-def break_task(task, task_opts, targets, results=[], chunk_size=1):
-	"""Break a task into multiple of the same type."""
-	chunks = targets
-	if chunk_size > 1:
-		chunks = list(chunker(targets, chunk_size))
-	debug(
-		'',
-		obj={task.unique_name: 'CHUNKED', 'chunk_size': chunk_size, 'chunks': len(chunks), 'target_count': len(targets)},
-		obj_after=False,
-		sub='celery.state',
-		verbose=True
-	)
-
-	# Clone opts
-	opts = task_opts.copy()
-
-	# Build signatures
-	sigs = []
-	task.ids_map = {}
-	for ix, chunk in enumerate(chunks):
-		if not isinstance(chunk, list):
-			chunk = [chunk]
-		if len(chunks) > 0:  # add chunk to task opts for tracking chunks exec
-			opts['chunk'] = ix + 1
-			opts['chunk_count'] = len(chunks)
-		task_id = str(uuid.uuid4())
-		opts['has_parent'] = True
-		opts['enable_duplicate_check'] = False
-		opts['results'] = results
-		sig = type(task).si(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
-		full_name = f'{task.name}_{ix + 1}'
-		task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
-		info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
-		task.add_result(info)
-		sigs.append(sig)
-
-	# Build Celery workflow
-	workflow = chord(
-		tuple(sigs),
-		handle_runner_error.s(runner=task).set(queue='results')
-	)
-	return workflow
-
-
 @app.task(bind=True)
 def run_task(self, args=[], kwargs={}):
-	print('run task')
 	console.print(Info(message=f'Running task {self.request.id}'))
+	if 'context' not in kwargs:
+		kwargs['context'] = {}
 	kwargs['context']['celery_id'] = self.request.id
 	task = Task(*args, **kwargs)
 	task.run()
@@ -204,6 +149,8 @@ def run_task(self, args=[], kwargs={}):
 @app.task(bind=True)
 def run_workflow(self, args=[], kwargs={}):
 	console.print(Info(message=f'Running workflow {self.request.id}'))
+	if 'context' not in kwargs:
+		kwargs['context'] = {}
 	kwargs['context']['celery_id'] = self.request.id
 	workflow = Workflow(*args, **kwargs)
 	workflow.run()
@@ -241,12 +188,17 @@ def run_command(self, results, name, targets, opts={}):
 	sync = not IN_CELERY_WORKER_PROCESS
 	task_cls = Task.get_task_class(name)
 	task = task_cls(targets, **opts)
+	task.started = True
+	task.run_hooks('on_start')
 	update_state(self, task, force=True)

 	# Chunk task if needed
-	if
-
-
+	if task.needs_chunking(sync):
+		if IN_CELERY_WORKER_PROCESS:
+			console.print(Info(message=f'Task {name} requires chunking, breaking into {len(targets)} tasks'))
+		tasks = break_task(task, opts, results=results)
+		update_state(self, task, force=True)
+		return self.replace(tasks)

 	# Update state live
 	[update_state(self, task) for _ in task]
@@ -268,9 +220,56 @@ def forward_results(results):
 		results = results['results']
 	results = flatten(results)
 	results = deduplicate(results, attr='_uuid')
-
+	if IN_CELERY_WORKER_PROCESS:
+		console.print(Info(message=f'Forwarding {len(results)} results ...'))
 	return results

+
+@app.task
+def mark_runner_started(runner):
+	"""Mark a runner as started and run on_start hooks.
+
+	Args:
+		runner (Runner): Secator runner instance
+
+	Returns:
+		list: Runner results
+	"""
+	runner.started = True
+	# runner.start_time = time()
+	runner.run_hooks('on_start')
+	return runner.results
+
+
+@app.task
+def mark_runner_complete(results, runner):
+	"""Mark a runner as completed and run on_end hooks.
+
+	Args:
+		results (list): Task results
+		runner (Runner): Secator runner instance
+
+	Returns:
+		list: Final results
+	"""
+	results = forward_results(results)
+
+	# If sync mode, don't update the runner as it's already done
+	if runner.sync:
+		return results
+
+	# Run final processing
+	runner.results = results
+	runner.done = True
+	runner.progress = 100
+	if not runner.no_process:
+		runner.mark_duplicates()
+		runner.results = runner.filter_results()
+	runner.log_results()
+	runner.run_hooks('on_end')
+	return runner.results
+
+
 #--------------#
 # Celery utils #
 #--------------#
@@ -285,3 +284,50 @@ def is_celery_worker_alive():
 	else:
 		console.print(Info(message='No Celery worker available, running locally'))
 	return result
+
+
+def break_task(task, task_opts, results=[]):
+	"""Break a task into multiple of the same type."""
+	chunks = task.inputs
+	if task.input_chunk_size > 1:
+		chunks = list(chunker(task.inputs, task.input_chunk_size))
+	debug(
+		'',
+		obj={task.unique_name: 'CHUNKED', 'chunk_size': task.input_chunk_size, 'chunks': len(chunks), 'target_count': len(task.inputs)},  # noqa: E501
+		obj_after=False,
+		sub='celery.state',
+		verbose=True
+	)
+
+	# Clone opts
+	opts = task_opts.copy()
+
+	# Build signatures
+	sigs = []
+	task.ids_map = {}
+	for ix, chunk in enumerate(chunks):
+		if not isinstance(chunk, list):
+			chunk = [chunk]
+		if len(chunks) > 0:  # add chunk to task opts for tracking chunks exec
+			opts['chunk'] = ix + 1
+			opts['chunk_count'] = len(chunks)
+		task_id = str(uuid.uuid4())
+		opts['has_parent'] = True
+		opts['enable_duplicate_check'] = False
+		opts['results'] = results
+		sig = type(task).si(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
+		full_name = f'{task.name}_{ix + 1}'
+		task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
+		info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
+		task.add_result(info)
+		sigs.append(sig)
+
+	# Mark main task as async since it's being chunked
+	task.sync = False
+
+	# Build Celery workflow
+	workflow = chord(
+		tuple(sigs),
+		mark_runner_complete.s(runner=task).set(queue='results')
+	)
+	return workflow
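Note: break_task relies on standard Celery chord semantics: the header signatures run in parallel and the body callback receives the list of their return values, which is how mark_runner_complete gets every chunk's results in a single call. A minimal standalone sketch (the broker/backend config and the add/collect task names are illustrative, not part of secator):

    from celery import Celery, chord

    app = Celery(broker='memory://', backend='cache+memory://')  # illustrative config

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def collect(results):
        # Receives [4, 8]: the header results, gathered once all chunks finish
        return sum(results)

    chord([add.s(2, 2), add.s(4, 4)])(collect.s())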
{secator-0.10.1a2 → secator-0.10.1a4}/secator/celery_signals.py

@@ -1,5 +1,6 @@
 import os
 import signal
+import sys
 import threading
 from pathlib import Path
@@ -10,6 +11,7 @@ from secator.output_types import Info
 from secator.rich import console

 IDLE_TIMEOUT = CONFIG.celery.worker_kill_after_idle_seconds
+IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)

 # File-based state management system
 STATE_DIR = Path("/tmp/celery_state")
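The relocated IN_CELERY_WORKER_PROCESS flag is a sys.argv heuristic: it is truthy when the process was started through the Celery CLI. A quick illustration (the argv assignment is simulated):

    import sys

    # A worker launched with `celery -A secator.celery.app worker` sees both tokens
    sys.argv = ['celery', '-A', 'secator.celery.app', 'worker']  # simulated
    print(bool(sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)))  # True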
{secator-0.10.1a2 → secator-0.10.1a4}/secator/celery_utils.py

@@ -12,7 +12,7 @@ from rich.padding import Padding
 from rich.progress import Progress as RichProgress, SpinnerColumn, TextColumn, TimeElapsedColumn
 from secator.config import CONFIG
 from secator.definitions import STATE_COLORS
-from secator.output_types import Error
+from secator.output_types import Error, Info, State
 from secator.rich import console
 from secator.utils import debug, traceback_as_string

@@ -76,10 +76,31 @@ class CeleryData(object):

 		# Get live results and print progress
 		for data in CeleryData.poll(result, ids_map, refresh_interval):
-
+			for result in data['results']:
+
+				# Add dynamic subtask to ids_map
+				if isinstance(result, Info):
+					message = result.message
+					if message.startswith('Celery chunked task created: '):
+						task_id = message.split(' ')[-1]
+						ids_map[task_id] = {
+							'id': task_id,
+							'name': result._source,
+							'full_name': result._source,
+							'descr': '',
+							'state': 'PENDING',
+							'count': 0,
+							'progress': 0
+						}
+				yield result

 			if print_remote_info:
 				task_id = data['id']
+				if task_id not in progress_cache:
+					if CONFIG.runners.show_subtasks:
+						progress_cache[task_id] = progress.add_task('', advance=0, **data)
+					else:
+						continue
 				progress_id = progress_cache[task_id]
 				CeleryData.update_progress(progress, progress_id, data)
@@ -117,9 +138,24 @@ class CeleryData(object):
 		"""
 		while True:
 			try:
+				main_task = State(
+					task_id=result.id,
+					state=result.state,
+					_source='celery'
+				)
+				debug(f"Main task state: {result.id} - {result.state}", sub='celery.poll', verbose=True)
+				yield {'id': result.id, 'results': [main_task]}
 				yield from CeleryData.get_all_data(result, ids_map)
+
 				if result.ready():
 					debug('result is ready', sub='celery.poll', id=result.id)
+					main_task = State(
+						task_id=result.id,
+						state=result.state,
+						_source='celery'
+					)
+					debug(f"Final main task state: {result.id} - {result.state}", sub='celery.poll', verbose=True)
+					yield {'id': result.id, 'results': [main_task]}
 					yield from CeleryData.get_all_data(result, ids_map)
 					break
 			except (KeyboardInterrupt, GreenletExit):
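With these changes, each iteration of CeleryData.poll now yields a State item for the main Celery task before the per-subtask data, so a consumer sees a stream shaped roughly like this (illustrative values):

    # {'id': 'abc123', 'results': [State(task_id='abc123', state='PENDING', ...)]}
    # ...per-subtask data dicts from get_all_data()...
    # {'id': 'abc123', 'results': [State(task_id='abc123', state='SUCCESS', ...)]}  # once result.ready()

Runner._process_item (see the _base.py hunks below) consumes these State items to flip its started/done flags.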
{secator-0.10.1a2 → secator-0.10.1a4}/secator/output_types/__init__.py

@@ -7,6 +7,7 @@ __all__ = [
 	'Progress',
 	'Record',
 	'Stat',
+	'State',
 	'Subdomain',
 	'Url',
 	'UserAccount',
@@ -29,9 +30,10 @@ from secator.output_types.info import Info
 from secator.output_types.warning import Warning
 from secator.output_types.error import Error
 from secator.output_types.stat import Stat
+from secator.output_types.state import State

 EXECUTION_TYPES = [
-	Target, Progress, Info, Warning, Error
+	Target, Progress, Info, Warning, Error, State
 ]
 STAT_TYPES = [
 	Stat
secator-0.10.1a4/secator/output_types/state.py

@@ -0,0 +1,29 @@
+import time
+from dataclasses import dataclass, field
+
+from secator.output_types._base import OutputType
+from secator.utils import rich_to_ansi
+
+
+@dataclass
+class State(OutputType):
+	"""Represents the state of a Celery task."""
+
+	task_id: str
+	state: str
+	_type: str = field(default='state', repr=True)
+	_source: str = field(default='', repr=True)
+	_timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+	_uuid: str = field(default='', repr=True, compare=False)
+	_context: dict = field(default_factory=dict, repr=True, compare=False)
+	_tagged: bool = field(default=False, repr=True, compare=False)
+	_duplicate: bool = field(default=False, repr=True, compare=False)
+	_related: list = field(default_factory=list, compare=False)
+	_icon = '📊'
+	_color = 'bright_blue'
+
+	def __str__(self) -> str:
+		return f"Task {self.task_id} is {self.state}"
+
+	def __repr__(self) -> str:
+		return rich_to_ansi(f"{self._icon} [bold {self._color}]{self.state}[/] {self.task_id}")
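A quick usage sketch of the new output type (the task id is illustrative):

    from secator.output_types import State

    s = State(task_id='7f3c42', state='RUNNING', _source='celery')
    print(str(s))   # Task 7f3c42 is RUNNING
    print(repr(s))  # 📊 RUNNING 7f3c42 (ANSI-styled via rich_to_ansi)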
{secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/_base.py

@@ -12,7 +12,7 @@ import humanize
 from secator.definitions import ADDONS_ENABLED
 from secator.celery_utils import CeleryData
 from secator.config import CONFIG
-from secator.output_types import FINDING_TYPES, OutputType, Progress, Info, Warning, Error, Target
+from secator.output_types import FINDING_TYPES, OutputType, Progress, Info, Warning, Error, Target, State
 from secator.report import Report
 from secator.rich import console, console_stdout
 from secator.runners._helpers import (get_task_folder_id, process_extractor, run_extractors)
@@ -97,6 +97,7 @@ class Runner:
 		self.threads = []
 		self.no_poll = self.run_opts.get('no_poll', False)
 		self.quiet = self.run_opts.get('quiet', False)
+		self.started = False

 		# Runner process options
 		self.no_process = self.run_opts.get('no_process', False)
@@ -117,10 +118,21 @@ class Runner:
 		self.raise_on_error = self.run_opts.get('raise_on_error', False)
 		self.print_opts = {k: v for k, v in self.__dict__.items() if k.startswith('print_') if v}

+		# Chunks
+		self.has_parent = self.run_opts.get('has_parent', False)
+		self.has_children = self.run_opts.get('has_children', False)
+		self.chunk = self.run_opts.get('chunk', None)
+		self.chunk_count = self.run_opts.get('chunk_count', None)
+		self.unique_name = self.name.replace('/', '_')
+		self.unique_name = f'{self.unique_name}_{self.chunk}' if self.chunk else self.unique_name
+
+		# Add prior results to runner results
+		[self.add_result(result, print=False, output=False) for result in results]
+
 		# Determine inputs
 		inputs = [inputs] if not isinstance(inputs, list) else inputs
-		if results:
-			inputs, run_opts, errors = run_extractors(results, run_opts, inputs)
+		if not self.chunk and self.results:
+			inputs, run_opts, errors = run_extractors(self.results, run_opts, inputs)
 		for error in errors:
 			self.add_result(error, print=True)
 		self.inputs = inputs
@@ -163,18 +175,6 @@ class Runner:
 		self.validators = {name: [] for name in VALIDATORS + getattr(self, 'validators', [])}
 		self.register_validators(validators)

-		# Chunks
-		self.has_parent = self.run_opts.get('has_parent', False)
-		self.has_children = self.run_opts.get('has_children', False)
-		self.chunk = self.run_opts.get('chunk', None)
-		self.chunk_count = self.run_opts.get('chunk_count', None)
-		self.unique_name = self.name.replace('/', '_')
-		self.unique_name = f'{self.unique_name}_{self.chunk}' if self.chunk else self.unique_name
-
-		# Process prior results
-		for result in results:
-			list(self._process_item(result, print=False, output=False))
-
 		# Input post-process
 		self.run_hooks('before_init')

@@ -238,6 +238,8 @@ class Runner:

 	@property
 	def status(self):
+		if not self.started:
+			return 'PENDING'
 		if not self.done:
 			return 'RUNNING'
 		return 'FAILURE' if len(self.self_errors) > 0 else 'SUCCESS'
@@ -283,11 +285,8 @@ class Runner:
 			self.run_hooks('on_end')
 			return

-		# Choose yielder
-		yielder = self.yielder_celery if self.celery_result else self.yielder
-
 		# Loop and process items
-		for item in yielder():
+		for item in self.yielder():
 			yield from self._process_item(item)
 			self.run_hooks('on_interval')

@@ -326,16 +325,18 @@ class Runner:
 			self.add_result(error, print=True)
 			yield error

-	def add_result(self, item, print=False):
+	def add_result(self, item, print=False, output=True):
 		"""Add item to runner results.

 		Args:
 			item (OutputType): Item.
 			print (bool): Whether to print it or not.
+			output (bool): Whether to add it to the output or not.
 		"""
 		self.uuids.append(item._uuid)
 		self.results.append(item)
-
+		if output:
+			self.output += repr(item) + '\n'
 		if print:
 			self._print_item(item)
@@ -481,16 +482,52 @@ class Runner:
 			dupe = self.run_hooks('on_duplicate', dupe)

 	def yielder(self):
-		"""
-
+		"""Base yielder implementation.
+
+		This should be overridden by derived classes if they need custom behavior.
+		Otherwise, they can implement build_celery_workflow() and get standard behavior.

-
-
-
-
-
-
-
+		Yields:
+			secator.output_types.OutputType: Secator output type.
+		"""
+		# Build Celery workflow
+		workflow = self.build_celery_workflow()
+
+		# Run workflow and get results
+		if self.sync:
+			self.print_item = False
+			self.started = True
+			results = workflow.apply().get()
+			yield from results
+		else:
+			self.celery_result = workflow()
+			self.celery_ids.append(str(self.celery_result.id))
+			yield Info(
+				message=f'Celery task created: {self.celery_result.id}',
+				task_id=self.celery_result.id
+			)
+			if self.no_poll:
+				return
+			results = CeleryData.iter_results(
+				self.celery_result,
+				ids_map=self.celery_ids_map,
+				description=True,
+				print_remote_info=self.print_remote_info,
+				print_remote_title=f'[bold gold3]{self.__class__.__name__.capitalize()}[/] [bold magenta]{self.name}[/] results'
+			)
+
+			# Yield results
+			yield from results
+
+	def build_celery_workflow(self):
+		"""Build Celery workflow.
+
+		This should be implemented by derived classes.
+
+		Returns:
+			celery.Signature: Celery task signature.
+		"""
+		raise NotImplementedError("Derived classes must implement build_celery_workflow()")

 	def toDict(self):
 		"""Dict representation of the runner."""
@@ -651,6 +688,7 @@ class Runner:
 		"""Log runner results."""
 		if self.no_poll:
 			return
+		self.started = True
 		self.done = True
 		self.progress = 100
 		self.end_time = datetime.fromtimestamp(time())
@@ -831,20 +869,32 @@ class Runner:
 		# Update item context
 		item._context.update(self.context)

-		# Return if already seen
-		if item._uuid in self.uuids:
-			return
-
 		# Add uuid to item
 		if not item._uuid:
 			item._uuid = str(uuid.uuid4())

+		# Return if already seen
+		if item._uuid in self.uuids:
+			return
+
 		# Add source to item
 		if not item._source:
 			item._source = self.unique_name

+		# Check for state updates
+		if isinstance(item, State) and self.celery_result and item.task_id == self.celery_result.id:
+			self.debug(f'Updating runner state from Celery: {item.state}', sub='state')
+			if item.state in ['FAILURE', 'SUCCESS', 'REVOKED']:
+				self.started = True
+				self.done = True
+			elif item.state in ['RUNNING']:
+				self.started = True
+			self.debug(f'Runner {self.unique_name} is {self.status} (started: {self.started}, done: {self.done})', sub='state')
+			self.last_updated_celery = item._timestamp
+			return
+
 		# If progress item, update runner progress
-
+		elif isinstance(item, Progress) and item._source == self.unique_name:
 			self.progress = item.percent
 			if not should_update(CONFIG.runners.progress_update_frequency, self.last_updated_progress, item._timestamp):
 				return
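With the new started flag, the status property is effectively a three-stage lifecycle, advanced either locally or by State items arriving from Celery:

    # started=False               -> 'PENDING'
    # started=True,  done=False   -> 'RUNNING'
    # done=True, self_errors      -> 'FAILURE'
    # done=True, no errors        -> 'SUCCESS'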
{secator-0.10.1a2 → secator-0.10.1a4}/secator/runners/command.py

@@ -193,11 +193,10 @@ class Command(Runner):
 		})
 		return res

-
-
-
-
-		has_file_flag = cls.file_flag is not None
+	def needs_chunking(self, sync):
+		many_targets = len(self.inputs) > 1
+		targets_over_chunk_size = self.input_chunk_size and len(self.inputs) > self.input_chunk_size
+		has_file_flag = self.file_flag is not None
 		chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
 		return chunk_it
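The chunking decision now lives on the instance and reads as follows:

    # sync (local) run:   chunk when there is more than one target and the tool
    #                     has no file flag (each target needs its own CLI run)
    # async (worker) run: chunk when there is more than one target and the target
    #                     count exceeds input_chunk_size (fan out across workers)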
secator-0.10.1a4/secator/runners/scan.py

@@ -0,0 +1,55 @@
+import logging
+
+from secator.config import CONFIG
+from secator.runners._base import Runner
+from secator.runners.workflow import Workflow
+from secator.utils import merge_opts
+
+logger = logging.getLogger(__name__)
+
+
+class Scan(Runner):
+
+	default_exporters = CONFIG.scans.exporters
+
+	@classmethod
+	def delay(cls, *args, **kwargs):
+		from secator.celery import run_scan
+		return run_scan.delay(args=args, kwargs=kwargs)
+
+	def build_celery_workflow(self):
+		"""Build Celery workflow for scan execution.
+
+		Returns:
+			celery.Signature: Celery task signature.
+		"""
+		from celery import chain
+		from secator.celery import mark_runner_started, mark_runner_complete
+		from secator.template import TemplateLoader
+
+		scan_opts = self.config.options
+
+		# Build chain of workflows
+		sigs = []
+		for name, workflow_opts in self.config.workflows.items():
+			run_opts = self.run_opts.copy()
+			opts = merge_opts(scan_opts, workflow_opts, run_opts)
+			config = TemplateLoader(name=f'workflows/{name}')
+			workflow = Workflow(
+				config,
+				self.inputs,
+				results=self.results,
+				run_opts=opts,
+				hooks=self._hooks,
+				context=self.context.copy()
+			)
+			celery_workflow = workflow.build_celery_workflow()
+			for task_id, task_info in workflow.celery_ids_map.items():
+				self.add_subtask(task_id, task_info['name'], task_info['descr'])
+			sigs.append(celery_workflow)
+
+		return chain(
+			mark_runner_started.si(self).set(queue='results'),
+			*sigs,
+			mark_runner_complete.s(self).set(queue='results'),
+		)
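The signature returned by Scan.build_celery_workflow is a plain Celery chain bracketing the per-workflow canvases (a sketch of its shape, not literal code):

    # chain(
    #     mark_runner_started.si(scan),    # routed to the 'results' queue
    #     <workflow_1 canvas>,
    #     <workflow_2 canvas>,
    #     ...,
    #     mark_runner_complete.s(scan),    # receives the accumulated results
    # )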