secator 0.10.0__tar.gz → 0.10.1a1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of secator might be problematic.

Files changed (191)
  1. {secator-0.10.0 → secator-0.10.1a1}/PKG-INFO +1 -1
  2. {secator-0.10.0 → secator-0.10.1a1}/pyproject.toml +1 -1
  3. {secator-0.10.0 → secator-0.10.1a1}/secator/celery.py +46 -112
  4. secator-0.10.1a1/secator/celery_signals.py +141 -0
  5. {secator-0.10.0 → secator-0.10.1a1}/secator/celery_utils.py +1 -1
  6. {secator-0.10.0 → secator-0.10.1a1}/secator/cli.py +2 -1
  7. {secator-0.10.0 → secator-0.10.1a1}/secator/hooks/mongodb.py +26 -15
  8. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/error.py +5 -2
  9. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/_base.py +51 -37
  10. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/_helpers.py +18 -8
  11. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/command.py +10 -0
  12. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/scan.py +0 -4
  13. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/fping.py +0 -1
  14. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/gospider.py +0 -1
  15. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/naabu.py +2 -2
  16. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/nuclei.py +0 -1
  17. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/wpscan.py +0 -1
  18. {secator-0.10.0 → secator-0.10.1a1}/secator/utils.py +2 -0
  19. secator-0.10.0/secator/celery_signals.py +0 -103
  20. {secator-0.10.0 → secator-0.10.1a1}/.docker/Dockerfile.alpine +0 -0
  21. {secator-0.10.0 → secator-0.10.1a1}/.docker/Dockerfile.arch +0 -0
  22. {secator-0.10.0 → secator-0.10.1a1}/.docker/Dockerfile.debian +0 -0
  23. {secator-0.10.0 → secator-0.10.1a1}/.docker/Dockerfile.kali +0 -0
  24. {secator-0.10.0 → secator-0.10.1a1}/.docker/Dockerfile.osx +0 -0
  25. {secator-0.10.0 → secator-0.10.1a1}/.docker/Dockerfile.ubuntu +0 -0
  26. {secator-0.10.0 → secator-0.10.1a1}/.docker/build_all.sh +0 -0
  27. {secator-0.10.0 → secator-0.10.1a1}/.dockerignore +0 -0
  28. {secator-0.10.0 → secator-0.10.1a1}/.flake8 +0 -0
  29. {secator-0.10.0 → secator-0.10.1a1}/.gitignore +0 -0
  30. {secator-0.10.0 → secator-0.10.1a1}/CHANGELOG.md +0 -0
  31. {secator-0.10.0 → secator-0.10.1a1}/CONTRIBUTING.md +0 -0
  32. {secator-0.10.0 → secator-0.10.1a1}/Dockerfile +0 -0
  33. {secator-0.10.0 → secator-0.10.1a1}/LICENSE +0 -0
  34. {secator-0.10.0 → secator-0.10.1a1}/README.md +0 -0
  35. {secator-0.10.0 → secator-0.10.1a1}/SECURITY.md +0 -0
  36. {secator-0.10.0 → secator-0.10.1a1}/cloudbuild.yaml +0 -0
  37. {secator-0.10.0 → secator-0.10.1a1}/helm/.helmignore +0 -0
  38. {secator-0.10.0 → secator-0.10.1a1}/helm/Chart.yaml +0 -0
  39. {secator-0.10.0 → secator-0.10.1a1}/helm/templates/redis-service.yaml +0 -0
  40. {secator-0.10.0 → secator-0.10.1a1}/helm/templates/redis.yaml +0 -0
  41. {secator-0.10.0 → secator-0.10.1a1}/helm/templates/secator-manager.yaml +0 -0
  42. {secator-0.10.0 → secator-0.10.1a1}/helm/templates/secator-worker.yaml +0 -0
  43. {secator-0.10.0 → secator-0.10.1a1}/helm/values.yaml +0 -0
  44. {secator-0.10.0 → secator-0.10.1a1}/scripts/download_cves.sh +0 -0
  45. {secator-0.10.0 → secator-0.10.1a1}/scripts/install.sh +0 -0
  46. {secator-0.10.0 → secator-0.10.1a1}/scripts/install_asciinema.sh +0 -0
  47. {secator-0.10.0 → secator-0.10.1a1}/scripts/install_go.sh +0 -0
  48. {secator-0.10.0 → secator-0.10.1a1}/scripts/install_ruby.sh +0 -0
  49. {secator-0.10.0 → secator-0.10.1a1}/scripts/msf/exploit_cve.rc +0 -0
  50. {secator-0.10.0 → secator-0.10.1a1}/scripts/msf/ftp_anonymous.rc +0 -0
  51. {secator-0.10.0 → secator-0.10.1a1}/scripts/msf/ftp_version.rc +0 -0
  52. {secator-0.10.0 → secator-0.10.1a1}/scripts/msf/ftp_vsftpd_234_backdoor.rc +0 -0
  53. {secator-0.10.0 → secator-0.10.1a1}/scripts/msf/redis.rc +0 -0
  54. {secator-0.10.0 → secator-0.10.1a1}/scripts/stories/STORY.md +0 -0
  55. {secator-0.10.0 → secator-0.10.1a1}/scripts/stories/aliases.sh +0 -0
  56. {secator-0.10.0 → secator-0.10.1a1}/scripts/stories/demo.sh +0 -0
  57. {secator-0.10.0 → secator-0.10.1a1}/scripts/stories/fmt.sh +0 -0
  58. {secator-0.10.0 → secator-0.10.1a1}/scripts/stories/input.sh +0 -0
  59. {secator-0.10.0 → secator-0.10.1a1}/scripts/stories/pipe.sh +0 -0
  60. {secator-0.10.0 → secator-0.10.1a1}/scripts/stories/short_demo.sh +0 -0
  61. {secator-0.10.0 → secator-0.10.1a1}/secator/.gitignore +0 -0
  62. {secator-0.10.0 → secator-0.10.1a1}/secator/__init__.py +0 -0
  63. {secator-0.10.0 → secator-0.10.1a1}/secator/config.py +0 -0
  64. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/__init__.py +0 -0
  65. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/profiles/__init__.py +0 -0
  66. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/profiles/aggressive.yaml +0 -0
  67. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/profiles/default.yaml +0 -0
  68. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/profiles/stealth.yaml +0 -0
  69. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/scans/__init__.py +0 -0
  70. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/scans/domain.yaml +0 -0
  71. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/scans/host.yaml +0 -0
  72. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/scans/network.yaml +0 -0
  73. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/scans/subdomain.yaml +0 -0
  74. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/scans/url.yaml +0 -0
  75. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/__init__.py +0 -0
  76. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/cidr_recon.yaml +0 -0
  77. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/code_scan.yaml +0 -0
  78. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/host_recon.yaml +0 -0
  79. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/port_scan.yaml +0 -0
  80. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/subdomain_recon.yaml +0 -0
  81. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/url_bypass.yaml +0 -0
  82. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/url_crawl.yaml +0 -0
  83. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/url_dirsearch.yaml +0 -0
  84. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/url_fuzz.yaml +0 -0
  85. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/url_nuclei.yaml +0 -0
  86. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/url_vuln.yaml +0 -0
  87. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/user_hunt.yaml +0 -0
  88. {secator-0.10.0 → secator-0.10.1a1}/secator/configs/workflows/wordpress.yaml +0 -0
  89. {secator-0.10.0 → secator-0.10.1a1}/secator/decorators.py +0 -0
  90. {secator-0.10.0 → secator-0.10.1a1}/secator/definitions.py +0 -0
  91. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/__init__.py +0 -0
  92. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/_base.py +0 -0
  93. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/console.py +0 -0
  94. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/csv.py +0 -0
  95. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/gdrive.py +0 -0
  96. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/json.py +0 -0
  97. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/table.py +0 -0
  98. {secator-0.10.0 → secator-0.10.1a1}/secator/exporters/txt.py +0 -0
  99. {secator-0.10.0 → secator-0.10.1a1}/secator/hooks/__init__.py +0 -0
  100. {secator-0.10.0 → secator-0.10.1a1}/secator/hooks/gcs.py +0 -0
  101. {secator-0.10.0 → secator-0.10.1a1}/secator/installer.py +0 -0
  102. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/__init__.py +0 -0
  103. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/_base.py +0 -0
  104. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/exploit.py +0 -0
  105. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/info.py +0 -0
  106. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/ip.py +0 -0
  107. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/port.py +0 -0
  108. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/progress.py +0 -0
  109. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/record.py +0 -0
  110. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/stat.py +0 -0
  111. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/subdomain.py +0 -0
  112. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/tag.py +0 -0
  113. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/target.py +0 -0
  114. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/url.py +0 -0
  115. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/user_account.py +0 -0
  116. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/vulnerability.py +0 -0
  117. {secator-0.10.0 → secator-0.10.1a1}/secator/output_types/warning.py +0 -0
  118. {secator-0.10.0 → secator-0.10.1a1}/secator/report.py +0 -0
  119. {secator-0.10.0 → secator-0.10.1a1}/secator/rich.py +0 -0
  120. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/__init__.py +0 -0
  121. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/celery.py +0 -0
  122. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/task.py +0 -0
  123. {secator-0.10.0 → secator-0.10.1a1}/secator/runners/workflow.py +0 -0
  124. {secator-0.10.0 → secator-0.10.1a1}/secator/scans/__init__.py +0 -0
  125. {secator-0.10.0 → secator-0.10.1a1}/secator/serializers/__init__.py +0 -0
  126. {secator-0.10.0 → secator-0.10.1a1}/secator/serializers/dataclass.py +0 -0
  127. {secator-0.10.0 → secator-0.10.1a1}/secator/serializers/json.py +0 -0
  128. {secator-0.10.0 → secator-0.10.1a1}/secator/serializers/regex.py +0 -0
  129. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/__init__.py +0 -0
  130. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/_categories.py +0 -0
  131. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/bbot.py +0 -0
  132. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/bup.py +0 -0
  133. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/cariddi.py +0 -0
  134. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/dalfox.py +0 -0
  135. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/dirsearch.py +0 -0
  136. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/dnsx.py +0 -0
  137. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/dnsxbrute.py +0 -0
  138. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/feroxbuster.py +0 -0
  139. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/ffuf.py +0 -0
  140. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/gau.py +0 -0
  141. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/gf.py +0 -0
  142. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/grype.py +0 -0
  143. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/h8mail.py +0 -0
  144. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/httpx.py +0 -0
  145. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/katana.py +0 -0
  146. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/maigret.py +0 -0
  147. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/mapcidr.py +0 -0
  148. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/msfconsole.py +0 -0
  149. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/nmap.py +0 -0
  150. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/searchsploit.py +0 -0
  151. {secator-0.10.0 → secator-0.10.1a1}/secator/tasks/subfinder.py +0 -0
  152. {secator-0.10.0 → secator-0.10.1a1}/secator/template.py +0 -0
  153. {secator-0.10.0 → secator-0.10.1a1}/secator/thread.py +0 -0
  154. {secator-0.10.0 → secator-0.10.1a1}/secator/utils_test.py +0 -0
  155. {secator-0.10.0 → secator-0.10.1a1}/secator/workflows/__init__.py +0 -0
  156. {secator-0.10.0 → secator-0.10.1a1}/tests/__init__.py +0 -0
  157. {secator-0.10.0 → secator-0.10.1a1}/tests/fixtures/h8mail_breach.txt +0 -0
  158. {secator-0.10.0 → secator-0.10.1a1}/tests/fixtures/ls.py +0 -0
  159. {secator-0.10.0 → secator-0.10.1a1}/tests/fixtures/msfconsole_input.rc +0 -0
  160. {secator-0.10.0 → secator-0.10.1a1}/tests/fixtures/nmap_output.xml +0 -0
  161. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/__init__.py +0 -0
  162. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/inputs.py +0 -0
  163. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/outputs.py +0 -0
  164. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/setup.sh +0 -0
  165. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/teardown.sh +0 -0
  166. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/test_addons.py +0 -0
  167. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/test_celery.py +0 -0
  168. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/test_scans.py +0 -0
  169. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/test_tasks.py +0 -0
  170. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/test_tasks_categories.py +0 -0
  171. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/test_worker.py +0 -0
  172. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/test_workflows.py +0 -0
  173. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/wordlist.txt +0 -0
  174. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/wordlist_dns.txt +0 -0
  175. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/wordpress_toolbox/Dockerfile +0 -0
  176. {secator-0.10.0 → secator-0.10.1a1}/tests/integration/wordpress_toolbox/Makefile +0 -0
  177. {secator-0.10.0 → secator-0.10.1a1}/tests/performance/__init__.py +0 -0
  178. {secator-0.10.0 → secator-0.10.1a1}/tests/performance/loadtester.py +0 -0
  179. {secator-0.10.0 → secator-0.10.1a1}/tests/performance/test_worker.py +0 -0
  180. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/__init__.py +0 -0
  181. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_celery.py +0 -0
  182. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_cli.py +0 -0
  183. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_config.py +0 -0
  184. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_offline.py +0 -0
  185. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_runners.py +0 -0
  186. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_scans.py +0 -0
  187. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_serializers.py +0 -0
  188. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_tasks.py +0 -0
  189. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_tasks_categories.py +0 -0
  190. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_template.py +0 -0
  191. {secator-0.10.0 → secator-0.10.1a1}/tests/unit/test_utils.py +0 -0
{secator-0.10.0 → secator-0.10.1a1}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: secator
- Version: 0.10.0
+ Version: 0.10.1a1
  Summary: The pentester's swiss knife.
  Project-URL: Homepage, https://github.com/freelabz/secator
  Project-URL: Issues, https://github.com/freelabz/secator/issues
{secator-0.10.0 → secator-0.10.1a1}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = 'hatchling.build'

  [project]
  name = 'secator'
- version = "0.10.0"
+ version = "0.10.1a1"
  authors = [{ name = 'FreeLabz', email = 'sales@freelabz.com' }]
  readme = 'README.md'
  description = "The pentester's swiss knife."
{secator-0.10.0 → secator-0.10.1a1}/secator/celery.py

@@ -7,7 +7,7 @@ import uuid

  from time import time

- from celery import Celery, chain, chord
+ from celery import Celery, chord
  from celery.app import trace

  from rich.logging import RichHandler
@@ -15,10 +15,9 @@ from retry import retry

  from secator.celery_signals import setup_handlers
  from secator.config import CONFIG
- from secator.output_types import Info, Error
+ from secator.output_types import Info
  from secator.rich import console
  from secator.runners import Scan, Task, Workflow
- from secator.runners._helpers import run_extractors
  from secator.utils import (debug, deduplicate, flatten, should_update)

  IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)
@@ -103,7 +102,7 @@ if IN_CELERY_WORKER_PROCESS:
  @retry(Exception, tries=3, delay=2)
  def update_state(celery_task, task, force=False):
      """Update task state to add metadata information."""
-     if task.sync:
+     if not IN_CELERY_WORKER_PROCESS:
          return
      if not force and not should_update(CONFIG.runners.backend_update_frequency, task.last_updated_celery):
          return
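Note: `update_state` keeps its `@retry(Exception, tries=3, delay=2)` wrapper from the `retry` package, but the guard changes from the runner-level `task.sync` flag to the process-level `IN_CELERY_WORKER_PROCESS` constant, so state is only pushed to the backend from actual worker processes. A minimal sketch of the same decorator usage (the `push_state` and `backend.store` names are hypothetical):

    from retry import retry

    @retry(Exception, tries=3, delay=2)
    def push_state(backend, state):
        # Any Exception triggers a retry; re-raises after 3 attempts,
        # sleeping 2 seconds between tries.
        backend.store(state)  # hypothetical backend call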
@@ -139,6 +138,16 @@ def chunker(seq, size):
      return (seq[pos:pos + size] for pos in range(0, len(seq), size))


+ @app.task(bind=True)
+ def handle_runner_error(self, results, runner):
+     """Handle errors in Celery workflows (chunked tasks or runners)."""
+     results = forward_results(results)
+     runner.results = results
+     runner.log_results()
+     runner.run_hooks('on_end')
+     return runner.results
+
+
  def break_task(task, task_opts, targets, results=[], chunk_size=1):
      """Break a task into multiple of the same type."""
      chunks = targets
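Note: `handle_runner_error` is used below as the body (callback) of a Celery chord, so it runs exactly once after every chunked signature in the header has finished, and receives the list of their results. A minimal standalone sketch of the chord pattern, with placeholder task names (`add`, `collect`) that are not part of secator:

    from celery import Celery, chord

    app = Celery('demo', broker='memory://')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def collect(results):
        # Chord body: receives the list of results from all header tasks.
        return sum(results)

    # Run all header tasks in parallel, then feed their results to the body.
    workflow = chord([add.s(i, i) for i in range(3)], collect.s())
    # workflow.apply_async() would execute it on a real broker.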
@@ -167,7 +176,8 @@ def break_task(task, task_opts, targets, results=[], chunk_size=1):
          task_id = str(uuid.uuid4())
          opts['has_parent'] = True
          opts['enable_duplicate_check'] = False
-         sig = type(task).s(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
+         opts['results'] = results
+         sig = type(task).si(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
          full_name = f'{task.name}_{ix + 1}'
          task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
          info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
@@ -175,23 +185,16 @@
          sigs.append(sig)

      # Build Celery workflow
-     workflow = chain(
-         forward_results.s(results).set(queue='results'),
-         chord(
-             tuple(sigs),
-             forward_results.s().set(queue='results'),
-         )
+     workflow = chord(
+         tuple(sigs),
+         handle_runner_error.s(runner=task).set(queue='results')
      )
-     if task.sync:
-         task.print_item = False
-         task.results = workflow.apply().get()
-     else:
-         result = workflow.apply_async()
-         task.celery_result = result
+     return workflow


  @app.task(bind=True)
  def run_task(self, args=[], kwargs={}):
+     print('run task')
      console.print(Info(message=f'Running task {self.request.id}'))
      kwargs['context']['celery_id'] = self.request.id
      task = Task(*args, **kwargs)
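Note: two things change in `break_task`. First, it now returns the chord instead of applying it itself, leaving execution to the caller. Second, the chunk signatures switch from `.s()` to `.si()`: `.si()` builds an immutable signature, so the parent task's return value is not prepended to the chunk's arguments, and prior results are instead passed explicitly through `opts['results']`. A minimal sketch of the `.s()` vs `.si()` difference (placeholder `step` task, not from secator):

    from celery import Celery, chain

    app = Celery('demo', broker='memory://')

    @app.task
    def step(*args):
        return args

    # Mutable signature: the previous task's result is prepended.
    mutable = chain(step.s(1), step.s(2))     # second call receives ((1,), 2)

    # Immutable signature: parent results are ignored.
    immutable = chain(step.s(1), step.si(2))  # second call receives (2,)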
@@ -218,111 +221,41 @@ def run_scan(self, args=[], kwargs={}):

  @app.task(bind=True)
  def run_command(self, results, name, targets, opts={}):
-     chunk = opts.get('chunk')
-     sync = opts.get('sync', True)
+     if IN_CELERY_WORKER_PROCESS:
+         opts.update({'print_item': True, 'print_line': True, 'print_cmd': True})
+         routing_key = self.request.delivery_info['routing_key']
+         console.print(Info(message=f'Task "{name}" running with routing key "{routing_key}"'))
+
+     # Flatten + dedupe + filter results
+     results = forward_results(results)

      # Set Celery request id in context
      context = opts.get('context', {})
      context['celery_id'] = self.request.id
      context['worker_name'] = os.environ.get('WORKER_NAME', 'unknown')
      opts['context'] = context
-     opts['print_remote_info'] = False
      opts['results'] = results
+     opts['sync'] = True

-     # If we are in a Celery worker, print everything, always
-     if IN_CELERY_WORKER_PROCESS:
-         opts.update({
-             'print_item': True,
-             'print_line': True,
-             'print_cmd': True
-         })
-         routing_key = self.request.delivery_info['routing_key']
-         console.print(Info(message=f'Task "{name}" running with routing key "{routing_key}"'))
+     # Initialize task
+     sync = not IN_CELERY_WORKER_PROCESS
+     task_cls = Task.get_task_class(name)
+     task = task_cls(targets, **opts)
+     update_state(self, task, force=True)

-     # Flatten + dedupe results
-     results = flatten(results)
-     results = deduplicate(results, attr='_uuid')
+     # Chunk task if needed
+     if task_cls.needs_chunking(targets, sync):
+         console.print(Info(message=f'Task {name} requires chunking, breaking into {len(targets)} tasks'))
+         return self.replace(break_task(task, opts, targets, results=results))
+
+     # Update state live
+     [update_state(self, task) for _ in task]
+     update_state(self, task, force=True)
+
+     # Garbage collection to save RAM
+     gc.collect()

-     # Get expanded targets
-     if not chunk and results:
-         targets, opts = run_extractors(results, opts, targets)
-         debug('after extractors', obj={'targets': targets, 'opts': opts}, sub='celery.state')
-
-     task = None
-
-     try:
-
-         # Get task class
-         task_cls = Task.get_task_class(name)
-
-         # Check if chunkable
-         many_targets = len(targets) > 1
-         targets_over_chunk_size = task_cls.input_chunk_size and len(targets) > task_cls.input_chunk_size
-         has_file_flag = task_cls.file_flag is not None
-         chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
-         task_opts = opts.copy()
-         task_opts.update({
-             'print_remote_info': False,
-             'has_children': chunk_it,
-         })
-
-         if IN_CELERY_WORKER_PROCESS and chunk_it and routing_key != 'poll':
-             console.print(Info(message=f'Task {name} is chunkable but not running on "poll" queue, re-routing to "poll" queue'))
-             raise self.replace(run_command.si(results, name, targets, opts=opts).set(queue='poll', task_id=self.request.id))
-
-         if chunk_it:
-             task_opts['print_cmd'] = False
-
-         task = task_cls(targets, **task_opts)
-         debug(
-             '',
-             obj={
-                 f'{task.unique_name}': 'CHUNK STATUS',
-                 'chunk_it': chunk_it,
-                 'sync': task.sync,
-                 'many_targets': many_targets,
-                 'targets_over_chunk_size': targets_over_chunk_size,
-             },
-             obj_after=False,
-             id=self.request.id,
-             sub='celery.state',
-             verbose=True
-         )
-
-         # Chunk task if needed
-         if chunk_it:
-             chunk_size = task_cls.input_chunk_size if has_file_flag else 1
-             break_task(
-                 task,
-                 opts,
-                 targets,
-                 results=results,
-                 chunk_size=chunk_size)
-             console.print(Info(message=f'Task "{name}" starts polling for chunked results'))
-
-         # Update state before starting
-         update_state(self, task)
-
-         # Update state for each item found
-         for _ in task:
-             update_state(self, task)
-
-     except BaseException as e:
-         if not task:
-             raise e
-         error = Error.from_exception(e)
-         error._source = task.unique_name
-         error._uuid = str(uuid.uuid4())
-         task.add_result(error, print=True)
-         task.stop_celery_tasks()
-
-     finally:
-         if not task:
-             raise
-         update_state(self, task, force=True)
-         gc.collect()
-         debug('', obj={task.unique_name: task.status, 'results': task.results}, sub='celery.results', verbose=True)
-         return task.results
+     return task.results


  @app.task
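Note: the rewritten `run_command` is much flatter. The chunking heuristics (target count, `input_chunk_size`, `file_flag`) move behind `task_cls.needs_chunking(targets, sync)`, and instead of spawning chunks and polling for their results, the task substitutes itself with the chord built by `break_task` via `self.replace(...)`. In Celery, `Task.replace(sig)` hands the current task's place in the workflow (including its task id and any chord it belongs to) over to `sig`, raising `Ignore` internally so nothing after it runs. A minimal sketch with placeholder tasks (`fan_out`, `work`, `merge`), not secator's API:

    from celery import Celery, chord

    app = Celery('demo', broker='memory://')

    @app.task
    def work(item):
        return item * 2

    @app.task
    def merge(results):
        return results

    @app.task(bind=True)
    def fan_out(self, items):
        if len(items) > 1:
            # Replace this task with a chord; the chord's result
            # transparently becomes this task's result.
            return self.replace(chord([work.s(i) for i in items], merge.s()))
        return [work(items[0])]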
@@ -335,6 +268,7 @@ def forward_results(results):
          results = results['results']
      results = flatten(results)
      results = deduplicate(results, attr='_uuid')
+     console.print(Info(message=f'Forwarding {len(results)} results ...'))
      return results

  #--------------#
secator-0.10.1a1/secator/celery_signals.py (new file)

@@ -0,0 +1,141 @@
+ import os
+ import signal
+ import threading
+ from pathlib import Path
+
+ from celery import signals
+
+ from secator.config import CONFIG
+ from secator.output_types import Info
+ from secator.rich import console
+
+ IDLE_TIMEOUT = CONFIG.celery.worker_kill_after_idle_seconds
+
+ # File-based state management system
+ STATE_DIR = Path("/tmp/celery_state")
+ STATE_DIR.mkdir(exist_ok=True, parents=True)
+
+
+ def get_lock_file_path():
+     worker_name = os.environ.get("WORKER_NAME", f"unknown_{os.getpid()}")
+     return Path(f"/tmp/celery_worker_{worker_name}.lock")
+
+
+ def set_task_running(task_id):
+     """Mark that a task is running in current worker"""
+     with open(get_lock_file_path(), "w") as f:
+         f.write(task_id)
+
+
+ def clear_task_running():
+     """Clear the task running state"""
+     lock_file = get_lock_file_path()
+     if lock_file.exists():
+         lock_file.unlink()
+
+
+ def is_task_running():
+     """Check if a task is currently running"""
+     return get_lock_file_path().exists()
+
+
+ def kill_worker(parent=False):
+     """Kill current worker using its pid by sending a SIGTERM to Celery master process."""
+     worker_name = os.environ.get('WORKER_NAME', 'unknown')
+
+     # Check if a task is running via the lock file
+     if not is_task_running():
+         pid = os.getppid() if parent else os.getpid()
+         console.print(Info(message=f'Sending SIGTERM to worker {worker_name} with pid {pid}'))
+         os.kill(pid, signal.SIGTERM)
+     else:
+         console.print(Info(message=f'Cancelling worker shutdown of {worker_name} since a task is running'))
+
+
+ def setup_idle_timer(timeout):
+     """Setup a timer to kill the worker after being idle"""
+     if timeout == -1:
+         return
+
+     console.print(Info(message=f'Starting inactivity timer for {timeout} seconds ...'))
+     timer = threading.Timer(timeout, kill_worker)
+     timer.daemon = True  # Make sure timer is killed when worker exits
+     timer.start()
+
+
+ def maybe_override_logging():
+     def decorator(func):
+         if CONFIG.celery.override_default_logging:
+             return signals.setup_logging.connect(func)
+         else:
+             return func
+     return decorator
+
+
+ @maybe_override_logging()
+ def setup_logging(*args, **kwargs):
+     """Override celery's logging setup to prevent it from altering our settings.
+     github.com/celery/celery/issues/1867
+     """
+     pass
+
+
+ def capture_worker_name(sender, instance, **kwargs):
+     os.environ["WORKER_NAME"] = '{0}'.format(sender)
+
+
+ def worker_init_handler(**kwargs):
+     if IDLE_TIMEOUT != -1:
+         setup_idle_timer(IDLE_TIMEOUT)
+
+
+ def task_prerun_handler(task_id, **kwargs):
+     # Mark that a task is running
+     set_task_running(task_id)
+
+
+ def task_postrun_handler(**kwargs):
+     # Mark that no task is running
+     clear_task_running()
+
+     # Get sender name from kwargs
+     sender_name = kwargs['sender'].name
+
+     if CONFIG.celery.worker_kill_after_task and sender_name.startswith('secator.'):
+         worker_name = os.environ.get('WORKER_NAME', 'unknown')
+         console.print(Info(message=f'Shutdown worker {worker_name} since config celery.worker_kill_after_task is set.'))
+         kill_worker(parent=True)
+         return
+
+     # Set up a new idle timer
+     if IDLE_TIMEOUT != -1:
+         console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds'))
+         setup_idle_timer(IDLE_TIMEOUT)
+
+
+ def task_revoked_handler(request=None, **kwargs):
+     """Handle revoked tasks by clearing the task running state"""
+     console.print(Info(message='Task was revoked, clearing running state'))
+     clear_task_running()
+
+     # Set up a new idle timer
+     if IDLE_TIMEOUT != -1:
+         console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds after task revocation'))
+         setup_idle_timer(IDLE_TIMEOUT)
+
+
+ def worker_shutdown_handler(**kwargs):
+     """Cleanup lock files when worker shuts down"""
+     lock_file = get_lock_file_path()
+     if lock_file.exists():
+         lock_file.unlink()
+
+
+ def setup_handlers():
+     signals.celeryd_after_setup.connect(capture_worker_name)
+     signals.setup_logging.connect(setup_logging)
+     signals.task_prerun.connect(task_prerun_handler)
+     signals.task_postrun.connect(task_postrun_handler)
+     signals.task_revoked.connect(task_revoked_handler)
+     signals.worker_ready.connect(worker_init_handler)
+     signals.worker_shutdown.connect(worker_shutdown_handler)
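Note: the rewritten signals module implements worker auto-shutdown with a per-worker lock file under /tmp: `task_prerun`/`task_postrun` create and remove the file, the idle timer calls `kill_worker`, and `kill_worker` aborts the shutdown if the lock file still exists. Since `threading.Timer` fires only once, the timer is re-armed after each task and after revocations. A minimal standalone sketch of the idle-shutdown pattern (generic names, not secator's API):

    import os
    import signal
    import threading

    IDLE_TIMEOUT = 300  # seconds, illustrative value

    busy = threading.Event()

    def kill_if_idle():
        # Abort the shutdown if a task started since the timer was armed.
        if not busy.is_set():
            os.kill(os.getpid(), signal.SIGTERM)

    def arm_idle_timer():
        timer = threading.Timer(IDLE_TIMEOUT, kill_if_idle)
        timer.daemon = True  # don't keep the process alive on exit
        timer.start()

    # On task start: busy.set(); on task end: busy.clear(); arm_idle_timer().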
{secator-0.10.0 → secator-0.10.1a1}/secator/celery_utils.py

@@ -264,5 +264,5 @@ class CeleryData(object):
              CeleryData.get_task_ids(result.parent, ids=ids)

          except kombu.exceptions.DecodeError:
-             debug('kombu decode error', sub='celery.data.get_task_ids')
+             debug('kombu decode error', sub='celery.data')
              return
{secator-0.10.0 → secator-0.10.1a1}/secator/cli.py

@@ -171,7 +171,8 @@ def worker(hostname, concurrency, reload, queue, pool, check, dev, stop, show):
          patterns = "celery.py;tasks/*.py;runners/*.py;serializers/*.py;output_types/*.py;hooks/*.py;exporters/*.py"
          cmd = f'watchmedo auto-restart --directory=./ --patterns="{patterns}" --recursive -- {cmd}'

-     Command.execute(cmd, name='secator_worker')
+     ret = Command.execute(cmd, name='secator_worker')
+     sys.exit(ret.return_code)


  #-------#
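Note: `secator worker` now exits with the return code of the underlying worker process instead of always exiting 0, so supervisors like systemd or Kubernetes can detect failed workers and restart them. The equivalent stdlib pattern, as a hedged sketch (illustrative command line):

    import subprocess
    import sys

    # Propagate the child's exit status to our own caller.
    proc = subprocess.run(['celery', '-A', 'secator.celery.app', 'worker'])
    sys.exit(proc.returncode)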
{secator-0.10.0 → secator-0.10.1a1}/secator/hooks/mongodb.py

@@ -20,11 +20,19 @@ MONGODB_MAX_POOL_SIZE = CONFIG.addons.mongodb.max_pool_size

  logger = logging.getLogger(__name__)

- client = pymongo.MongoClient(
-     escape_mongodb_url(MONGODB_URL),
-     maxPoolSize=MONGODB_MAX_POOL_SIZE,
-     serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
- )
+ _mongodb_client = None
+
+
+ def get_mongodb_client():
+     """Get or create MongoDB client"""
+     global _mongodb_client
+     if _mongodb_client is None:
+         _mongodb_client = pymongo.MongoClient(
+             escape_mongodb_url(MONGODB_URL),
+             maxPoolSize=MONGODB_MAX_POOL_SIZE,
+             serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
+         )
+     return _mongodb_client


  def get_runner_dbg(runner):
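Note: moving the `MongoClient` from import time into a lazy singleton means a connection is only opened on first use, and only in processes that actually use it. That matters under prefork Celery workers, since PyMongo documents that a `MongoClient` created before `fork()` must not be reused in child processes. The same memoization can be written with `functools.lru_cache`; a hedged sketch with placeholder connection settings:

    import functools

    import pymongo

    @functools.lru_cache(maxsize=1)
    def get_client():
        # Created once per process, on first call (i.e. after workers fork).
        return pymongo.MongoClient(
            'mongodb://localhost:27017',  # placeholder URL
            maxPoolSize=10,
            serverSelectionTimeoutMS=5000,
        )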
@@ -39,6 +47,7 @@ def get_runner_dbg(runner):


  def update_runner(self):
+     client = get_mongodb_client()
      db = client.main
      type = self.config.type
      collection = f'{type}s'
@@ -72,6 +81,7 @@ def update_finding(self, item):
      if type(item) not in FINDING_TYPES:
          return item
      start_time = time.time()
+     client = get_mongodb_client()
      db = client.main
      update = item.toDict()
      _type = item._type
97
107
 
98
108
 
99
109
  def find_duplicates(self):
110
+ from secator.celery import IN_CELERY_WORKER_PROCESS
100
111
  ws_id = self.toDict().get('context', {}).get('workspace_id')
101
112
  if not ws_id:
102
113
  return
103
- if self.sync:
104
- debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
114
+ if not IN_CELERY_WORKER_PROCESS:
105
115
  tag_duplicates(ws_id)
106
116
  else:
107
- celery_id = tag_duplicates.delay(ws_id)
108
- debug(f'running duplicate check on workspace {ws_id}', id=celery_id, sub='hooks.mongodb')
117
+ tag_duplicates.delay(ws_id)
109
118
 
110
119
 
111
120
  def load_finding(obj):
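Note: importing `IN_CELERY_WORKER_PROCESS` inside the function body rather than at module top is the usual way to break the import cycle between `secator.celery` and this hooks module. Outside a worker the duplicate check runs inline; inside one it is queued with `.delay()`, which is Celery shorthand for `.apply_async()`. A short sketch ('ws-123' is a placeholder workspace id):

    from secator.hooks.mongodb import tag_duplicates

    tag_duplicates.delay('ws-123')                 # shorthand enqueue
    tag_duplicates.apply_async(args=('ws-123',))   # explicit form; also accepts queue=, countdown=, ...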
@@ -132,6 +141,8 @@ def tag_duplicates(ws_id: str = None):
      Args:
          ws_id (str): Workspace id.
      """
+     debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
+     client = get_mongodb_client()
      db = client.main
      workspace_query = list(
          db.findings.find({'_context.workspace_id': str(ws_id), '_tagged': True}).sort('_timestamp', -1))
@@ -172,19 +183,19 @@ def tag_duplicates(ws_id: str = None):
                  'seen dupes': len(seen_dupes)
              },
              id=ws_id,
-             sub='hooks.mongodb.duplicates',
+             sub='hooks.mongodb',
              verbose=True)
          tmp_duplicates_ids = list(dict.fromkeys([i._uuid for i in tmp_duplicates]))
-         debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+         debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb', verbose=True)

          # Update latest object as non-duplicate
          if tmp_duplicates:
              duplicates.extend([f for f in tmp_duplicates])
              db.findings.update_one({'_id': ObjectId(item._uuid)}, {'$set': {'_related': tmp_duplicates_ids}})
-             debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+             debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
              non_duplicates.append(item)
          else:
-             debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
+             debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
              non_duplicates.append(item)

          # debug(f'found {len(duplicates)} total duplicates')
@@ -208,7 +219,7 @@ def tag_duplicates(ws_id: str = None):
              'duplicates': len(duplicates_ids),
              'non-duplicates': len(non_duplicates_ids)
          },
-         sub='hooks.mongodb.duplicates')
+         sub='hooks.mongodb')


  HOOKS = {
@@ -232,6 +243,6 @@ HOOKS = {
          'on_item': [update_finding],
          'on_duplicate': [update_finding],
          'on_interval': [update_runner],
-         'on_end': [update_runner, find_duplicates]
+         'on_end': [update_runner]
      }
  }
{secator-0.10.0 → secator-0.10.1a1}/secator/output_types/error.py

@@ -21,10 +21,13 @@ class Error(OutputType):
      _sort_by = ('_timestamp',)

      def from_exception(e, **kwargs):
-         message = type(e).__name__
+         errtype = type(e).__name__
+         message = errtype
          if str(e):
              message += f': {str(e)}'
-         return Error(message=message, traceback=traceback_as_string(e), **kwargs)
+         traceback = traceback_as_string(e) if errtype not in ['KeyboardInterrupt', 'GreenletExit'] else ''
+         error = Error(message=message, traceback=traceback, **kwargs)
+         return error

      def __str__(self):
          return self.message
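Note: `Error.from_exception` now suppresses the traceback for `KeyboardInterrupt` and gevent's `GreenletExit`, which signal deliberate cancellation rather than a bug, keeping error output readable. A minimal sketch of the same idea in isolation (plain dict instead of secator's `Error` type):

    import traceback

    QUIET_EXCEPTIONS = ('KeyboardInterrupt', 'GreenletExit')

    def error_from_exception(e):
        errtype = type(e).__name__
        message = errtype + (f': {e}' if str(e) else '')
        tb = '' if errtype in QUIET_EXCEPTIONS else ''.join(
            traceback.format_exception(type(e), e, e.__traceback__))
        return {'message': message, 'traceback': tb}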