secator 0.9.4__tar.gz → 0.10.1a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of secator has been flagged as potentially problematic; see the package registry page for more details.

Files changed (190)
  1. {secator-0.9.4 → secator-0.10.1a0}/CHANGELOG.md +16 -0
  2. {secator-0.9.4 → secator-0.10.1a0}/PKG-INFO +1 -1
  3. {secator-0.9.4 → secator-0.10.1a0}/pyproject.toml +1 -1
  4. {secator-0.9.4 → secator-0.10.1a0}/secator/celery.py +56 -114
  5. secator-0.10.1a0/secator/celery_signals.py +141 -0
  6. {secator-0.9.4 → secator-0.10.1a0}/secator/celery_utils.py +1 -1
  7. {secator-0.9.4 → secator-0.10.1a0}/secator/cli.py +3 -2
  8. {secator-0.9.4 → secator-0.10.1a0}/secator/config.py +4 -2
  9. {secator-0.9.4 → secator-0.10.1a0}/secator/decorators.py +1 -0
  10. {secator-0.9.4 → secator-0.10.1a0}/secator/hooks/mongodb.py +26 -15
  11. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/error.py +5 -2
  12. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/_base.py +54 -37
  13. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/_helpers.py +18 -8
  14. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/command.py +11 -1
  15. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/task.py +3 -0
  16. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/workflow.py +5 -2
  17. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/fping.py +0 -1
  18. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/gospider.py +0 -1
  19. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/katana.py +2 -1
  20. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/naabu.py +2 -2
  21. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/nuclei.py +0 -2
  22. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/wpscan.py +0 -1
  23. {secator-0.9.4 → secator-0.10.1a0}/secator/template.py +4 -2
  24. {secator-0.9.4 → secator-0.10.1a0}/secator/utils.py +22 -0
  25. {secator-0.9.4 → secator-0.10.1a0}/.docker/Dockerfile.alpine +0 -0
  26. {secator-0.9.4 → secator-0.10.1a0}/.docker/Dockerfile.arch +0 -0
  27. {secator-0.9.4 → secator-0.10.1a0}/.docker/Dockerfile.debian +0 -0
  28. {secator-0.9.4 → secator-0.10.1a0}/.docker/Dockerfile.kali +0 -0
  29. {secator-0.9.4 → secator-0.10.1a0}/.docker/Dockerfile.osx +0 -0
  30. {secator-0.9.4 → secator-0.10.1a0}/.docker/Dockerfile.ubuntu +0 -0
  31. {secator-0.9.4 → secator-0.10.1a0}/.docker/build_all.sh +0 -0
  32. {secator-0.9.4 → secator-0.10.1a0}/.dockerignore +0 -0
  33. {secator-0.9.4 → secator-0.10.1a0}/.flake8 +0 -0
  34. {secator-0.9.4 → secator-0.10.1a0}/.gitignore +0 -0
  35. {secator-0.9.4 → secator-0.10.1a0}/CONTRIBUTING.md +0 -0
  36. {secator-0.9.4 → secator-0.10.1a0}/Dockerfile +0 -0
  37. {secator-0.9.4 → secator-0.10.1a0}/LICENSE +0 -0
  38. {secator-0.9.4 → secator-0.10.1a0}/README.md +0 -0
  39. {secator-0.9.4 → secator-0.10.1a0}/SECURITY.md +0 -0
  40. {secator-0.9.4 → secator-0.10.1a0}/cloudbuild.yaml +0 -0
  41. {secator-0.9.4 → secator-0.10.1a0}/helm/.helmignore +0 -0
  42. {secator-0.9.4 → secator-0.10.1a0}/helm/Chart.yaml +0 -0
  43. {secator-0.9.4 → secator-0.10.1a0}/helm/templates/redis-service.yaml +0 -0
  44. {secator-0.9.4 → secator-0.10.1a0}/helm/templates/redis.yaml +0 -0
  45. {secator-0.9.4 → secator-0.10.1a0}/helm/templates/secator-manager.yaml +0 -0
  46. {secator-0.9.4 → secator-0.10.1a0}/helm/templates/secator-worker.yaml +0 -0
  47. {secator-0.9.4 → secator-0.10.1a0}/helm/values.yaml +0 -0
  48. {secator-0.9.4 → secator-0.10.1a0}/scripts/download_cves.sh +0 -0
  49. {secator-0.9.4 → secator-0.10.1a0}/scripts/install.sh +0 -0
  50. {secator-0.9.4 → secator-0.10.1a0}/scripts/install_asciinema.sh +0 -0
  51. {secator-0.9.4 → secator-0.10.1a0}/scripts/install_go.sh +0 -0
  52. {secator-0.9.4 → secator-0.10.1a0}/scripts/install_ruby.sh +0 -0
  53. {secator-0.9.4 → secator-0.10.1a0}/scripts/msf/exploit_cve.rc +0 -0
  54. {secator-0.9.4 → secator-0.10.1a0}/scripts/msf/ftp_anonymous.rc +0 -0
  55. {secator-0.9.4 → secator-0.10.1a0}/scripts/msf/ftp_version.rc +0 -0
  56. {secator-0.9.4 → secator-0.10.1a0}/scripts/msf/ftp_vsftpd_234_backdoor.rc +0 -0
  57. {secator-0.9.4 → secator-0.10.1a0}/scripts/msf/redis.rc +0 -0
  58. {secator-0.9.4 → secator-0.10.1a0}/scripts/stories/STORY.md +0 -0
  59. {secator-0.9.4 → secator-0.10.1a0}/scripts/stories/aliases.sh +0 -0
  60. {secator-0.9.4 → secator-0.10.1a0}/scripts/stories/demo.sh +0 -0
  61. {secator-0.9.4 → secator-0.10.1a0}/scripts/stories/fmt.sh +0 -0
  62. {secator-0.9.4 → secator-0.10.1a0}/scripts/stories/input.sh +0 -0
  63. {secator-0.9.4 → secator-0.10.1a0}/scripts/stories/pipe.sh +0 -0
  64. {secator-0.9.4 → secator-0.10.1a0}/scripts/stories/short_demo.sh +0 -0
  65. {secator-0.9.4 → secator-0.10.1a0}/secator/.gitignore +0 -0
  66. {secator-0.9.4 → secator-0.10.1a0}/secator/__init__.py +0 -0
  67. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/__init__.py +0 -0
  68. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/profiles/__init__.py +0 -0
  69. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/profiles/aggressive.yaml +0 -0
  70. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/profiles/default.yaml +0 -0
  71. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/profiles/stealth.yaml +0 -0
  72. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/scans/__init__.py +0 -0
  73. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/scans/domain.yaml +0 -0
  74. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/scans/host.yaml +0 -0
  75. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/scans/network.yaml +0 -0
  76. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/scans/subdomain.yaml +0 -0
  77. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/scans/url.yaml +0 -0
  78. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/__init__.py +0 -0
  79. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/cidr_recon.yaml +0 -0
  80. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/code_scan.yaml +0 -0
  81. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/host_recon.yaml +0 -0
  82. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/port_scan.yaml +0 -0
  83. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/subdomain_recon.yaml +0 -0
  84. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/url_bypass.yaml +0 -0
  85. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/url_crawl.yaml +0 -0
  86. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/url_dirsearch.yaml +0 -0
  87. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/url_fuzz.yaml +0 -0
  88. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/url_nuclei.yaml +0 -0
  89. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/url_vuln.yaml +0 -0
  90. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/user_hunt.yaml +0 -0
  91. {secator-0.9.4 → secator-0.10.1a0}/secator/configs/workflows/wordpress.yaml +0 -0
  92. {secator-0.9.4 → secator-0.10.1a0}/secator/definitions.py +0 -0
  93. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/__init__.py +0 -0
  94. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/_base.py +0 -0
  95. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/console.py +0 -0
  96. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/csv.py +0 -0
  97. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/gdrive.py +0 -0
  98. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/json.py +0 -0
  99. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/table.py +0 -0
  100. {secator-0.9.4 → secator-0.10.1a0}/secator/exporters/txt.py +0 -0
  101. {secator-0.9.4 → secator-0.10.1a0}/secator/hooks/__init__.py +0 -0
  102. {secator-0.9.4 → secator-0.10.1a0}/secator/hooks/gcs.py +0 -0
  103. {secator-0.9.4 → secator-0.10.1a0}/secator/installer.py +0 -0
  104. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/__init__.py +0 -0
  105. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/_base.py +0 -0
  106. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/exploit.py +0 -0
  107. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/info.py +0 -0
  108. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/ip.py +0 -0
  109. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/port.py +0 -0
  110. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/progress.py +0 -0
  111. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/record.py +0 -0
  112. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/stat.py +0 -0
  113. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/subdomain.py +0 -0
  114. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/tag.py +0 -0
  115. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/target.py +0 -0
  116. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/url.py +0 -0
  117. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/user_account.py +0 -0
  118. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/vulnerability.py +0 -0
  119. {secator-0.9.4 → secator-0.10.1a0}/secator/output_types/warning.py +0 -0
  120. {secator-0.9.4 → secator-0.10.1a0}/secator/report.py +0 -0
  121. {secator-0.9.4 → secator-0.10.1a0}/secator/rich.py +0 -0
  122. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/__init__.py +0 -0
  123. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/celery.py +0 -0
  124. {secator-0.9.4 → secator-0.10.1a0}/secator/runners/scan.py +0 -0
  125. {secator-0.9.4 → secator-0.10.1a0}/secator/scans/__init__.py +0 -0
  126. {secator-0.9.4 → secator-0.10.1a0}/secator/serializers/__init__.py +0 -0
  127. {secator-0.9.4 → secator-0.10.1a0}/secator/serializers/dataclass.py +0 -0
  128. {secator-0.9.4 → secator-0.10.1a0}/secator/serializers/json.py +0 -0
  129. {secator-0.9.4 → secator-0.10.1a0}/secator/serializers/regex.py +0 -0
  130. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/__init__.py +0 -0
  131. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/_categories.py +0 -0
  132. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/bbot.py +0 -0
  133. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/bup.py +0 -0
  134. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/cariddi.py +0 -0
  135. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/dalfox.py +0 -0
  136. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/dirsearch.py +0 -0
  137. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/dnsx.py +0 -0
  138. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/dnsxbrute.py +0 -0
  139. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/feroxbuster.py +0 -0
  140. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/ffuf.py +0 -0
  141. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/gau.py +0 -0
  142. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/gf.py +0 -0
  143. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/grype.py +0 -0
  144. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/h8mail.py +0 -0
  145. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/httpx.py +0 -0
  146. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/maigret.py +0 -0
  147. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/mapcidr.py +0 -0
  148. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/msfconsole.py +0 -0
  149. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/nmap.py +0 -0
  150. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/searchsploit.py +0 -0
  151. {secator-0.9.4 → secator-0.10.1a0}/secator/tasks/subfinder.py +0 -0
  152. {secator-0.9.4 → secator-0.10.1a0}/secator/thread.py +0 -0
  153. {secator-0.9.4 → secator-0.10.1a0}/secator/utils_test.py +0 -0
  154. {secator-0.9.4 → secator-0.10.1a0}/secator/workflows/__init__.py +0 -0
  155. {secator-0.9.4 → secator-0.10.1a0}/tests/__init__.py +0 -0
  156. {secator-0.9.4 → secator-0.10.1a0}/tests/fixtures/h8mail_breach.txt +0 -0
  157. {secator-0.9.4 → secator-0.10.1a0}/tests/fixtures/ls.py +0 -0
  158. {secator-0.9.4 → secator-0.10.1a0}/tests/fixtures/msfconsole_input.rc +0 -0
  159. {secator-0.9.4 → secator-0.10.1a0}/tests/fixtures/nmap_output.xml +0 -0
  160. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/__init__.py +0 -0
  161. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/inputs.py +0 -0
  162. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/outputs.py +0 -0
  163. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/setup.sh +0 -0
  164. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/teardown.sh +0 -0
  165. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/test_addons.py +0 -0
  166. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/test_celery.py +0 -0
  167. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/test_scans.py +0 -0
  168. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/test_tasks.py +0 -0
  169. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/test_tasks_categories.py +0 -0
  170. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/test_worker.py +0 -0
  171. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/test_workflows.py +0 -0
  172. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/wordlist.txt +0 -0
  173. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/wordlist_dns.txt +0 -0
  174. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/wordpress_toolbox/Dockerfile +0 -0
  175. {secator-0.9.4 → secator-0.10.1a0}/tests/integration/wordpress_toolbox/Makefile +0 -0
  176. {secator-0.9.4 → secator-0.10.1a0}/tests/performance/__init__.py +0 -0
  177. {secator-0.9.4 → secator-0.10.1a0}/tests/performance/loadtester.py +0 -0
  178. {secator-0.9.4 → secator-0.10.1a0}/tests/performance/test_worker.py +0 -0
  179. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/__init__.py +0 -0
  180. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_celery.py +0 -0
  181. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_cli.py +0 -0
  182. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_config.py +0 -0
  183. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_offline.py +0 -0
  184. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_runners.py +0 -0
  185. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_scans.py +0 -0
  186. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_serializers.py +0 -0
  187. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_tasks.py +0 -0
  188. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_tasks_categories.py +0 -0
  189. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_template.py +0 -0
  190. {secator-0.9.4 → secator-0.10.1a0}/tests/unit/test_utils.py +0 -0
@@ -1,5 +1,21 @@
1
1
  # Changelog
2
2
 
3
+ ## [0.10.0](https://github.com/freelabz/secator/compare/v0.9.4...v0.10.0) (2025-03-04)
4
+
5
+
6
+ ### Features
7
+
8
+ * **celery:** add single pool job processing options ([#558](https://github.com/freelabz/secator/issues/558)) ([cca9797](https://github.com/freelabz/secator/commit/cca9797339ed95c15ef42604ba9bf897a76da766))
9
+
10
+
11
+ ### Bug Fixes
12
+
13
+ * add poll queue and re-route tasks to it when chunked ([#562](https://github.com/freelabz/secator/issues/562)) ([b9e7576](https://github.com/freelabz/secator/commit/b9e7576dcd46db0e95df09dc8beafb762314cc39))
14
+ * **config:** disable key not found error message (too verbose) ([#563](https://github.com/freelabz/secator/issues/563)) ([3b044da](https://github.com/freelabz/secator/commit/3b044da14f3e399e03e10c97566a25e858e7e7d6))
15
+ * **katana:** add -no-sandbox option ([#566](https://github.com/freelabz/secator/issues/566)) ([50cf2fe](https://github.com/freelabz/secator/commit/50cf2fef5eaa1b7cf67c691360e3349b7bf7bfbe))
16
+ * mongodb document bug ([#564](https://github.com/freelabz/secator/issues/564)) ([29c6af4](https://github.com/freelabz/secator/commit/29c6af4f3a2a49ac973f7193ca334f56e7b178d5))
17
+ * reset nuclei input chunk size ([#561](https://github.com/freelabz/secator/issues/561)) ([683b9ef](https://github.com/freelabz/secator/commit/683b9efb28d2425961e2e64c31b9af5cb07c0923))
18
+
3
19
  ## [0.9.4](https://github.com/freelabz/secator/compare/v0.9.3...v0.9.4) (2025-02-25)
4
20
 
5
21
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: secator
3
- Version: 0.9.4
3
+ Version: 0.10.1a0
4
4
  Summary: The pentester's swiss knife.
5
5
  Project-URL: Homepage, https://github.com/freelabz/secator
6
6
  Project-URL: Issues, https://github.com/freelabz/secator/issues
@@ -4,7 +4,7 @@ build-backend = 'hatchling.build'
4
4
 
5
5
  [project]
6
6
  name = 'secator'
7
- version = "0.9.4"
7
+ version = "0.10.1a0"
8
8
  authors = [{ name = 'FreeLabz', email = 'sales@freelabz.com' }]
9
9
  readme = 'README.md'
10
10
  description = "The pentester's swiss knife."
@@ -1,22 +1,23 @@
1
1
  import gc
2
2
  import json
3
3
  import logging
4
+ import os
4
5
  import sys
5
6
  import uuid
6
7
 
7
8
  from time import time
8
9
 
9
- from celery import Celery, chain, chord, signals
10
+ from celery import Celery, chord
10
11
  from celery.app import trace
11
12
 
12
13
  from rich.logging import RichHandler
13
14
  from retry import retry
14
15
 
16
+ from secator.celery_signals import setup_handlers
15
17
  from secator.config import CONFIG
16
- from secator.output_types import Info, Error
18
+ from secator.output_types import Info
17
19
  from secator.rich import console
18
20
  from secator.runners import Scan, Task, Workflow
19
- from secator.runners._helpers import run_extractors
20
21
  from secator.utils import (debug, deduplicate, flatten, should_update)
21
22
 
22
23
  IN_CELERY_WORKER_PROCESS = sys.argv and ('secator.celery.app' in sys.argv or 'worker' in sys.argv)
@@ -78,7 +79,8 @@ app.conf.update({
78
79
  'secator.celery.run_workflow': {'queue': 'celery'},
79
80
  'secator.celery.run_scan': {'queue': 'celery'},
80
81
  'secator.celery.run_task': {'queue': 'celery'},
81
- 'secator.hooks.mongodb.tag_duplicates': {'queue': 'mongodb'}
82
+ 'secator.celery.forward_results': {'queue': 'results'},
83
+ 'secator.hooks.mongodb.*': {'queue': 'mongodb'}
82
84
  },
83
85
  'task_store_eager_result': True,
84
86
  'task_send_sent_event': CONFIG.celery.task_send_sent_event,
@@ -93,29 +95,14 @@ app.conf.update({
93
95
  'worker_send_task_events': CONFIG.celery.worker_send_task_events
94
96
  })
95
97
  app.autodiscover_tasks(['secator.hooks.mongodb'], related_name=None)
96
-
97
-
98
- def maybe_override_logging():
99
- def decorator(func):
100
- if CONFIG.celery.override_default_logging:
101
- return signals.setup_logging.connect(func)
102
- else:
103
- return func
104
- return decorator
105
-
106
-
107
- @maybe_override_logging()
108
- def void(*args, **kwargs):
109
- """Override celery's logging setup to prevent it from altering our settings.
110
- github.com/celery/celery/issues/1867
111
- """
112
- pass
98
+ if IN_CELERY_WORKER_PROCESS:
99
+ setup_handlers()
113
100
 
114
101
 
115
102
  @retry(Exception, tries=3, delay=2)
116
103
  def update_state(celery_task, task, force=False):
117
104
  """Update task state to add metadata information."""
118
- if task.sync:
105
+ if not IN_CELERY_WORKER_PROCESS:
119
106
  return
120
107
  if not force and not should_update(CONFIG.runners.backend_update_frequency, task.last_updated_celery):
121
108
  return
@@ -151,6 +138,16 @@ def chunker(seq, size):
151
138
  return (seq[pos:pos + size] for pos in range(0, len(seq), size))
152
139
 
153
140
 
141
+ @app.task(bind=True)
142
+ def handle_runner_error(self, results, runner):
143
+ """Handle errors in Celery workflows (chunked tasks or runners)."""
144
+ results = forward_results(results)
145
+ runner.results = results
146
+ runner.log_results()
147
+ runner.run_hooks('on_end')
148
+ return runner.results
149
+
150
+
154
151
  def break_task(task, task_opts, targets, results=[], chunk_size=1):
155
152
  """Break a task into multiple of the same type."""
156
153
  chunks = targets
@@ -179,7 +176,8 @@ def break_task(task, task_opts, targets, results=[], chunk_size=1):
179
176
  task_id = str(uuid.uuid4())
180
177
  opts['has_parent'] = True
181
178
  opts['enable_duplicate_check'] = False
182
- sig = type(task).s(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
179
+ opts['results'] = results
180
+ sig = type(task).si(chunk, **opts).set(queue=type(task).profile, task_id=task_id)
183
181
  full_name = f'{task.name}_{ix + 1}'
184
182
  task.add_subtask(task_id, task.name, f'{task.name}_{ix + 1}')
185
183
  info = Info(message=f'Celery chunked task created: {task_id}', _source=full_name, _uuid=str(uuid.uuid4()))
@@ -187,23 +185,17 @@ def break_task(task, task_opts, targets, results=[], chunk_size=1):
187
185
  sigs.append(sig)
188
186
 
189
187
  # Build Celery workflow
190
- workflow = chain(
191
- forward_results.s(results).set(queue='io'),
192
- chord(
193
- tuple(sigs),
194
- forward_results.s().set(queue='io'),
195
- )
188
+ workflow = chord(
189
+ tuple(sigs),
190
+ handle_runner_error.s(runner=task).set(queue='results')
196
191
  )
197
- if task.sync:
198
- task.print_item = False
199
- task.results = workflow.apply().get()
200
- else:
201
- result = workflow.apply_async()
202
- task.celery_result = result
192
+ return workflow
203
193
 
204
194
 
205
195
  @app.task(bind=True)
206
196
  def run_task(self, args=[], kwargs={}):
197
+ print('run task')
198
+ console.print(Info(message=f'Running task {self.request.id}'))
207
199
  kwargs['context']['celery_id'] = self.request.id
208
200
  task = Task(*args, **kwargs)
209
201
  task.run()
@@ -211,6 +203,7 @@ def run_task(self, args=[], kwargs={}):
211
203
 
212
204
  @app.task(bind=True)
213
205
  def run_workflow(self, args=[], kwargs={}):
206
+ console.print(Info(message=f'Running workflow {self.request.id}'))
214
207
  kwargs['context']['celery_id'] = self.request.id
215
208
  workflow = Workflow(*args, **kwargs)
216
209
  workflow.run()
@@ -218,6 +211,7 @@ def run_workflow(self, args=[], kwargs={}):
218
211
 
219
212
  @app.task(bind=True)
220
213
  def run_scan(self, args=[], kwargs={}):
214
+ console.print(Info(message=f'Running scan {self.request.id}'))
221
215
  if 'context' not in kwargs:
222
216
  kwargs['context'] = {}
223
217
  kwargs['context']['celery_id'] = self.request.id
@@ -227,94 +221,41 @@ def run_scan(self, args=[], kwargs={}):
227
221
 
228
222
  @app.task(bind=True)
229
223
  def run_command(self, results, name, targets, opts={}):
230
- chunk = opts.get('chunk')
231
- sync = opts.get('sync', True)
224
+ if IN_CELERY_WORKER_PROCESS:
225
+ opts.update({'print_item': True, 'print_line': True, 'print_cmd': True})
226
+ routing_key = self.request.delivery_info['routing_key']
227
+ console.print(Info(message=f'Task "{name}" running with routing key "{routing_key}"'))
228
+
229
+ # Flatten + dedupe + filter results
230
+ results = forward_results(results)
232
231
 
233
232
  # Set Celery request id in context
234
233
  context = opts.get('context', {})
235
234
  context['celery_id'] = self.request.id
235
+ context['worker_name'] = os.environ.get('WORKER_NAME', 'unknown')
236
236
  opts['context'] = context
237
- opts['print_remote_info'] = False
238
237
  opts['results'] = results
238
+ opts['sync'] = True
239
239
 
240
- # If we are in a Celery worker, print everything, always
241
- if IN_CELERY_WORKER_PROCESS:
242
- opts.update({
243
- 'print_item': True,
244
- 'print_line': True,
245
- 'print_cmd': True
246
- })
240
+ # Initialize task
241
+ sync = not IN_CELERY_WORKER_PROCESS
242
+ task_cls = Task.get_task_class(name)
243
+ task = task_cls(targets, **opts)
244
+ update_state(self, task, force=True)
247
245
 
248
- # Flatten + dedupe results
249
- results = flatten(results)
250
- results = deduplicate(results, attr='_uuid')
246
+ # Chunk task if needed
247
+ if task_cls.needs_chunking(targets, sync):
248
+ console.print(Info(message=f'Task {name} requires chunking, breaking into {len(targets)} tasks'))
249
+ return self.replace(break_task(task, opts, targets, results=results))
250
+
251
+ # Update state live
252
+ [update_state(self, task) for _ in task]
253
+ update_state(self, task, force=True)
254
+
255
+ # Garbage collection to save RAM
256
+ gc.collect()
251
257
 
252
- # Get expanded targets
253
- if not chunk and results:
254
- targets, opts = run_extractors(results, opts, targets)
255
- debug('after extractors', obj={'targets': targets, 'opts': opts}, sub='celery.state')
256
-
257
- try:
258
- # Get task class
259
- task_cls = Task.get_task_class(name)
260
-
261
- # Check if chunkable
262
- many_targets = len(targets) > 1
263
- targets_over_chunk_size = task_cls.input_chunk_size and len(targets) > task_cls.input_chunk_size
264
- has_file_flag = task_cls.file_flag is not None
265
- chunk_it = (sync and many_targets and not has_file_flag) or (not sync and many_targets and targets_over_chunk_size)
266
- task_opts = opts.copy()
267
- task_opts.update({
268
- 'print_remote_info': False,
269
- 'has_children': chunk_it,
270
- })
271
- if chunk_it:
272
- task_opts['print_cmd'] = False
273
- task = task_cls(targets, **task_opts)
274
- debug(
275
- '',
276
- obj={
277
- f'{task.unique_name}': 'CHUNK STATUS',
278
- 'chunk_it': chunk_it,
279
- 'sync': task.sync,
280
- 'many_targets': many_targets,
281
- 'targets_over_chunk_size': targets_over_chunk_size,
282
- },
283
- obj_after=False,
284
- id=self.request.id,
285
- sub='celery.state',
286
- verbose=True
287
- )
288
-
289
- # Chunk task if needed
290
- if chunk_it:
291
- chunk_size = task_cls.input_chunk_size if has_file_flag else 1
292
- break_task(
293
- task,
294
- opts,
295
- targets,
296
- results=results,
297
- chunk_size=chunk_size)
298
-
299
- # Update state before starting
300
- update_state(self, task)
301
-
302
- # Update state for each item found
303
- for _ in task:
304
- update_state(self, task)
305
-
306
- except BaseException as e:
307
- error = Error.from_exception(e)
308
- error._source = task.unique_name
309
- error._uuid = str(uuid.uuid4())
310
- task.add_result(error, print=True)
311
- task.stop_celery_tasks()
312
-
313
- finally:
314
- update_state(self, task, force=True)
315
- gc.collect()
316
- debug('', obj={task.unique_name: task.status, 'results': task.results}, sub='celery.results', verbose=True)
317
- return task.results
258
+ return task.results
318
259
 
319
260
 
320
261
  @app.task
@@ -327,6 +268,7 @@ def forward_results(results):
327
268
  results = results['results']
328
269
  results = flatten(results)
329
270
  results = deduplicate(results, attr='_uuid')
271
+ console.print(Info(message=f'Forwarding {len(results)} results ...'))
330
272
  return results
331
273
 
332
274
  #--------------#
@@ -0,0 +1,141 @@
1
+ import os
2
+ import signal
3
+ import threading
4
+ from pathlib import Path
5
+
6
+ from celery import signals
7
+
8
+ from secator.config import CONFIG
9
+ from secator.output_types import Info
10
+ from secator.rich import console
11
+
12
+ IDLE_TIMEOUT = CONFIG.celery.worker_kill_after_idle_seconds
13
+
14
+ # File-based state management system
15
+ STATE_DIR = Path("/tmp/celery_state")
16
+ STATE_DIR.mkdir(exist_ok=True, parents=True)
17
+
18
+
19
+ def get_lock_file_path():
20
+ worker_name = os.environ.get("WORKER_NAME", f"unknown_{os.getpid()}")
21
+ return Path(f"/tmp/celery_worker_{worker_name}.lock")
22
+
23
+
24
+ def set_task_running(task_id):
25
+ """Mark that a task is running in current worker"""
26
+ with open(get_lock_file_path(), "w") as f:
27
+ f.write(task_id)
28
+
29
+
30
+ def clear_task_running():
31
+ """Clear the task running state"""
32
+ lock_file = get_lock_file_path()
33
+ if lock_file.exists():
34
+ lock_file.unlink()
35
+
36
+
37
+ def is_task_running():
38
+ """Check if a task is currently running"""
39
+ return get_lock_file_path().exists()
40
+
41
+
42
+ def kill_worker(parent=False):
43
+ """Kill current worker using its pid by sending a SIGTERM to Celery master process."""
44
+ worker_name = os.environ.get('WORKER_NAME', 'unknown')
45
+
46
+ # Check if a task is running via the lock file
47
+ if not is_task_running():
48
+ pid = os.getppid() if parent else os.getpid()
49
+ console.print(Info(message=f'Sending SIGTERM to worker {worker_name} with pid {pid}'))
50
+ os.kill(pid, signal.SIGTERM)
51
+ else:
52
+ console.print(Info(message=f'Cancelling worker shutdown of {worker_name} since a task is running'))
53
+
54
+
55
+ def setup_idle_timer(timeout):
56
+ """Setup a timer to kill the worker after being idle"""
57
+ if timeout == -1:
58
+ return
59
+
60
+ console.print(Info(message=f'Starting inactivity timer for {timeout} seconds ...'))
61
+ timer = threading.Timer(timeout, kill_worker)
62
+ timer.daemon = True # Make sure timer is killed when worker exits
63
+ timer.start()
64
+
65
+
66
+ def maybe_override_logging():
67
+ def decorator(func):
68
+ if CONFIG.celery.override_default_logging:
69
+ return signals.setup_logging.connect(func)
70
+ else:
71
+ return func
72
+ return decorator
73
+
74
+
75
+ @maybe_override_logging()
76
+ def setup_logging(*args, **kwargs):
77
+ """Override celery's logging setup to prevent it from altering our settings.
78
+ github.com/celery/celery/issues/1867
79
+ """
80
+ pass
81
+
82
+
83
+ def capture_worker_name(sender, instance, **kwargs):
84
+ os.environ["WORKER_NAME"] = '{0}'.format(sender)
85
+
86
+
87
+ def worker_init_handler(**kwargs):
88
+ if IDLE_TIMEOUT != -1:
89
+ setup_idle_timer(IDLE_TIMEOUT)
90
+
91
+
92
+ def task_prerun_handler(task_id, **kwargs):
93
+ # Mark that a task is running
94
+ set_task_running(task_id)
95
+
96
+
97
+ def task_postrun_handler(**kwargs):
98
+ # Mark that no task is running
99
+ clear_task_running()
100
+
101
+ # Get sender name from kwargs
102
+ sender_name = kwargs['sender'].name
103
+
104
+ if CONFIG.celery.worker_kill_after_task and sender_name.startswith('secator.'):
105
+ worker_name = os.environ.get('WORKER_NAME', 'unknown')
106
+ console.print(Info(message=f'Shutdown worker {worker_name} since config celery.worker_kill_after_task is set.'))
107
+ kill_worker(parent=True)
108
+ return
109
+
110
+ # Set up a new idle timer
111
+ if IDLE_TIMEOUT != -1:
112
+ console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds'))
113
+ setup_idle_timer(IDLE_TIMEOUT)
114
+
115
+
116
+ def task_revoked_handler(request=None, **kwargs):
117
+ """Handle revoked tasks by clearing the task running state"""
118
+ console.print(Info(message='Task was revoked, clearing running state'))
119
+ clear_task_running()
120
+
121
+ # Set up a new idle timer
122
+ if IDLE_TIMEOUT != -1:
123
+ console.print(Info(message=f'Reset inactivity timer to {IDLE_TIMEOUT} seconds after task revocation'))
124
+ setup_idle_timer(IDLE_TIMEOUT)
125
+
126
+
127
+ def worker_shutdown_handler(**kwargs):
128
+ """Cleanup lock files when worker shuts down"""
129
+ lock_file = get_lock_file_path()
130
+ if lock_file.exists():
131
+ lock_file.unlink()
132
+
133
+
134
+ def setup_handlers():
135
+ signals.celeryd_after_setup.connect(capture_worker_name)
136
+ signals.setup_logging.connect(setup_logging)
137
+ signals.task_prerun.connect(task_prerun_handler)
138
+ signals.task_postrun.connect(task_postrun_handler)
139
+ signals.task_revoked.connect(task_revoked_handler)
140
+ signals.worker_ready.connect(worker_init_handler)
141
+ signals.worker_shutdown.connect(worker_shutdown_handler)
@@ -264,5 +264,5 @@ class CeleryData(object):
264
264
  CeleryData.get_task_ids(result.parent, ids=ids)
265
265
 
266
266
  except kombu.exceptions.DecodeError:
267
- debug('kombu decode error', sub='celery.data.get_task_ids')
267
+ debug('kombu decode error', sub='celery.data')
268
268
  return
@@ -148,7 +148,7 @@ def worker(hostname, concurrency, reload, queue, pool, check, dev, stop, show):
148
148
  return
149
149
 
150
150
  if not queue:
151
- queue = 'io,cpu,' + ','.join([r['queue'] for r in app.conf.task_routes.values()])
151
+ queue = 'io,cpu,poll,' + ','.join(set([r['queue'] for r in app.conf.task_routes.values()]))
152
152
 
153
153
  app_str = 'secator.celery.app'
154
154
  celery = f'{sys.executable} -m celery'
@@ -171,7 +171,8 @@ def worker(hostname, concurrency, reload, queue, pool, check, dev, stop, show):
171
171
  patterns = "celery.py;tasks/*.py;runners/*.py;serializers/*.py;output_types/*.py;hooks/*.py;exporters/*.py"
172
172
  cmd = f'watchmedo auto-restart --directory=./ --patterns="{patterns}" --recursive -- {cmd}'
173
173
 
174
- Command.execute(cmd, name='secator_worker')
174
+ ret = Command.execute(cmd, name='secator_worker')
175
+ sys.exit(ret.return_code)
175
176
 
176
177
 
177
178
  #-------#
@@ -73,6 +73,8 @@ class Celery(StrictModel):
73
73
  worker_max_tasks_per_child: int = 20
74
74
  worker_prefetch_multiplier: int = 1
75
75
  worker_send_task_events: bool = False
76
+ worker_kill_after_task: bool = False
77
+ worker_kill_after_idle_seconds: int = -1
76
78
 
77
79
 
78
80
  class Cli(StrictModel):
@@ -499,8 +501,8 @@ class Config(DotMap):
499
501
  self.set(path, value, set_partial=False)
500
502
  if not self.validate(print_errors=False) and print_errors:
501
503
  console.print(f'[bold red]{var} (override failed)[/]')
502
- elif print_errors:
503
- console.print(f'[bold red]{var} (override failed: key not found)[/]')
504
+ # elif print_errors:
505
+ # console.print(f'[bold red]{var} (override failed: key not found)[/]')
504
506
 
505
507
 
506
508
  def download_files(data: dict, target_folder: Path, offline_mode: bool, type: str):
@@ -28,6 +28,7 @@ RUNNER_OPTS = {
28
28
  RUNNER_GLOBAL_OPTS = {
29
29
  'sync': {'is_flag': True, 'help': 'Run tasks synchronously (automatic if no worker is alive)'},
30
30
  'worker': {'is_flag': True, 'default': False, 'help': 'Run tasks in worker'},
31
+ 'no_poll': {'is_flag': True, 'default': False, 'help': 'Do not live poll for tasks results when running in worker'},
31
32
  'proxy': {'type': str, 'help': 'HTTP proxy'},
32
33
  'driver': {'type': str, 'help': 'Export real-time results. E.g: "mongodb"'}
33
34
  # 'debug': {'type': int, 'default': 0, 'help': 'Debug mode'},
@@ -20,11 +20,19 @@ MONGODB_MAX_POOL_SIZE = CONFIG.addons.mongodb.max_pool_size
20
20
 
21
21
  logger = logging.getLogger(__name__)
22
22
 
23
- client = pymongo.MongoClient(
24
- escape_mongodb_url(MONGODB_URL),
25
- maxPoolSize=MONGODB_MAX_POOL_SIZE,
26
- serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
27
- )
23
# Lazily-created process-wide MongoDB client (None until first use)
_mongodb_client = None


def get_mongodb_client():
	"""Return the shared MongoDB client, creating it on first call.

	Lazy creation avoids opening a connection at import time and lets
	forked worker processes each build their own client.
	"""
	global _mongodb_client
	if _mongodb_client is not None:
		return _mongodb_client
	_mongodb_client = pymongo.MongoClient(
		escape_mongodb_url(MONGODB_URL),
		maxPoolSize=MONGODB_MAX_POOL_SIZE,
		serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
	)
	return _mongodb_client
28
36
 
29
37
 
30
38
  def get_runner_dbg(runner):
@@ -39,6 +47,7 @@ def get_runner_dbg(runner):
39
47
 
40
48
 
41
49
  def update_runner(self):
50
+ client = get_mongodb_client()
42
51
  db = client.main
43
52
  type = self.config.type
44
53
  collection = f'{type}s'
@@ -72,6 +81,7 @@ def update_finding(self, item):
72
81
  if type(item) not in FINDING_TYPES:
73
82
  return item
74
83
  start_time = time.time()
84
+ client = get_mongodb_client()
75
85
  db = client.main
76
86
  update = item.toDict()
77
87
  _type = item._type
@@ -97,15 +107,14 @@ def update_finding(self, item):
97
107
 
98
108
 
99
109
def find_duplicates(self):
	"""Kick off duplicate tagging for this runner's workspace.

	Runs synchronously outside a Celery worker; otherwise dispatches the
	tag_duplicates task asynchronously so the worker is not blocked.
	"""
	from secator.celery import IN_CELERY_WORKER_PROCESS
	context = self.toDict().get('context', {})
	ws_id = context.get('workspace_id')
	if not ws_id:
		return
	if IN_CELERY_WORKER_PROCESS:
		tag_duplicates.delay(ws_id)
	else:
		tag_duplicates(ws_id)
109
118
 
110
119
 
111
120
  def load_finding(obj):
@@ -132,6 +141,8 @@ def tag_duplicates(ws_id: str = None):
132
141
  Args:
133
142
  ws_id (str): Workspace id.
134
143
  """
144
+ debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
145
+ client = get_mongodb_client()
135
146
  db = client.main
136
147
  workspace_query = list(
137
148
  db.findings.find({'_context.workspace_id': str(ws_id), '_tagged': True}).sort('_timestamp', -1))
@@ -172,19 +183,19 @@ def tag_duplicates(ws_id: str = None):
172
183
  'seen dupes': len(seen_dupes)
173
184
  },
174
185
  id=ws_id,
175
- sub='hooks.mongodb.duplicates',
186
+ sub='hooks.mongodb',
176
187
  verbose=True)
177
188
  tmp_duplicates_ids = list(dict.fromkeys([i._uuid for i in tmp_duplicates]))
178
- debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
189
+ debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb', verbose=True)
179
190
 
180
191
  # Update latest object as non-duplicate
181
192
  if tmp_duplicates:
182
193
  duplicates.extend([f for f in tmp_duplicates])
183
194
  db.findings.update_one({'_id': ObjectId(item._uuid)}, {'$set': {'_related': tmp_duplicates_ids}})
184
- debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
195
+ debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
185
196
  non_duplicates.append(item)
186
197
  else:
187
- debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
198
+ debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb', verbose=True)
188
199
  non_duplicates.append(item)
189
200
 
190
201
  # debug(f'found {len(duplicates)} total duplicates')
@@ -208,7 +219,7 @@ def tag_duplicates(ws_id: str = None):
208
219
  'duplicates': len(duplicates_ids),
209
220
  'non-duplicates': len(non_duplicates_ids)
210
221
  },
211
- sub='hooks.mongodb.duplicates')
222
+ sub='hooks.mongodb')
212
223
 
213
224
 
214
225
  HOOKS = {
@@ -232,6 +243,6 @@ HOOKS = {
232
243
  'on_item': [update_finding],
233
244
  'on_duplicate': [update_finding],
234
245
  'on_interval': [update_runner],
235
- 'on_end': [update_runner, find_duplicates]
246
+ 'on_end': [update_runner]
236
247
  }
237
248
  }
@@ -21,10 +21,13 @@ class Error(OutputType):
21
21
  _sort_by = ('_timestamp',)
22
22
 
23
23
def from_exception(e, **kwargs):
	"""Build an Error output item from an exception.

	The traceback is omitted for benign interruptions (KeyboardInterrupt,
	GreenletExit) since they carry no useful stack information.
	"""
	errtype = type(e).__name__
	details = str(e)
	message = f'{errtype}: {details}' if details else errtype
	tb = '' if errtype in ('KeyboardInterrupt', 'GreenletExit') else traceback_as_string(e)
	return Error(message=message, traceback=tb, **kwargs)
28
31
 
29
32
  def __str__(self):
30
33
  return self.message