secator 0.17.0__tar.gz → 0.19.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (217)
  1. {secator-0.17.0 → secator-0.19.0}/.docker/build_all.sh +13 -1
  2. secator-0.19.0/.dockerignore +4 -0
  3. {secator-0.17.0 → secator-0.19.0}/.gitignore +3 -0
  4. {secator-0.17.0 → secator-0.19.0}/CHANGELOG.md +26 -0
  5. {secator-0.17.0 → secator-0.19.0}/PKG-INFO +1 -1
  6. {secator-0.17.0 → secator-0.19.0}/helm/templates/redis-service.yaml +0 -1
  7. {secator-0.17.0 → secator-0.19.0}/helm/templates/redis.yaml +6 -2
  8. secator-0.19.0/helm/templates/secator-manager.yaml +22 -0
  9. {secator-0.17.0 → secator-0.19.0}/helm/templates/secator-worker.yaml +8 -3
  10. secator-0.19.0/helm/values.yaml +58 -0
  11. {secator-0.17.0 → secator-0.19.0}/pyproject.toml +1 -1
  12. {secator-0.17.0 → secator-0.19.0}/secator/config.py +3 -2
  13. secator-0.19.0/secator/configs/workflows/user_hunt.yaml +24 -0
  14. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/wordpress.yaml +13 -0
  15. {secator-0.17.0 → secator-0.19.0}/secator/installer.py +1 -1
  16. {secator-0.17.0 → secator-0.19.0}/secator/output_types/stat.py +8 -3
  17. {secator-0.17.0 → secator-0.19.0}/secator/runners/_base.py +8 -10
  18. {secator-0.17.0 → secator-0.19.0}/secator/runners/command.py +140 -19
  19. {secator-0.17.0 → secator-0.19.0}/secator/tasks/bup.py +1 -1
  20. {secator-0.17.0 → secator-0.19.0}/secator/tasks/fping.py +24 -7
  21. {secator-0.17.0 → secator-0.19.0}/secator/tasks/maigret.py +7 -2
  22. {secator-0.17.0 → secator-0.19.0}/secator/tasks/wpscan.py +15 -4
  23. {secator-0.17.0 → secator-0.19.0}/tests/integration/inputs.py +1 -1
  24. {secator-0.17.0 → secator-0.19.0}/tests/integration/outputs.py +1 -1
  25. secator-0.17.0/.dockerignore +0 -1
  26. secator-0.17.0/helm/templates/secator-manager.yaml +0 -18
  27. secator-0.17.0/helm/values.yaml +0 -34
  28. secator-0.17.0/secator/configs/workflows/user_hunt.yaml +0 -12
  29. {secator-0.17.0 → secator-0.19.0}/.coderabbit.yaml +0 -0
  30. {secator-0.17.0 → secator-0.19.0}/.docker/Dockerfile.alpine +0 -0
  31. {secator-0.17.0 → secator-0.19.0}/.docker/Dockerfile.arch +0 -0
  32. {secator-0.17.0 → secator-0.19.0}/.docker/Dockerfile.debian +0 -0
  33. {secator-0.17.0 → secator-0.19.0}/.docker/Dockerfile.kali +0 -0
  34. {secator-0.17.0 → secator-0.19.0}/.docker/Dockerfile.osx +0 -0
  35. {secator-0.17.0 → secator-0.19.0}/.docker/Dockerfile.ubuntu +0 -0
  36. {secator-0.17.0 → secator-0.19.0}/.flake8 +0 -0
  37. {secator-0.17.0 → secator-0.19.0}/CONTRIBUTING.md +0 -0
  38. {secator-0.17.0 → secator-0.19.0}/Dockerfile +0 -0
  39. {secator-0.17.0 → secator-0.19.0}/LICENSE +0 -0
  40. {secator-0.17.0 → secator-0.19.0}/README.md +0 -0
  41. {secator-0.17.0 → secator-0.19.0}/SECURITY.md +0 -0
  42. {secator-0.17.0 → secator-0.19.0}/cloudbuild.yaml +0 -0
  43. {secator-0.17.0 → secator-0.19.0}/helm/.helmignore +0 -0
  44. {secator-0.17.0 → secator-0.19.0}/helm/Chart.yaml +0 -0
  45. {secator-0.17.0 → secator-0.19.0}/scripts/download_cves.sh +0 -0
  46. {secator-0.17.0 → secator-0.19.0}/scripts/generate_tools_md_table.py +0 -0
  47. {secator-0.17.0 → secator-0.19.0}/scripts/install.sh +0 -0
  48. {secator-0.17.0 → secator-0.19.0}/scripts/install_asciinema.sh +0 -0
  49. {secator-0.17.0 → secator-0.19.0}/scripts/install_go.sh +0 -0
  50. {secator-0.17.0 → secator-0.19.0}/scripts/install_ruby.sh +0 -0
  51. {secator-0.17.0 → secator-0.19.0}/scripts/msf/exploit_cve.rc +0 -0
  52. {secator-0.17.0 → secator-0.19.0}/scripts/msf/ftp_anonymous.rc +0 -0
  53. {secator-0.17.0 → secator-0.19.0}/scripts/msf/ftp_version.rc +0 -0
  54. {secator-0.17.0 → secator-0.19.0}/scripts/msf/ftp_vsftpd_234_backdoor.rc +0 -0
  55. {secator-0.17.0 → secator-0.19.0}/scripts/msf/redis.rc +0 -0
  56. {secator-0.17.0 → secator-0.19.0}/scripts/stories/STORY.md +0 -0
  57. {secator-0.17.0 → secator-0.19.0}/scripts/stories/aliases.sh +0 -0
  58. {secator-0.17.0 → secator-0.19.0}/scripts/stories/demo.sh +0 -0
  59. {secator-0.17.0 → secator-0.19.0}/scripts/stories/fmt.sh +0 -0
  60. {secator-0.17.0 → secator-0.19.0}/scripts/stories/input.sh +0 -0
  61. {secator-0.17.0 → secator-0.19.0}/scripts/stories/pipe.sh +0 -0
  62. {secator-0.17.0 → secator-0.19.0}/scripts/stories/short_demo.sh +0 -0
  63. {secator-0.17.0 → secator-0.19.0}/scripts/update_tools.sh +0 -0
  64. {secator-0.17.0 → secator-0.19.0}/secator/.gitignore +0 -0
  65. {secator-0.17.0 → secator-0.19.0}/secator/__init__.py +0 -0
  66. {secator-0.17.0 → secator-0.19.0}/secator/celery.py +0 -0
  67. {secator-0.17.0 → secator-0.19.0}/secator/celery_signals.py +0 -0
  68. {secator-0.17.0 → secator-0.19.0}/secator/celery_utils.py +0 -0
  69. {secator-0.17.0 → secator-0.19.0}/secator/cli.py +0 -0
  70. {secator-0.17.0 → secator-0.19.0}/secator/cli_helper.py +0 -0
  71. {secator-0.17.0 → secator-0.19.0}/secator/click.py +0 -0
  72. {secator-0.17.0 → secator-0.19.0}/secator/configs/__init__.py +0 -0
  73. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/__init__.py +0 -0
  74. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/aggressive.yaml +0 -0
  75. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/http_headless.yaml +0 -0
  76. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/http_record.yaml +0 -0
  77. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/insane.yaml +0 -0
  78. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/paranoid.yaml +0 -0
  79. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/polite.yaml +0 -0
  80. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/sneaky.yaml +0 -0
  81. {secator-0.17.0 → secator-0.19.0}/secator/configs/profiles/tor.yaml +0 -0
  82. {secator-0.17.0 → secator-0.19.0}/secator/configs/scans/__init__.py +0 -0
  83. {secator-0.17.0 → secator-0.19.0}/secator/configs/scans/domain.yaml +0 -0
  84. {secator-0.17.0 → secator-0.19.0}/secator/configs/scans/host.yaml +0 -0
  85. {secator-0.17.0 → secator-0.19.0}/secator/configs/scans/network.yaml +0 -0
  86. {secator-0.17.0 → secator-0.19.0}/secator/configs/scans/subdomain.yaml +0 -0
  87. {secator-0.17.0 → secator-0.19.0}/secator/configs/scans/url.yaml +0 -0
  88. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/__init__.py +0 -0
  89. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/cidr_recon.yaml +0 -0
  90. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/code_scan.yaml +0 -0
  91. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/host_recon.yaml +0 -0
  92. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/subdomain_recon.yaml +0 -0
  93. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/url_bypass.yaml +0 -0
  94. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/url_crawl.yaml +0 -0
  95. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/url_dirsearch.yaml +0 -0
  96. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/url_fuzz.yaml +0 -0
  97. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/url_params_fuzz.yaml +0 -0
  98. {secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/url_vuln.yaml +0 -0
  99. {secator-0.17.0 → secator-0.19.0}/secator/cve.py +0 -0
  100. {secator-0.17.0 → secator-0.19.0}/secator/decorators.py +0 -0
  101. {secator-0.17.0 → secator-0.19.0}/secator/definitions.py +0 -0
  102. {secator-0.17.0 → secator-0.19.0}/secator/exporters/__init__.py +0 -0
  103. {secator-0.17.0 → secator-0.19.0}/secator/exporters/_base.py +0 -0
  104. {secator-0.17.0 → secator-0.19.0}/secator/exporters/console.py +0 -0
  105. {secator-0.17.0 → secator-0.19.0}/secator/exporters/csv.py +0 -0
  106. {secator-0.17.0 → secator-0.19.0}/secator/exporters/gdrive.py +0 -0
  107. {secator-0.17.0 → secator-0.19.0}/secator/exporters/json.py +0 -0
  108. {secator-0.17.0 → secator-0.19.0}/secator/exporters/table.py +0 -0
  109. {secator-0.17.0 → secator-0.19.0}/secator/exporters/txt.py +0 -0
  110. {secator-0.17.0 → secator-0.19.0}/secator/hooks/__init__.py +0 -0
  111. {secator-0.17.0 → secator-0.19.0}/secator/hooks/gcs.py +0 -0
  112. {secator-0.17.0 → secator-0.19.0}/secator/hooks/mongodb.py +0 -0
  113. {secator-0.17.0 → secator-0.19.0}/secator/loader.py +0 -0
  114. {secator-0.17.0 → secator-0.19.0}/secator/output_types/__init__.py +0 -0
  115. {secator-0.17.0 → secator-0.19.0}/secator/output_types/_base.py +0 -0
  116. {secator-0.17.0 → secator-0.19.0}/secator/output_types/certificate.py +0 -0
  117. {secator-0.17.0 → secator-0.19.0}/secator/output_types/error.py +0 -0
  118. {secator-0.17.0 → secator-0.19.0}/secator/output_types/exploit.py +0 -0
  119. {secator-0.17.0 → secator-0.19.0}/secator/output_types/info.py +0 -0
  120. {secator-0.17.0 → secator-0.19.0}/secator/output_types/ip.py +0 -0
  121. {secator-0.17.0 → secator-0.19.0}/secator/output_types/port.py +0 -0
  122. {secator-0.17.0 → secator-0.19.0}/secator/output_types/progress.py +0 -0
  123. {secator-0.17.0 → secator-0.19.0}/secator/output_types/record.py +0 -0
  124. {secator-0.17.0 → secator-0.19.0}/secator/output_types/state.py +0 -0
  125. {secator-0.17.0 → secator-0.19.0}/secator/output_types/subdomain.py +0 -0
  126. {secator-0.17.0 → secator-0.19.0}/secator/output_types/tag.py +0 -0
  127. {secator-0.17.0 → secator-0.19.0}/secator/output_types/target.py +0 -0
  128. {secator-0.17.0 → secator-0.19.0}/secator/output_types/url.py +0 -0
  129. {secator-0.17.0 → secator-0.19.0}/secator/output_types/user_account.py +0 -0
  130. {secator-0.17.0 → secator-0.19.0}/secator/output_types/vulnerability.py +0 -0
  131. {secator-0.17.0 → secator-0.19.0}/secator/output_types/warning.py +0 -0
  132. {secator-0.17.0 → secator-0.19.0}/secator/report.py +0 -0
  133. {secator-0.17.0 → secator-0.19.0}/secator/rich.py +0 -0
  134. {secator-0.17.0 → secator-0.19.0}/secator/runners/__init__.py +0 -0
  135. {secator-0.17.0 → secator-0.19.0}/secator/runners/_helpers.py +0 -0
  136. {secator-0.17.0 → secator-0.19.0}/secator/runners/celery.py +0 -0
  137. {secator-0.17.0 → secator-0.19.0}/secator/runners/scan.py +0 -0
  138. {secator-0.17.0 → secator-0.19.0}/secator/runners/task.py +0 -0
  139. {secator-0.17.0 → secator-0.19.0}/secator/runners/workflow.py +0 -0
  140. {secator-0.17.0 → secator-0.19.0}/secator/scans/__init__.py +0 -0
  141. {secator-0.17.0 → secator-0.19.0}/secator/serializers/__init__.py +0 -0
  142. {secator-0.17.0 → secator-0.19.0}/secator/serializers/dataclass.py +0 -0
  143. {secator-0.17.0 → secator-0.19.0}/secator/serializers/json.py +0 -0
  144. {secator-0.17.0 → secator-0.19.0}/secator/serializers/regex.py +0 -0
  145. {secator-0.17.0 → secator-0.19.0}/secator/tasks/__init__.py +0 -0
  146. {secator-0.17.0 → secator-0.19.0}/secator/tasks/_categories.py +0 -0
  147. {secator-0.17.0 → secator-0.19.0}/secator/tasks/arjun.py +0 -0
  148. {secator-0.17.0 → secator-0.19.0}/secator/tasks/bbot.py +0 -0
  149. {secator-0.17.0 → secator-0.19.0}/secator/tasks/cariddi.py +0 -0
  150. {secator-0.17.0 → secator-0.19.0}/secator/tasks/dalfox.py +0 -0
  151. {secator-0.17.0 → secator-0.19.0}/secator/tasks/dirsearch.py +0 -0
  152. {secator-0.17.0 → secator-0.19.0}/secator/tasks/dnsx.py +0 -0
  153. {secator-0.17.0 → secator-0.19.0}/secator/tasks/feroxbuster.py +0 -0
  154. {secator-0.17.0 → secator-0.19.0}/secator/tasks/ffuf.py +0 -0
  155. {secator-0.17.0 → secator-0.19.0}/secator/tasks/gau.py +0 -0
  156. {secator-0.17.0 → secator-0.19.0}/secator/tasks/gf.py +0 -0
  157. {secator-0.17.0 → secator-0.19.0}/secator/tasks/gitleaks.py +0 -0
  158. {secator-0.17.0 → secator-0.19.0}/secator/tasks/gospider.py +0 -0
  159. {secator-0.17.0 → secator-0.19.0}/secator/tasks/grype.py +0 -0
  160. {secator-0.17.0 → secator-0.19.0}/secator/tasks/h8mail.py +0 -0
  161. {secator-0.17.0 → secator-0.19.0}/secator/tasks/httpx.py +0 -0
  162. {secator-0.17.0 → secator-0.19.0}/secator/tasks/katana.py +0 -0
  163. {secator-0.17.0 → secator-0.19.0}/secator/tasks/mapcidr.py +0 -0
  164. {secator-0.17.0 → secator-0.19.0}/secator/tasks/msfconsole.py +0 -0
  165. {secator-0.17.0 → secator-0.19.0}/secator/tasks/naabu.py +0 -0
  166. {secator-0.17.0 → secator-0.19.0}/secator/tasks/nmap.py +0 -0
  167. {secator-0.17.0 → secator-0.19.0}/secator/tasks/nuclei.py +0 -0
  168. {secator-0.17.0 → secator-0.19.0}/secator/tasks/searchsploit.py +0 -0
  169. {secator-0.17.0 → secator-0.19.0}/secator/tasks/subfinder.py +0 -0
  170. {secator-0.17.0 → secator-0.19.0}/secator/tasks/testssl.py +0 -0
  171. {secator-0.17.0 → secator-0.19.0}/secator/tasks/trivy.py +0 -0
  172. {secator-0.17.0 → secator-0.19.0}/secator/tasks/wafw00f.py +0 -0
  173. {secator-0.17.0 → secator-0.19.0}/secator/tasks/wpprobe.py +0 -0
  174. {secator-0.17.0 → secator-0.19.0}/secator/template.py +0 -0
  175. {secator-0.17.0 → secator-0.19.0}/secator/thread.py +0 -0
  176. {secator-0.17.0 → secator-0.19.0}/secator/tree.py +0 -0
  177. {secator-0.17.0 → secator-0.19.0}/secator/utils.py +0 -0
  178. {secator-0.17.0 → secator-0.19.0}/secator/utils_test.py +0 -0
  179. {secator-0.17.0 → secator-0.19.0}/secator/workflows/__init__.py +0 -0
  180. {secator-0.17.0 → secator-0.19.0}/tests/__init__.py +0 -0
  181. {secator-0.17.0 → secator-0.19.0}/tests/fixtures/h8mail_breach.txt +0 -0
  182. {secator-0.17.0 → secator-0.19.0}/tests/fixtures/ls.py +0 -0
  183. {secator-0.17.0 → secator-0.19.0}/tests/fixtures/msfconsole_input.rc +0 -0
  184. {secator-0.17.0 → secator-0.19.0}/tests/fixtures/nmap_output.xml +0 -0
  185. {secator-0.17.0 → secator-0.19.0}/tests/integration/__init__.py +0 -0
  186. {secator-0.17.0 → secator-0.19.0}/tests/integration/all.yaml +0 -0
  187. {secator-0.17.0 → secator-0.19.0}/tests/integration/setup.sh +0 -0
  188. {secator-0.17.0 → secator-0.19.0}/tests/integration/teardown.sh +0 -0
  189. {secator-0.17.0 → secator-0.19.0}/tests/integration/test_addons.py +0 -0
  190. {secator-0.17.0 → secator-0.19.0}/tests/integration/test_celery.py +0 -0
  191. {secator-0.17.0 → secator-0.19.0}/tests/integration/test_scans.py +0 -0
  192. {secator-0.17.0 → secator-0.19.0}/tests/integration/test_tasks.py +0 -0
  193. {secator-0.17.0 → secator-0.19.0}/tests/integration/test_tasks_categories.py +0 -0
  194. {secator-0.17.0 → secator-0.19.0}/tests/integration/test_worker.py +0 -0
  195. {secator-0.17.0 → secator-0.19.0}/tests/integration/test_workflows.py +0 -0
  196. {secator-0.17.0 → secator-0.19.0}/tests/integration/wordlist.txt +0 -0
  197. {secator-0.17.0 → secator-0.19.0}/tests/integration/wordlist_dns.txt +0 -0
  198. {secator-0.17.0 → secator-0.19.0}/tests/integration/wordpress_toolbox/Dockerfile +0 -0
  199. {secator-0.17.0 → secator-0.19.0}/tests/integration/wordpress_toolbox/Makefile +0 -0
  200. {secator-0.17.0 → secator-0.19.0}/tests/performance/__init__.py +0 -0
  201. {secator-0.17.0 → secator-0.19.0}/tests/performance/loadtester.py +0 -0
  202. {secator-0.17.0 → secator-0.19.0}/tests/performance/test_worker.py +0 -0
  203. {secator-0.17.0 → secator-0.19.0}/tests/template/test_templates.py +0 -0
  204. {secator-0.17.0 → secator-0.19.0}/tests/unit/__init__.py +0 -0
  205. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_celery.py +0 -0
  206. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_cli.py +0 -0
  207. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_command.py +0 -0
  208. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_config.py +0 -0
  209. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_offline.py +0 -0
  210. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_runners.py +0 -0
  211. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_runners_helpers.py +0 -0
  212. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_scans.py +0 -0
  213. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_serializers.py +0 -0
  214. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_tasks.py +0 -0
  215. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_tasks_categories.py +0 -0
  216. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_template.py +0 -0
  217. {secator-0.17.0 → secator-0.19.0}/tests/unit/test_utils.py +0 -0

{secator-0.17.0 → secator-0.19.0}/.docker/build_all.sh

@@ -2,16 +2,28 @@
 
  # Define an array of distributions
  DISTROS=("alpine" "arch" "debian" "kali" "osx" "ubuntu")
+ BUILDER=$(which docker || which podman || which buildah)
+
+ if [ -z "$BUILDER" ]; then
+   echo "Error: No container builder found (docker, podman, or buildah required)"
+   exit 1
+ fi
+
+ echo "Using builder: $BUILDER"
+
+ mkdir -p .docker/logs/
 
  # Function to build an image
  build_image() {
    local DISTRO=$1
    local DOCKERFILE=".docker/Dockerfile.${DISTRO}"
+   local STDOUT_LOG=".docker/logs/${DISTRO}.stdout"
+   local STDERR_LOG=".docker/logs/${DISTRO}.stderr"
    local IMAGE_NAME="secator-${DISTRO}"
 
    if [ -f "$DOCKERFILE" ]; then
      echo "🚀 Building $IMAGE_NAME using $DOCKERFILE..."
-     docker build -t "$IMAGE_NAME" -f "$DOCKERFILE" . && \
+     $BUILDER build -t "$IMAGE_NAME" -f "$DOCKERFILE" . > $STDOUT_LOG 2> $STDERR_LOG && \
      echo "✅ Successfully built $IMAGE_NAME" || \
      echo "❌ Failed to build $IMAGE_NAME"
    else
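
With these changes the build script picks whichever of docker, podman, or buildah is on PATH and writes per-distro build output under .docker/logs/. A minimal usage sketch (assuming it is run from the repository root, which the relative Dockerfile paths require):

    # Build every distro image, then inspect the log of one build
    bash .docker/build_all.sh
    cat .docker/logs/debian.stderr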

secator-0.19.0/.dockerignore (new file)

@@ -0,0 +1,4 @@
+ .docker/logs
+ .git
+ .github
+ .gitignore

{secator-0.17.0 → secator-0.19.0}/.gitignore

@@ -1,3 +1,6 @@
+ # Build logs
+ .docker/logs
+
  # Byte-compiled / optimized / DLL files
  __pycache__/
  *.py[cod]

{secator-0.17.0 → secator-0.19.0}/CHANGELOG.md

@@ -1,5 +1,31 @@
  # Changelog
 
+ ## [0.19.0](https://github.com/freelabz/secator/compare/v0.18.0...v0.19.0) (2025-10-23)
+
+
+ ### Features
+
+ * add monitor thread ([#727](https://github.com/freelabz/secator/issues/727)) ([5377e77](https://github.com/freelabz/secator/commit/5377e77186c88b1331b7ddc4a5304a610ad3e253))
+
+ ## [0.18.0](https://github.com/freelabz/secator/compare/v0.17.0...v0.18.0) (2025-10-22)
+
+
+ ### Features
+
+ * **workflow:** update user hunt ([#688](https://github.com/freelabz/secator/issues/688)) ([e4f3e29](https://github.com/freelabz/secator/commit/e4f3e294c1668c66ed893305b0147df37cb4453a))
+
+
+ ### Bug Fixes
+
+ * broken sudo prompt because of os.setsid ([#722](https://github.com/freelabz/secator/issues/722)) ([5c77957](https://github.com/freelabz/secator/commit/5c77957cb78533b7e819c01c19de6e46eae0d2af))
+ * bugs and warnings in task data ([#714](https://github.com/freelabz/secator/issues/714)) ([845415b](https://github.com/freelabz/secator/commit/845415bb337f77867d49be66bbd23eaf307e5848))
+ * bup file flag ([#725](https://github.com/freelabz/secator/issues/725)) ([0622ef5](https://github.com/freelabz/secator/commit/0622ef509b1dc33996dd5e89aea5cd802160744d))
+ * context propagation for subtasks ([#720](https://github.com/freelabz/secator/issues/720)) ([d636aa6](https://github.com/freelabz/secator/commit/d636aa66562fd030299fce20f3abaeca283eaf74))
+ * **fping:** remove bad options, add new ones ([#713](https://github.com/freelabz/secator/issues/713)) ([4356b4f](https://github.com/freelabz/secator/commit/4356b4f120043e2f94973a9c0061345a06efaeda))
+ * maigret version pin ([#721](https://github.com/freelabz/secator/issues/721)) ([488b5a1](https://github.com/freelabz/secator/commit/488b5a1b994e6d3dfadc409d09aaaeb93ff60fb4))
+ * subfinder integration tests ([#724](https://github.com/freelabz/secator/issues/724)) ([8163152](https://github.com/freelabz/secator/commit/8163152efb09f3854ec9fa7125b16084eda4c98f))
+ * wpscan errors and wordpress workflow ([#723](https://github.com/freelabz/secator/issues/723)) ([8ee32aa](https://github.com/freelabz/secator/commit/8ee32aacb1310cfb833bdcf696477ae29eb271c7))
+
  ## [0.17.0](https://github.com/freelabz/secator/compare/v0.16.5...v0.17.0) (2025-09-06)
 
 

{secator-0.17.0 → secator-0.19.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: secator
- Version: 0.17.0
+ Version: 0.19.0
  Summary: The pentester's swiss knife.
  Project-URL: Homepage, https://github.com/freelabz/secator
  Project-URL: Issues, https://github.com/freelabz/secator/issues

{secator-0.17.0 → secator-0.19.0}/helm/templates/redis-service.yaml

@@ -2,7 +2,6 @@ apiVersion: v1
  kind: Service
  metadata:
    name: {{ .Values.redis.name }}
-   namespace: {{ .Values.namespace }}
  spec:
    type: ClusterIP
    ports:

{secator-0.17.0 → secator-0.19.0}/helm/templates/redis.yaml

@@ -2,7 +2,6 @@ apiVersion: apps/v1
  kind: StatefulSet
  metadata:
    name: {{ .Values.redis.name }}
-   namespace: {{ .Values.namespace }}
  spec:
    selector:
      matchLabels:

@@ -13,10 +12,15 @@ spec:
    metadata:
      labels:
        app: redis
-   spec:
+   spec:
+     automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
      containers:
      - name: {{ .Values.redis.name }}
        image: {{ .Values.redis.image }}
+       securityContext:
+         {{- toYaml .Values.redis.securityContext | nindent 10 }}
+       resources:
+         {{- toYaml .Values.redis.resources | nindent 10 }}
        ports:
        - containerPort: {{ .Values.redis.port }}
          name: client

secator-0.19.0/helm/templates/secator-manager.yaml (new file)

@@ -0,0 +1,22 @@
+ apiVersion: v1
+ kind: Pod
+ metadata:
+   name: {{ .Values.secatorManager.name }}
+   labels:
+     name: secator-manager
+ spec:
+   automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
+   containers:
+   - name: secator-manager
+     image: {{ .Values.secatorManager.image }}
+     securityContext:
+       {{- toYaml .Values.secatorManager.securityContext | nindent 6 }}
+     resources:
+       {{- toYaml .Values.secatorManager.resources | nindent 6 }}
+     command: ["sleep"]
+     args: ["infinity"]
+     env:
+     - name: SECATOR_CELERY_BROKER_URL
+       value: "redis://{{ .Values.redis.name }}:{{ .Values.redis.port }}/0"
+     - name: SECATOR_CELERY_RESULT_BACKEND
+       value: "redis://{{ .Values.redis.name }}:{{ .Values.redis.port }}/0"

{secator-0.17.0 → secator-0.19.0}/helm/templates/secator-worker.yaml

@@ -2,8 +2,8 @@ apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: {{ .Values.secatorWorker.name }}
-   namespace: {{ .Values.namespace }}
  spec:
+   replicas: {{ .Values.secatorWorker.replicas }}
    selector:
      matchLabels:
        app: secator-worker

@@ -12,13 +12,18 @@ spec:
      labels:
        app: secator-worker
    spec:
+     automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
      containers:
      - name: {{ .Values.secatorWorker.name }}
        image: {{ .Values.secatorWorker.image }}
+       securityContext:
+         {{- toYaml .Values.secatorWorker.securityContext | nindent 10 }}
+       resources:
+         {{- toYaml .Values.secatorWorker.resources | nindent 10 }}
        command: ["secator"]
        args: ["worker"]
        env:
        - name: SECATOR_CELERY_BROKER_URL
-         value: "redis://{{ .Values.redis.name }}:6379/0"
+         value: "redis://{{ .Values.redis.name }}:{{ .Values.redis.port }}/0"
        - name: SECATOR_CELERY_RESULT_BACKEND
-         value: "redis://{{ .Values.redis.name }}:6379/0"
+         value: "redis://{{ .Values.redis.name }}:{{ .Values.redis.port }}/0"

secator-0.19.0/helm/values.yaml (new file)

@@ -0,0 +1,58 @@
+ # Default values for Secator
+ # This is a YAML-formatted file.
+ # Declare variables to be passed into your templates.
+
+ # TODO: Create service accounts instead. Assess for RBAC. Add tooling for kubernetes to image, or side-car.
+ # For instance, the secator Manager pod could have sufficient access to list/get/set worker replicas for scaling from
+ # from the pod itself. Perhaps another add-on?
+ automountServiceAccountToken: false
+
+ secatorManager:
+   name: secator-manager
+   image: "freelabz/secator"
+   securityContext: {}
+
+   # resources: cpu/memory requests and limits for managers
+   resources: {}
+
+   # Empty if using default repository
+   repository:
+   # Empty if using tag "latest"
+   tag:
+
+ secatorWorker:
+   name: secator-worker
+   image: "freelabz/secator"
+   securityContext: {}
+
+   # resources: cpu/memory requests and limits for workers
+   resources: {}
+   replicas: 1
+
+   # Empty if using default repository
+   repository:
+   # Empty if using tag "latest"
+   tag:
+
+ redis:
+   name: redis
+   image: "redis:alpine"
+   # Run redis with least privilege.
+   securityContext:
+     runAsUser: 999
+     runAsGroup: 1000
+     runAsNonRoot: true
+     allowPrivilegeEscalation: false
+     readOnlyRootFilesystem: true
+     capabilities:
+       drop:
+         - ALL
+   # resources: cpu/memory requests and limits for redis
+   resources: {}
+
+   # Empty if using default repository
+   repository:
+   # Empty if using tag "latest"
+   tag:
+   replicas: 1
+   port: 6379
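
Since the chart now exposes worker replicas plus per-component security contexts and resources, an install can be tuned from the command line. A sketch, assuming the chart is installed from the in-repo helm/ directory (the release name and values are illustrative):

    # Install with three workers and an explicit memory request for redis
    helm install secator ./helm \
      --set secatorWorker.replicas=3 \
      --set redis.resources.requests.memory=128Mi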

{secator-0.17.0 → secator-0.19.0}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = 'hatchling.build'
 
  [project]
  name = 'secator'
- version = "0.17.0"
+ version = "0.19.0"
  authors = [{ name = 'FreeLabz', email = 'sales@freelabz.com' }]
  readme = 'README.md'
  description = "The pentester's swiss knife."

{secator-0.17.0 → secator-0.19.0}/secator/config.py

@@ -67,6 +67,8 @@ class Celery(StrictModel):
    task_acks_late: bool = False
    task_send_sent_event: bool = False
    task_reject_on_worker_lost: bool = False
+   task_max_timeout: int = -1
+   task_memory_limit_mb: int = -1
    worker_max_tasks_per_child: int = 20
    worker_prefetch_multiplier: int = 1
    worker_send_task_events: bool = False
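
The two new `celery.task_max_timeout` and `celery.task_memory_limit_mb` settings feed the monitor thread added in command.py further down, and take over from the `security.memory_limit_mb` option removed in the next hunk. A hedged example of overriding them via environment variables, extrapolated from the `SECATOR_CELERY_*` naming visible in the Helm templates above (the exact variable names are an assumption; only the YAML keys themselves are confirmed by this diff):

    # Kill commands running longer than 1 hour or exceeding ~2 GB RSS (values illustrative)
    export SECATOR_CELERY_TASK_MAX_TIMEOUT=3600
    export SECATOR_CELERY_TASK_MEMORY_LIMIT_MB=2048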

@@ -100,7 +102,6 @@ class Security(StrictModel):
    allow_local_file_access: bool = True
    auto_install_commands: bool = True
    force_source_install: bool = False
-   memory_limit_mb: int = -1
 
 
  class HTTP(StrictModel):

@@ -145,7 +146,7 @@ class Wordlists(StrictModel):
    templates: Dict[str, str] = {
      'bo0m_fuzz': 'https://raw.githubusercontent.com/Bo0oM/fuzz.txt/master/fuzz.txt',
      'combined_subdomains': 'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/combined_subdomains.txt', # noqa: E501
-     'directory_list_small': 'https://raw.githubusercontent.com/danielmiessler/SecLists/refs/heads/master/Discovery/Web-Content/directory-list-2.3-small.txt', # noqa: E501
+     'directory_list_small': 'https://gist.githubusercontent.com/sl4v/c087e36164e74233514b/raw/c51a811c70bbdd87f4725521420cc30e7232b36d/directory-list-2.3-small.txt', # noqa: E501
    }
    lists: Dict[str, List[str]] = {}
 

secator-0.19.0/secator/configs/workflows/user_hunt.yaml (new file)

@@ -0,0 +1,24 @@
+ type: workflow
+ name: user_hunt
+ alias: userhunt
+ description: User account search
+ tags: [user_account]
+ input_types:
+ - slug
+ - string
+ - email
+
+ tasks:
+   _group/hunt_users:
+     maigret:
+       description: Hunt user accounts
+       targets_:
+       - type: target
+         field: name
+         condition: target.type != 'email'
+     h8mail:
+       description: Find password leaks
+       targets_:
+       - type: target
+         field: name
+         condition: target.type == 'email'
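
The rewritten workflow splits targets by type: maigret handles usernames and h8mail handles emails. A possible invocation (targets are illustrative):

    # Hunt a username across sites and check an email for breaches in one run
    secator w user_hunt johndoe john.doe@example.com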

{secator-0.17.0 → secator-0.19.0}/secator/configs/workflows/wordpress.yaml

@@ -5,15 +5,28 @@ description: Wordpress vulnerability scan
  tags: [http, wordpress, vulnerability]
  input_types:
  - url
+ - ip
+ - host
+ - host:port
 
  tasks:
+   httpx:
+     description: URL probe
+     tech_detect: True
+
    _group/hunt_wordpress:
      nuclei:
        description: Nuclei Wordpress scan
        tags: [wordpress]
+       targets_:
+       - url.url
 
      wpscan:
        description: WPScan
+       targets_:
+       - url.url
 
      wpprobe:
        description: WPProbe
+       targets_:
+       - url.url
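
With the added httpx probe and the broader input types, the workflow can now be pointed at a bare host or IP instead of a full URL; nuclei, wpscan, and wpprobe then run against the probed URLs. For example (target is illustrative):

    # Probe the host first, then fan out the WordPress scanners over discovered URLs
    secator w wordpress blog.example.com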

{secator-0.17.0 → secator-0.19.0}/secator/installer.py

@@ -150,7 +150,7 @@ class PackageInstaller:
 
  # Installer cmd
  cmd = distribution.pm_installer
- if CONFIG.security.autoinstall_commands and IN_CELERY_WORKER_PROCESS:
+ if CONFIG.security.auto_install_commands and IN_CELERY_WORKER_PROCESS:
    cmd = f'flock /tmp/install.lock {cmd}'
  if getpass.getuser() != 'root':
    cmd = f'sudo {cmd}'

{secator-0.17.0 → secator-0.19.0}/secator/output_types/stat.py

@@ -11,6 +11,7 @@ class Stat(OutputType):
  pid: int
  cpu: int
  memory: int
+ memory_limit: int
  net_conns: int = field(default=None, repr=True)
  extra_data: dict = field(default_factory=dict)
  _source: str = field(default='', repr=True, compare=False)

@@ -26,11 +27,15 @@
  _sort_by = ('name', 'pid')
 
  def __str__(self) -> str:
-   return f'{self.name} [pid={self.pid}] [cpu={self.cpu:.2f}%] [memory={self.memory:.2f}%]'
+   return f'{self.name} ([bold]pid[/]:{self.pid}) ([bold]cpu[/]:{self.cpu:.2f}%) ([bold]memory[/]:{self.memory:.2f}MB / {self.memory_limit}MB)' # noqa: E501
 
  def __repr__(self) -> str:
-   s = rf'[dim yellow3]📊 {self.name} \[pid={self.pid}] \[cpu={self.cpu:.2f}%] \[memory={self.memory:.2f}%]'
+   s = rf'[dim yellow3]📊 {self.name} ([bold]pid[/]:{self.pid}) ([bold]cpu[/]:{self.cpu:.2f}%)'
+   s += rf' ([bold]memory[/]:{self.memory:.2f}MB'
+   if self.memory_limit != -1:
+     s += rf' / {self.memory_limit}MB'
+   s += ')'
    if self.net_conns:
-     s += rf' \[connections={self.net_conns}]'
+     s += rf' ([bold]connections[/]:{self.net_conns})'
    s += ' [/]'
    return rich_to_ansi(s)

{secator-0.17.0 → secator-0.19.0}/secator/runners/_base.py

@@ -179,14 +179,14 @@ class Runner:
  # Add prior results to runner results
  self.debug(f'adding {len(results)} prior results to runner', sub='init')
  if CONFIG.addons.mongodb.enabled:
-   self.debug('adding prior results from MongoDB', sub='init')
+   self.debug(f'loading {len(results)} results from MongoDB', sub='init')
    from secator.hooks.mongodb import get_results
    results = get_results(results)
  for result in results:
    self.add_result(result, print=False, output=False, hooks=False, queue=not self.has_parent)
 
  # Determine inputs
- self.debug(f'resolving inputs with dynamic opts ({len(self.dynamic_opts)})', obj=self.dynamic_opts, sub='init')
+ self.debug(f'resolving inputs with {len(self.dynamic_opts)} dynamic opts', obj=self.dynamic_opts, sub='init')
  self.inputs = [inputs] if not isinstance(inputs, list) else inputs
  self.inputs = list(set(self.inputs))
  targets = [Target(name=target) for target in self.inputs]

@@ -463,12 +463,11 @@
  if item._uuid and item._uuid in self.uuids:
    return
 
- # Keep existing ancestor id in context
- ancestor_id = item._context.get('ancestor_id', None)
-
- # Set context
- item._context.update(self.context)
- item._context['ancestor_id'] = ancestor_id or self.ancestor_id
+ # Update context with runner info
+ ctx = item._context.copy()
+ item._context = self.context.copy()
+ item._context.update(ctx)
+ item._context['ancestor_id'] = ctx.get('ancestor_id') or self.ancestor_id
 
  # Set uuid
  if not item._uuid:

@@ -756,6 +755,7 @@
  'last_updated_db': self.last_updated_db,
  'context': self.context,
  'errors': [e.toDict() for e in self.errors],
+ 'warnings': [w.toDict() for w in self.warnings],
  })
  return data
 

@@ -788,8 +788,6 @@
    continue
  result = hook(self, *args)
  self.debug('hook success', obj={'name': hook_type, 'fun': fun}, sub=sub, verbose='item' in sub) # noqa: E501
- if isinstance(result, Error):
-   self.add_result(result, hooks=False)
 except Exception as e:
  self.debug('hook failed', obj={'name': hook_type, 'fun': fun}, sub=sub) # noqa: E501
  error = Error.from_exception(e, message=f'Hook "{fun}" execution failed')

{secator-0.17.0 → secator-0.19.0}/secator/runners/command.py

@@ -2,11 +2,13 @@ import copy
  import getpass
  import logging
  import os
+ import queue
  import re
  import shlex
  import signal
  import subprocess
  import sys
+ import threading
 
  from time import time
 

@@ -178,6 +180,13 @@
  # Process
  self.process = None
 
+ # Monitor thread (lazy initialization)
+ self.monitor_thread = None
+ self.monitor_stop_event = None
+ self.monitor_queue = None
+ self.process_start_time = None
+ # self.retry_count = 0 # TODO: remove this
+
  # Sudo
  self.requires_sudo = False
 

@@ -205,6 +214,13 @@
    item_loaders.append(instance_func)
    self.item_loaders = item_loaders
 
+ def _init_monitor_objects(self):
+   """Initialize monitor thread objects when needed (lazy initialization)."""
+   if self.monitor_stop_event is None:
+     self.monitor_stop_event = threading.Event()
+   if self.monitor_queue is None:
+     self.monitor_queue = queue.Queue()
+
  def toDict(self):
    res = super().toDict()
    res.update({

@@ -419,10 +435,13 @@
  self.print_command()
 
  # Check for sudo requirements and prepare the password if needed
- sudo_password, error = self._prompt_sudo(self.cmd)
- if error:
-   yield Error(message=error)
-   return
+ sudo_required = re.search(r'\bsudo\b', self.cmd)
+ sudo_password = None
+ if sudo_required:
+   sudo_password, error = self._prompt_sudo(self.cmd)
+   if error:
+     yield Error(message=error)
+     return
 
  # Prepare cmds
  command = self.cmd if self.shell else shlex.split(self.cmd)

@@ -440,7 +459,7 @@
  # Output and results
  self.return_code = 0
  self.killed = False
- self.memory_limit_mb = CONFIG.security.memory_limit_mb
+ self.memory_limit_mb = CONFIG.celery.task_memory_limit_mb
 
  # Run the command using subprocess
  env = os.environ

@@ -450,11 +469,18 @@
  stdout=subprocess.PIPE,
  stderr=subprocess.STDOUT,
  universal_newlines=True,
- preexec_fn=os.setsid,
+ preexec_fn=os.setsid if not sudo_required else None,
  shell=self.shell,
  env=env,
  cwd=self.cwd)
 
+ # Initialize monitor objects and start monitor thread
+ self._init_monitor_objects()
+ self.process_start_time = time()
+ self.monitor_stop_event.clear()
+ self.monitor_thread = threading.Thread(target=self._monitor_process, daemon=True)
+ self.monitor_thread.start()
+
  # If sudo password is provided, send it to stdin
  if sudo_password:
    self.process.stdin.write(f"{sudo_password}\n")

@@ -466,6 +492,7 @@
  if not line:
    break
  yield from self.process_line(line)
+ yield from self.process_monitor_queue()
 
  # Run hooks after cmd has completed successfully
  result = self.run_hooks('on_cmd_done', sub='end')

@@ -475,11 +502,6 @@
  except FileNotFoundError as e:
    yield from self.handle_file_not_found(e)
 
- except MemoryError as e:
-   self.debug(f'{self.unique_name}: {type(e).__name__}.', sub='end')
-   self.stop_process(exit_ok=True, sig=signal.SIGTERM)
-   yield Warning(message=f'Memory limit {self.memory_limit_mb}MB reached for {self.unique_name}')
-
  except BaseException as e:
    self.debug(f'{self.unique_name}: {type(e).__name__}.', sub='end')
    self.stop_process()

@@ -529,13 +551,16 @@
  if self.no_process:
    return
 
- # Yield command stats (CPU, memory, conns ...)
- # TODO: enable stats support with timer
- if self.last_updated_stat and (time() - self.last_updated_stat) < CONFIG.runners.stat_update_frequency:
+ def process_monitor_queue(self):
+   """Process and yield any queued items from monitor thread."""
+   if self.monitor_queue is None:
    return
-
- yield from self.stats(self.memory_limit_mb)
- self.last_updated_stat = time()
+   while not self.monitor_queue.empty():
+     try:
+       monitor_item = self.monitor_queue.get_nowait()
+       yield monitor_item
+     except queue.Empty:
+       break
 
  def print_description(self):
    """Print description"""

@@ -582,6 +607,100 @@
  if exit_ok:
    self.exit_ok = True
 
+ def _stop_monitor_thread(self):
+   """Stop monitor thread."""
+   if self.monitor_thread and self.monitor_thread.is_alive() and self.monitor_stop_event:
+     self.monitor_stop_event.set()
+     self.monitor_thread.join(timeout=2.0)
+
+ def _monitor_process(self):
+   """Monitor thread that checks process health and kills if necessary."""
+   last_stats_time = 0
+
+   while not self.monitor_stop_event.is_set():
+     if not self.process or not self.process.pid:
+       break
+
+     try:
+       current_time = time()
+       self.debug('Collecting monitor items', sub='monitor')
+
+       # Collect and queue stats at regular intervals
+       if (current_time - last_stats_time) >= CONFIG.runners.stat_update_frequency:
+         stats_items = list(self._collect_stats())
+         for stat_item in stats_items:
+           if self.monitor_queue is not None:
+             self.monitor_queue.put(stat_item)
+         last_stats_time = current_time
+
+       # Check memory usage from collected stats
+       if self.memory_limit_mb and self.memory_limit_mb != -1:
+         total_mem = sum(stat_item.extra_data.get('memory_info', {}).get('rss', 0) / 1024 / 1024 for stat_item in stats_items) # noqa: E501
+         if total_mem > self.memory_limit_mb:
+           warning = Warning(message=f'Memory limit {self.memory_limit_mb}MB exceeded (actual: {total_mem:.2f}MB)')
+           if self.monitor_queue is not None:
+             self.monitor_queue.put(warning)
+           self.stop_process(exit_ok=True, sig=signal.SIGTERM)
+           break
+
+       # Check execution time
+       if self.process_start_time and CONFIG.celery.task_max_timeout != -1:
+         elapsed_time = current_time - self.process_start_time
+         if elapsed_time > CONFIG.celery.task_max_timeout:
+           warning = Warning(message=f'Task timeout {CONFIG.celery.task_max_timeout}s exceeded')
+           if self.monitor_queue is not None:
+             self.monitor_queue.put(warning)
+           self.stop_process(exit_ok=True, sig=signal.SIGTERM)
+           break
+
+       # Check retry count
+       # TODO: remove this
+       # if CONFIG.celery.task_max_retries and self.retry_count >= CONFIG.celery.task_max_retries:
+       # warning = Warning(message=f'Max retries {CONFIG.celery.task_max_retries} exceeded (actual: {self.retry_count})')
+       # self.monitor_queue.put(warning)
+       # self.stop_process(exit_ok=False, sig=signal.SIGTERM)
+       # break
+
+     except Exception as e:
+       self.debug(f'Monitor thread error: {e}', sub='monitor')
+       warning = Warning(message=f'Monitor thread error: {e}')
+       if self.monitor_queue is not None:
+         self.monitor_queue.put(warning)
+       break
+
+     # Sleep for a short interval before next check (stat update frequency)
+     self.monitor_stop_event.wait(CONFIG.runners.stat_update_frequency)
+
+ def _collect_stats(self):
+   """Collect stats about the current running process, if any."""
+   if not self.process or not self.process.pid:
+     return
+   proc = psutil.Process(self.process.pid)
+   stats = Command.get_process_info(proc, children=True)
+   total_mem = 0
+   for info in stats:
+     name = info['name']
+     pid = info['pid']
+     cpu_percent = info['cpu_percent']
+     # mem_percent = info['memory_percent']
+     mem_rss = round(info['memory_info']['rss'] / 1024 / 1024, 2)
+     total_mem += mem_rss
+     self.debug(f'{name} {pid} {mem_rss}MB', sub='monitor')
+     net_conns = info.get('net_connections') or []
+     extra_data = {k: v for k, v in info.items() if k not in ['cpu_percent', 'memory_percent', 'net_connections']}
+     yield Stat(
+       name=name,
+       pid=pid,
+       cpu=cpu_percent,
+       memory=mem_rss,
+       memory_limit=self.memory_limit_mb,
+       net_conns=len(net_conns),
+       extra_data=extra_data
+     )
+   # self.debug(f'Total mem: {total_mem}MB, memory limit: {self.memory_limit_mb}', sub='monitor')
+   # if self.memory_limit_mb and self.memory_limit_mb != -1 and total_mem > self.memory_limit_mb:
+   # raise MemoryError(f'Memory limit {self.memory_limit_mb}MB reached for {self.unique_name}')
+
  def stats(self, memory_limit_mb=None):
    """Gather stats about the current running process, if any."""
    if not self.process or not self.process.pid:

@@ -596,7 +715,7 @@
  mem_percent = info['memory_percent']
  mem_rss = round(info['memory_info']['rss'] / 1024 / 1024, 2)
  total_mem += mem_rss
- self.debug(f'{name} {pid} {mem_rss}MB', sub='stats')
+ self.debug(f'process: {name} pid: {pid} memory: {mem_rss}MB', sub='stats')
  net_conns = info.get('net_connections') or []
  extra_data = {k: v for k, v in info.items() if k not in ['cpu_percent', 'memory_percent', 'net_connections']}
  yield Stat(

@@ -688,7 +807,7 @@
  ['sudo', '-S', '-p', '', 'true'],
  input=sudo_password + "\n",
  text=True,
- capture_output=True
+ capture_output=True,
  )
  if result.returncode == 0:
    return sudo_password, None # Password is correct

@@ -698,6 +817,8 @@
 
  def _wait_for_end(self):
    """Wait for process to finish and process output and return code."""
+   self._stop_monitor_thread()
+   yield from self.process_monitor_queue()
    if not self.process:
      return
    for line in self.process.stdout.readlines():