bps-kit 1.0.1 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (368)
  1. package/package.json +1 -1
  2. package/templates/.agents/agents/backend-specialist.md +263 -0
  3. package/templates/.agents/agents/code-archaeologist.md +106 -0
  4. package/templates/.agents/agents/database-architect.md +226 -0
  5. package/templates/.agents/agents/debugger.md +225 -0
  6. package/templates/.agents/agents/devops-engineer.md +242 -0
  7. package/templates/.agents/agents/documentation-writer.md +104 -0
  8. package/templates/.agents/agents/explorer-agent.md +73 -0
  9. package/templates/.agents/agents/frontend-specialist.md +593 -0
  10. package/templates/.agents/agents/game-developer.md +162 -0
  11. package/templates/.agents/agents/mobile-developer.md +377 -0
  12. package/templates/.agents/agents/orchestrator.md +416 -0
  13. package/templates/.agents/agents/penetration-tester.md +188 -0
  14. package/templates/.agents/agents/performance-optimizer.md +187 -0
  15. package/templates/.agents/agents/product-manager.md +112 -0
  16. package/templates/.agents/agents/product-owner.md +95 -0
  17. package/templates/.agents/agents/project-planner.md +406 -0
  18. package/templates/.agents/agents/qa-automation-engineer.md +103 -0
  19. package/templates/.agents/agents/security-auditor.md +170 -0
  20. package/templates/.agents/agents/seo-specialist.md +111 -0
  21. package/templates/.agents/agents/test-engineer.md +158 -0
  22. package/templates/.agents/rules/GEMINI.md +219 -0
  23. package/templates/.agents/scripts/auto_preview.py +148 -0
  24. package/templates/.agents/scripts/checklist.py +217 -0
  25. package/templates/.agents/scripts/session_manager.py +120 -0
  26. package/templates/.agents/scripts/verify_all.py +327 -0
  27. package/templates/.agents/workflows/brainstorm.md +113 -0
  28. package/templates/.agents/workflows/create.md +59 -0
  29. package/templates/.agents/workflows/debug.md +103 -0
  30. package/templates/.agents/workflows/deploy.md +176 -0
  31. package/templates/.agents/workflows/enhance.md +63 -0
  32. package/templates/.agents/workflows/orchestrate.md +237 -0
  33. package/templates/.agents/workflows/plan.md +89 -0
  34. package/templates/.agents/workflows/preview.md +81 -0
  35. package/templates/.agents/workflows/setup-brain.md +39 -0
  36. package/templates/.agents/workflows/status.md +86 -0
  37. package/templates/.agents/workflows/test.md +144 -0
  38. package/templates/.agents/workflows/ui-ux-pro-max.md +296 -0
  39. package/templates/skills_normal/api-patterns/scripts/api_validator.py +211 -0
  40. package/templates/skills_normal/database-design/scripts/schema_validator.py +172 -0
  41. package/templates/skills_normal/frontend-design/scripts/accessibility_checker.py +183 -0
  42. package/templates/skills_normal/frontend-design/scripts/ux_audit.py +722 -0
  43. package/templates/skills_normal/git-pushing/scripts/smart_commit.sh +19 -0
  44. package/templates/skills_normal/lint-and-validate/scripts/lint_runner.py +184 -0
  45. package/templates/skills_normal/lint-and-validate/scripts/type_coverage.py +173 -0
  46. package/templates/skills_normal/performance-profiling/scripts/lighthouse_audit.py +76 -0
  47. package/templates/skills_normal/senior-fullstack/scripts/code_quality_analyzer.py +114 -0
  48. package/templates/skills_normal/senior-fullstack/scripts/fullstack_scaffolder.py +114 -0
  49. package/templates/skills_normal/senior-fullstack/scripts/project_scaffolder.py +114 -0
  50. package/templates/skills_normal/seo-fundamentals/scripts/seo_checker.py +219 -0
  51. package/templates/skills_normal/testing-patterns/scripts/test_runner.py +219 -0
  52. package/templates/skills_normal/vulnerability-scanner/scripts/security_scan.py +458 -0
  53. package/templates/vault/007/scripts/config.py +472 -0
  54. package/templates/vault/007/scripts/full_audit.py +1306 -0
  55. package/templates/vault/007/scripts/quick_scan.py +481 -0
  56. package/templates/vault/007/scripts/requirements.txt +26 -0
  57. package/templates/vault/007/scripts/scanners/__init__.py +0 -0
  58. package/templates/vault/007/scripts/scanners/dependency_scanner.py +1305 -0
  59. package/templates/vault/007/scripts/scanners/injection_scanner.py +1104 -0
  60. package/templates/vault/007/scripts/scanners/secrets_scanner.py +1008 -0
  61. package/templates/vault/007/scripts/score_calculator.py +693 -0
  62. package/templates/vault/agent-orchestrator/scripts/match_skills.py +329 -0
  63. package/templates/vault/agent-orchestrator/scripts/orchestrate.py +304 -0
  64. package/templates/vault/agent-orchestrator/scripts/requirements.txt +1 -0
  65. package/templates/vault/agent-orchestrator/scripts/scan_registry.py +508 -0
  66. package/templates/vault/ai-studio-image/scripts/config.py +613 -0
  67. package/templates/vault/ai-studio-image/scripts/generate.py +630 -0
  68. package/templates/vault/ai-studio-image/scripts/prompt_engine.py +424 -0
  69. package/templates/vault/ai-studio-image/scripts/requirements.txt +4 -0
  70. package/templates/vault/ai-studio-image/scripts/templates.py +349 -0
  71. package/templates/vault/android_ui_verification/scripts/verify_ui.sh +32 -0
  72. package/templates/vault/apify-audience-analysis/reference/scripts/run_actor.js +363 -0
  73. package/templates/vault/apify-brand-reputation-monitoring/reference/scripts/run_actor.js +363 -0
  74. package/templates/vault/apify-competitor-intelligence/reference/scripts/run_actor.js +363 -0
  75. package/templates/vault/apify-content-analytics/reference/scripts/run_actor.js +363 -0
  76. package/templates/vault/apify-ecommerce/reference/scripts/package.json +3 -0
  77. package/templates/vault/apify-ecommerce/reference/scripts/run_actor.js +369 -0
  78. package/templates/vault/apify-influencer-discovery/reference/scripts/run_actor.js +363 -0
  79. package/templates/vault/apify-lead-generation/reference/scripts/run_actor.js +363 -0
  80. package/templates/vault/apify-market-research/reference/scripts/run_actor.js +363 -0
  81. package/templates/vault/apify-trend-analysis/reference/scripts/run_actor.js +363 -0
  82. package/templates/vault/apify-ultimate-scraper/reference/scripts/run_actor.js +363 -0
  83. package/templates/vault/audio-transcriber/scripts/install-requirements.sh +190 -0
  84. package/templates/vault/audio-transcriber/scripts/transcribe.py +486 -0
  85. package/templates/vault/claude-monitor/scripts/api_bench.py +240 -0
  86. package/templates/vault/claude-monitor/scripts/config.py +69 -0
  87. package/templates/vault/claude-monitor/scripts/health_check.py +362 -0
  88. package/templates/vault/claude-monitor/scripts/monitor.py +296 -0
  89. package/templates/vault/content-creator/scripts/brand_voice_analyzer.py +185 -0
  90. package/templates/vault/content-creator/scripts/seo_optimizer.py +419 -0
  91. package/templates/vault/context-agent/scripts/active_context.py +227 -0
  92. package/templates/vault/context-agent/scripts/compressor.py +149 -0
  93. package/templates/vault/context-agent/scripts/config.py +69 -0
  94. package/templates/vault/context-agent/scripts/context_loader.py +155 -0
  95. package/templates/vault/context-agent/scripts/context_manager.py +302 -0
  96. package/templates/vault/context-agent/scripts/models.py +103 -0
  97. package/templates/vault/context-agent/scripts/project_registry.py +132 -0
  98. package/templates/vault/context-agent/scripts/requirements.txt +6 -0
  99. package/templates/vault/context-agent/scripts/search.py +115 -0
  100. package/templates/vault/context-agent/scripts/session_parser.py +206 -0
  101. package/templates/vault/context-agent/scripts/session_summary.py +319 -0
  102. package/templates/vault/context-guardian/scripts/context_snapshot.py +229 -0
  103. package/templates/vault/docx/ooxml/scripts/pack.py +159 -0
  104. package/templates/vault/docx/ooxml/scripts/unpack.py +29 -0
  105. package/templates/vault/docx/ooxml/scripts/validate.py +69 -0
  106. package/templates/vault/docx/ooxml/scripts/validation/__init__.py +15 -0
  107. package/templates/vault/docx/ooxml/scripts/validation/base.py +951 -0
  108. package/templates/vault/docx/ooxml/scripts/validation/docx.py +274 -0
  109. package/templates/vault/docx/ooxml/scripts/validation/pptx.py +315 -0
  110. package/templates/vault/docx/ooxml/scripts/validation/redlining.py +279 -0
  111. package/templates/vault/docx/scripts/__init__.py +1 -0
  112. package/templates/vault/docx/scripts/document.py +1276 -0
  113. package/templates/vault/docx/scripts/templates/comments.xml +3 -0
  114. package/templates/vault/docx/scripts/templates/commentsExtended.xml +3 -0
  115. package/templates/vault/docx/scripts/templates/commentsExtensible.xml +3 -0
  116. package/templates/vault/docx/scripts/templates/commentsIds.xml +3 -0
  117. package/templates/vault/docx/scripts/templates/people.xml +3 -0
  118. package/templates/vault/docx/scripts/utilities.py +374 -0
  119. package/templates/vault/docx-official/ooxml/scripts/pack.py +159 -0
  120. package/templates/vault/docx-official/ooxml/scripts/unpack.py +29 -0
  121. package/templates/vault/docx-official/ooxml/scripts/validate.py +69 -0
  122. package/templates/vault/docx-official/ooxml/scripts/validation/__init__.py +15 -0
  123. package/templates/vault/docx-official/ooxml/scripts/validation/base.py +951 -0
  124. package/templates/vault/docx-official/ooxml/scripts/validation/docx.py +274 -0
  125. package/templates/vault/docx-official/ooxml/scripts/validation/pptx.py +315 -0
  126. package/templates/vault/docx-official/ooxml/scripts/validation/redlining.py +279 -0
  127. package/templates/vault/docx-official/scripts/__init__.py +1 -0
  128. package/templates/vault/docx-official/scripts/document.py +1276 -0
  129. package/templates/vault/docx-official/scripts/templates/comments.xml +3 -0
  130. package/templates/vault/docx-official/scripts/templates/commentsExtended.xml +3 -0
  131. package/templates/vault/docx-official/scripts/templates/commentsExtensible.xml +3 -0
  132. package/templates/vault/docx-official/scripts/templates/commentsIds.xml +3 -0
  133. package/templates/vault/docx-official/scripts/templates/people.xml +3 -0
  134. package/templates/vault/docx-official/scripts/utilities.py +374 -0
  135. package/templates/vault/geo-fundamentals/scripts/geo_checker.py +289 -0
  136. package/templates/vault/helm-chart-scaffolding/scripts/validate-chart.sh +244 -0
  137. package/templates/vault/i18n-localization/scripts/i18n_checker.py +241 -0
  138. package/templates/vault/instagram/scripts/account_setup.py +233 -0
  139. package/templates/vault/instagram/scripts/analyze.py +221 -0
  140. package/templates/vault/instagram/scripts/api_client.py +444 -0
  141. package/templates/vault/instagram/scripts/auth.py +411 -0
  142. package/templates/vault/instagram/scripts/comments.py +160 -0
  143. package/templates/vault/instagram/scripts/config.py +111 -0
  144. package/templates/vault/instagram/scripts/db.py +467 -0
  145. package/templates/vault/instagram/scripts/export.py +138 -0
  146. package/templates/vault/instagram/scripts/governance.py +233 -0
  147. package/templates/vault/instagram/scripts/hashtags.py +114 -0
  148. package/templates/vault/instagram/scripts/insights.py +170 -0
  149. package/templates/vault/instagram/scripts/media.py +65 -0
  150. package/templates/vault/instagram/scripts/messages.py +103 -0
  151. package/templates/vault/instagram/scripts/profile.py +58 -0
  152. package/templates/vault/instagram/scripts/publish.py +449 -0
  153. package/templates/vault/instagram/scripts/requirements.txt +5 -0
  154. package/templates/vault/instagram/scripts/run_all.py +189 -0
  155. package/templates/vault/instagram/scripts/schedule.py +189 -0
  156. package/templates/vault/instagram/scripts/serve_api.py +234 -0
  157. package/templates/vault/instagram/scripts/templates.py +155 -0
  158. package/templates/vault/junta-leiloeiros/scripts/db.py +216 -0
  159. package/templates/vault/junta-leiloeiros/scripts/export.py +137 -0
  160. package/templates/vault/junta-leiloeiros/scripts/requirements.txt +15 -0
  161. package/templates/vault/junta-leiloeiros/scripts/run_all.py +190 -0
  162. package/templates/vault/junta-leiloeiros/scripts/scraper/__init__.py +4 -0
  163. package/templates/vault/junta-leiloeiros/scripts/scraper/base_scraper.py +209 -0
  164. package/templates/vault/junta-leiloeiros/scripts/scraper/generic_scraper.py +110 -0
  165. package/templates/vault/junta-leiloeiros/scripts/scraper/jucap.py +110 -0
  166. package/templates/vault/junta-leiloeiros/scripts/scraper/juceac.py +72 -0
  167. package/templates/vault/junta-leiloeiros/scripts/scraper/juceal.py +72 -0
  168. package/templates/vault/junta-leiloeiros/scripts/scraper/juceb.py +68 -0
  169. package/templates/vault/junta-leiloeiros/scripts/scraper/jucec.py +63 -0
  170. package/templates/vault/junta-leiloeiros/scripts/scraper/jucema.py +211 -0
  171. package/templates/vault/junta-leiloeiros/scripts/scraper/jucemg.py +218 -0
  172. package/templates/vault/junta-leiloeiros/scripts/scraper/jucep.py +70 -0
  173. package/templates/vault/junta-leiloeiros/scripts/scraper/jucepa.py +74 -0
  174. package/templates/vault/junta-leiloeiros/scripts/scraper/jucepar.py +80 -0
  175. package/templates/vault/junta-leiloeiros/scripts/scraper/jucepe.py +78 -0
  176. package/templates/vault/junta-leiloeiros/scripts/scraper/jucepi.py +69 -0
  177. package/templates/vault/junta-leiloeiros/scripts/scraper/jucer.py +256 -0
  178. package/templates/vault/junta-leiloeiros/scripts/scraper/jucerja.py +170 -0
  179. package/templates/vault/junta-leiloeiros/scripts/scraper/jucern.py +71 -0
  180. package/templates/vault/junta-leiloeiros/scripts/scraper/jucesc.py +89 -0
  181. package/templates/vault/junta-leiloeiros/scripts/scraper/jucesp.py +233 -0
  182. package/templates/vault/junta-leiloeiros/scripts/scraper/jucetins.py +134 -0
  183. package/templates/vault/junta-leiloeiros/scripts/scraper/jucis_df.py +63 -0
  184. package/templates/vault/junta-leiloeiros/scripts/scraper/jucisrs.py +299 -0
  185. package/templates/vault/junta-leiloeiros/scripts/scraper/states.py +99 -0
  186. package/templates/vault/junta-leiloeiros/scripts/serve_api.py +164 -0
  187. package/templates/vault/junta-leiloeiros/scripts/web_scraper_fallback.py +233 -0
  188. package/templates/vault/last30days/scripts/last30days.py +521 -0
  189. package/templates/vault/last30days/scripts/lib/__init__.py +1 -0
  190. package/templates/vault/last30days/scripts/lib/cache.py +152 -0
  191. package/templates/vault/last30days/scripts/lib/dates.py +124 -0
  192. package/templates/vault/last30days/scripts/lib/dedupe.py +120 -0
  193. package/templates/vault/last30days/scripts/lib/env.py +149 -0
  194. package/templates/vault/last30days/scripts/lib/http.py +152 -0
  195. package/templates/vault/last30days/scripts/lib/models.py +175 -0
  196. package/templates/vault/last30days/scripts/lib/normalize.py +160 -0
  197. package/templates/vault/last30days/scripts/lib/openai_reddit.py +230 -0
  198. package/templates/vault/last30days/scripts/lib/reddit_enrich.py +232 -0
  199. package/templates/vault/last30days/scripts/lib/render.py +383 -0
  200. package/templates/vault/last30days/scripts/lib/schema.py +336 -0
  201. package/templates/vault/last30days/scripts/lib/score.py +311 -0
  202. package/templates/vault/last30days/scripts/lib/ui.py +324 -0
  203. package/templates/vault/last30days/scripts/lib/websearch.py +401 -0
  204. package/templates/vault/last30days/scripts/lib/xai_x.py +217 -0
  205. package/templates/vault/leiloeiro-avaliacao/scripts/governance.py +106 -0
  206. package/templates/vault/leiloeiro-avaliacao/scripts/requirements.txt +1 -0
  207. package/templates/vault/leiloeiro-edital/scripts/governance.py +106 -0
  208. package/templates/vault/leiloeiro-edital/scripts/requirements.txt +1 -0
  209. package/templates/vault/leiloeiro-ia/scripts/governance.py +106 -0
  210. package/templates/vault/leiloeiro-ia/scripts/requirements.txt +1 -0
  211. package/templates/vault/leiloeiro-juridico/scripts/governance.py +106 -0
  212. package/templates/vault/leiloeiro-juridico/scripts/requirements.txt +1 -0
  213. package/templates/vault/leiloeiro-mercado/scripts/governance.py +106 -0
  214. package/templates/vault/leiloeiro-mercado/scripts/requirements.txt +1 -0
  215. package/templates/vault/leiloeiro-risco/scripts/governance.py +106 -0
  216. package/templates/vault/leiloeiro-risco/scripts/requirements.txt +1 -0
  217. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/database.ts +24 -0
  218. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/db.ts +35 -0
  219. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/index.ts +2 -0
  220. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/migrations.ts +31 -0
  221. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/schema.sql +8 -0
  222. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/index.ts +44 -0
  223. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/routes/todos.ts +155 -0
  224. package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/types/index.ts +35 -0
  225. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/App.css +384 -0
  226. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/App.tsx +81 -0
  227. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/api/todos.ts +57 -0
  228. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/ConfirmDialog.tsx +26 -0
  229. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/EmptyState.tsx +8 -0
  230. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/TodoForm.tsx +43 -0
  231. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/TodoItem.tsx +36 -0
  232. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/TodoList.tsx +27 -0
  233. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/hooks/useTodos.ts +81 -0
  234. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/index.css +48 -0
  235. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/main.tsx +10 -0
  236. package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/vite-env.d.ts +1 -0
  237. package/templates/vault/loki-mode/scripts/export-to-vibe-kanban.sh +178 -0
  238. package/templates/vault/loki-mode/scripts/loki-wrapper.sh +281 -0
  239. package/templates/vault/loki-mode/scripts/take-screenshots.js +55 -0
  240. package/templates/vault/matematico-tao/scripts/complexity_analyzer.py +544 -0
  241. package/templates/vault/matematico-tao/scripts/dependency_graph.py +538 -0
  242. package/templates/vault/mcp-builder/scripts/connections.py +151 -0
  243. package/templates/vault/mcp-builder/scripts/evaluation.py +373 -0
  244. package/templates/vault/mcp-builder/scripts/example_evaluation.xml +22 -0
  245. package/templates/vault/mcp-builder/scripts/requirements.txt +2 -0
  246. package/templates/vault/mobile-design/scripts/mobile_audit.py +670 -0
  247. package/templates/vault/notebooklm/scripts/__init__.py +81 -0
  248. package/templates/vault/notebooklm/scripts/ask_question.py +256 -0
  249. package/templates/vault/notebooklm/scripts/auth_manager.py +358 -0
  250. package/templates/vault/notebooklm/scripts/browser_session.py +255 -0
  251. package/templates/vault/notebooklm/scripts/browser_utils.py +107 -0
  252. package/templates/vault/notebooklm/scripts/cleanup_manager.py +302 -0
  253. package/templates/vault/notebooklm/scripts/config.py +44 -0
  254. package/templates/vault/notebooklm/scripts/notebook_manager.py +410 -0
  255. package/templates/vault/notebooklm/scripts/run.py +102 -0
  256. package/templates/vault/notebooklm/scripts/setup_environment.py +204 -0
  257. package/templates/vault/pdf/scripts/check_bounding_boxes.py +70 -0
  258. package/templates/vault/pdf/scripts/check_bounding_boxes_test.py +226 -0
  259. package/templates/vault/pdf/scripts/check_fillable_fields.py +12 -0
  260. package/templates/vault/pdf/scripts/convert_pdf_to_images.py +35 -0
  261. package/templates/vault/pdf/scripts/create_validation_image.py +41 -0
  262. package/templates/vault/pdf/scripts/extract_form_field_info.py +152 -0
  263. package/templates/vault/pdf/scripts/fill_fillable_fields.py +114 -0
  264. package/templates/vault/pdf/scripts/fill_pdf_form_with_annotations.py +108 -0
  265. package/templates/vault/pdf-official/scripts/check_bounding_boxes.py +70 -0
  266. package/templates/vault/pdf-official/scripts/check_bounding_boxes_test.py +226 -0
  267. package/templates/vault/pdf-official/scripts/check_fillable_fields.py +12 -0
  268. package/templates/vault/pdf-official/scripts/convert_pdf_to_images.py +35 -0
  269. package/templates/vault/pdf-official/scripts/create_validation_image.py +41 -0
  270. package/templates/vault/pdf-official/scripts/extract_form_field_info.py +152 -0
  271. package/templates/vault/pdf-official/scripts/fill_fillable_fields.py +114 -0
  272. package/templates/vault/pdf-official/scripts/fill_pdf_form_with_annotations.py +108 -0
  273. package/templates/vault/planning-with-files/scripts/check-complete.sh +44 -0
  274. package/templates/vault/planning-with-files/scripts/init-session.sh +120 -0
  275. package/templates/vault/pptx/ooxml/scripts/pack.py +159 -0
  276. package/templates/vault/pptx/ooxml/scripts/unpack.py +29 -0
  277. package/templates/vault/pptx/ooxml/scripts/validate.py +69 -0
  278. package/templates/vault/pptx/ooxml/scripts/validation/__init__.py +15 -0
  279. package/templates/vault/pptx/ooxml/scripts/validation/base.py +951 -0
  280. package/templates/vault/pptx/ooxml/scripts/validation/docx.py +274 -0
  281. package/templates/vault/pptx/ooxml/scripts/validation/pptx.py +315 -0
  282. package/templates/vault/pptx/ooxml/scripts/validation/redlining.py +279 -0
  283. package/templates/vault/pptx/scripts/html2pptx.js +979 -0
  284. package/templates/vault/pptx/scripts/inventory.py +1020 -0
  285. package/templates/vault/pptx/scripts/rearrange.py +231 -0
  286. package/templates/vault/pptx/scripts/replace.py +385 -0
  287. package/templates/vault/pptx/scripts/thumbnail.py +450 -0
  288. package/templates/vault/pptx-official/ooxml/scripts/pack.py +159 -0
  289. package/templates/vault/pptx-official/ooxml/scripts/unpack.py +29 -0
  290. package/templates/vault/pptx-official/ooxml/scripts/validate.py +69 -0
  291. package/templates/vault/pptx-official/ooxml/scripts/validation/__init__.py +15 -0
  292. package/templates/vault/pptx-official/ooxml/scripts/validation/base.py +951 -0
  293. package/templates/vault/pptx-official/ooxml/scripts/validation/docx.py +274 -0
  294. package/templates/vault/pptx-official/ooxml/scripts/validation/pptx.py +315 -0
  295. package/templates/vault/pptx-official/ooxml/scripts/validation/redlining.py +279 -0
  296. package/templates/vault/pptx-official/scripts/html2pptx.js +979 -0
  297. package/templates/vault/pptx-official/scripts/inventory.py +1020 -0
  298. package/templates/vault/pptx-official/scripts/rearrange.py +231 -0
  299. package/templates/vault/pptx-official/scripts/replace.py +385 -0
  300. package/templates/vault/pptx-official/scripts/thumbnail.py +450 -0
  301. package/templates/vault/product-manager-toolkit/scripts/customer_interview_analyzer.py +441 -0
  302. package/templates/vault/product-manager-toolkit/scripts/rice_prioritizer.py +296 -0
  303. package/templates/vault/prompt-engineering-patterns/scripts/optimize-prompt.py +279 -0
  304. package/templates/vault/scripts/.skill_cache.json +7538 -0
  305. package/templates/vault/scripts/skill_search.py +228 -0
  306. package/templates/vault/senior-architect/scripts/architecture_diagram_generator.py +114 -0
  307. package/templates/vault/senior-architect/scripts/dependency_analyzer.py +114 -0
  308. package/templates/vault/senior-architect/scripts/project_architect.py +114 -0
  309. package/templates/vault/shopify-development/scripts/requirements.txt +19 -0
  310. package/templates/vault/shopify-development/scripts/shopify_graphql.py +428 -0
  311. package/templates/vault/shopify-development/scripts/shopify_init.py +441 -0
  312. package/templates/vault/shopify-development/scripts/tests/test_shopify_init.py +379 -0
  313. package/templates/vault/skill-creator/scripts/init_skill.py +303 -0
  314. package/templates/vault/skill-creator/scripts/package_skill.py +110 -0
  315. package/templates/vault/skill-creator/scripts/quick_validate.py +95 -0
  316. package/templates/vault/skill-installer/scripts/detect_skills.py +318 -0
  317. package/templates/vault/skill-installer/scripts/install_skill.py +1708 -0
  318. package/templates/vault/skill-installer/scripts/package_skill.py +417 -0
  319. package/templates/vault/skill-installer/scripts/requirements.txt +1 -0
  320. package/templates/vault/skill-installer/scripts/validate_skill.py +430 -0
  321. package/templates/vault/skill-sentinel/scripts/analyzers/__init__.py +13 -0
  322. package/templates/vault/skill-sentinel/scripts/analyzers/code_quality.py +247 -0
  323. package/templates/vault/skill-sentinel/scripts/analyzers/cross_skill.py +134 -0
  324. package/templates/vault/skill-sentinel/scripts/analyzers/dependencies.py +121 -0
  325. package/templates/vault/skill-sentinel/scripts/analyzers/documentation.py +189 -0
  326. package/templates/vault/skill-sentinel/scripts/analyzers/governance_audit.py +153 -0
  327. package/templates/vault/skill-sentinel/scripts/analyzers/performance.py +164 -0
  328. package/templates/vault/skill-sentinel/scripts/analyzers/security.py +189 -0
  329. package/templates/vault/skill-sentinel/scripts/config.py +158 -0
  330. package/templates/vault/skill-sentinel/scripts/cost_optimizer.py +146 -0
  331. package/templates/vault/skill-sentinel/scripts/db.py +354 -0
  332. package/templates/vault/skill-sentinel/scripts/governance.py +58 -0
  333. package/templates/vault/skill-sentinel/scripts/recommender.py +228 -0
  334. package/templates/vault/skill-sentinel/scripts/report_generator.py +224 -0
  335. package/templates/vault/skill-sentinel/scripts/requirements.txt +1 -0
  336. package/templates/vault/skill-sentinel/scripts/run_audit.py +290 -0
  337. package/templates/vault/skill-sentinel/scripts/scanner.py +271 -0
  338. package/templates/vault/stability-ai/scripts/config.py +266 -0
  339. package/templates/vault/stability-ai/scripts/generate.py +687 -0
  340. package/templates/vault/stability-ai/scripts/requirements.txt +4 -0
  341. package/templates/vault/stability-ai/scripts/styles.py +174 -0
  342. package/templates/vault/telegram/assets/boilerplate/nodejs/src/bot-client.ts +86 -0
  343. package/templates/vault/telegram/assets/boilerplate/nodejs/src/handlers.ts +79 -0
  344. package/templates/vault/telegram/assets/boilerplate/nodejs/src/index.ts +32 -0
  345. package/templates/vault/telegram/scripts/send_message.py +143 -0
  346. package/templates/vault/telegram/scripts/setup_project.py +103 -0
  347. package/templates/vault/telegram/scripts/test_bot.py +144 -0
  348. package/templates/vault/typescript-expert/scripts/ts_diagnostic.py +203 -0
  349. package/templates/vault/ui-ux-pro-max/scripts/__pycache__/core.cpython-314.pyc +0 -0
  350. package/templates/vault/ui-ux-pro-max/scripts/__pycache__/design_system.cpython-314.pyc +0 -0
  351. package/templates/vault/ui-ux-pro-max/scripts/core.py +257 -0
  352. package/templates/vault/ui-ux-pro-max/scripts/design_system.py +487 -0
  353. package/templates/vault/ui-ux-pro-max/scripts/search.py +76 -0
  354. package/templates/vault/videodb/scripts/ws_listener.py +204 -0
  355. package/templates/vault/web-artifacts-builder/scripts/bundle-artifact.sh +54 -0
  356. package/templates/vault/web-artifacts-builder/scripts/init-artifact.sh +322 -0
  357. package/templates/vault/web-artifacts-builder/scripts/shadcn-components.tar.gz +0 -0
  358. package/templates/vault/webapp-testing/scripts/with_server.py +106 -0
  359. package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/index.ts +125 -0
  360. package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/template-manager.ts +67 -0
  361. package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/types.ts +216 -0
  362. package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/webhook-handler.ts +173 -0
  363. package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/whatsapp-client.ts +193 -0
  364. package/templates/vault/whatsapp-cloud-api/scripts/send_test_message.py +137 -0
  365. package/templates/vault/whatsapp-cloud-api/scripts/setup_project.py +118 -0
  366. package/templates/vault/whatsapp-cloud-api/scripts/validate_config.py +190 -0
  367. package/templates/vault/youtube-summarizer/scripts/extract-transcript.py +65 -0
  368. package/templates/vault/youtube-summarizer/scripts/install-dependencies.sh +28 -0
@@ -0,0 +1,1305 @@
1
+ """007 Dependency Scanner -- Supply chain and dependency security analyzer.
2
+
3
+ Analyzes dependency security across Python and Node.js projects by inspecting
4
+ dependency files (requirements.txt, package.json, Dockerfiles, etc.) for version
5
+ pinning, known risky patterns, and supply chain best practices.
6
+
7
+ Usage:
8
+ python dependency_scanner.py --target /path/to/project
9
+ python dependency_scanner.py --target /path/to/project --output json --verbose
10
+ """
11
+
12
+ import argparse
13
+ import json
14
+ import os
15
+ import re
16
+ import sys
17
+ import time
18
+ from pathlib import Path
19
+
20
+ # ---------------------------------------------------------------------------
21
+ # Import from the 007 config hub (parent directory)
22
+ # ---------------------------------------------------------------------------
23
+ sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
24
+
25
+ import config # noqa: E402
26
+
27
+ # ---------------------------------------------------------------------------
28
+ # Logger
29
+ # ---------------------------------------------------------------------------
30
+ logger = config.setup_logging("007-dependency-scanner")
31
+
32
+
33
# ---------------------------------------------------------------------------
# Dependency file patterns
# ---------------------------------------------------------------------------

# Python dependency manifests matched by exact filename.
PYTHON_DEP_FILES = {
    "requirements.txt",
    "requirements-dev.txt",
    "requirements_dev.txt",
    "requirements-test.txt",
    "requirements_test.txt",
    "requirements-prod.txt",
    "requirements_prod.txt",
    "setup.py",
    "setup.cfg",
    "pyproject.toml",
    "Pipfile",
    "Pipfile.lock",
}

# Node.js dependency manifests matched by exact filename.
NODE_DEP_FILES = {
    "package.json",
    "package-lock.json",
    "yarn.lock",
}

# Docker files are matched by filename prefix (Dockerfile.prod, docker-compose.yml, ...)
DOCKER_PREFIXES = ("Dockerfile", "dockerfile", "docker-compose")

# Union of all exact-name dependency files (for fast set lookup during discovery).
ALL_DEP_FILES = PYTHON_DEP_FILES | NODE_DEP_FILES

# Matches requirements*.txt variants not covered by the exact names above
# (e.g. requirements-ci.txt); case-insensitive.
_REQUIREMENTS_RE = re.compile(
    r"""^requirements[-_]?\w*\.txt$""", re.IGNORECASE
)


# ---------------------------------------------------------------------------
# Python analysis patterns
# ---------------------------------------------------------------------------

# Line shapes seen in requirements files:
#   Pinned:  package==1.2.3
#   Hashed:  package==1.2.3 --hash=sha256:abc...
#   Loose:   package>=1.0  package~=1.0  package!=1.0  package  package<=2
#   Comment: # this is a comment
#   Options: -r other.txt  --find-links  -e .  etc.

_PY_COMMENT_RE = re.compile(r"""^\s*#""")
# Any line starting with '-' is a pip option (-r, -e, --hash, --find-links, ...).
_PY_OPTION_RE = re.compile(r"""^\s*-""")
_PY_BLANK_RE = re.compile(r"""^\s*$""")

# Matches an exact pin: package==version or package[extras]==version
# (requires the version to start with a digit).
_PY_PINNED_RE = re.compile(
    r"""^([A-Za-z0-9_][A-Za-z0-9._-]*)(?:\[.*?\])?\s*==\s*[\d]""",
)

# Matches any package line (anything that starts with a package-ish name;
# comments/options/blanks are filtered out before this is applied).
_PY_PACKAGE_RE = re.compile(
    r"""^([A-Za-z0-9_][A-Za-z0-9._-]*)""",
)

# Detects a pip hash option (--hash=sha256:... / --hash:...).
_PY_HASH_RE = re.compile(r"""--hash[=:]""")

# Packages whose presence warrants a manual review, keyed by lowercase name.
# Values are the human-readable rationale included in the finding.
_RISKY_PYTHON_PACKAGES = {
    "pyyaml": "PyYAML with yaml.load() (without SafeLoader) enables arbitrary code execution",
    "pickle": "pickle module allows arbitrary code execution during deserialization",
    "shelve": "shelve uses pickle internally, same deserialization risks",
    "marshal": "marshal module can execute arbitrary code during deserialization",
    "dill": "dill extends pickle with same arbitrary code execution risks",
    "cloudpickle": "cloudpickle extends pickle with same security concerns",
    "jsonpickle": "jsonpickle can deserialize to arbitrary objects",
    "pyinstaller": "PyInstaller bundles can hide malicious code in executables",
    "subprocess32": "Deprecated subprocess replacement; use stdlib subprocess instead",
}


# ---------------------------------------------------------------------------
# Node.js analysis patterns
# ---------------------------------------------------------------------------

# Version-spec shapes in package.json:
#   Exact:  "1.2.3"
#   Loose:  "^1.2.3"  "~1.2.3"  ">=1.0"  "*"  "latest"

_NODE_EXACT_VERSION_RE = re.compile(
    r"""^\d+\.\d+\.\d+$"""
)

# NOTE: used with .match(), so the bare 'latest|next|canary' alternatives
# only fire when the spec *starts* with one of those words.
_NODE_LOOSE_INDICATORS = re.compile(
    r"""^[\^~*><=]|latest|next|canary""", re.IGNORECASE
)

# Commands in install/postinstall scripts that suggest remote code fetch
# or arbitrary execution.
_NODE_RISKY_SCRIPTS = re.compile(
    r"""(?:curl|wget|fetch|http|eval|exec|child_process|\.sh\b|powershell)""",
    re.IGNORECASE,
)


# ---------------------------------------------------------------------------
# Dockerfile analysis patterns
# ---------------------------------------------------------------------------

# Captures the first token after FROM (image ref, or a --platform flag --
# callers must handle option tokens).
_DOCKER_FROM_RE = re.compile(
    r"""^\s*FROM\s+(\S+)""", re.IGNORECASE
)

# NOTE(review): appears unused in this module; the ':latest'/no-tag check in
# analyze_dockerfile is done with plain string tests instead.
_DOCKER_FROM_LATEST_RE = re.compile(
    r"""(?::latest\s*$|^[^:]+\s*$)"""
)

_DOCKER_USER_RE = re.compile(
    r"""^\s*USER\s+""", re.IGNORECASE
)

# COPY/ADD of key material, certificates, or env files baked into the image.
_DOCKER_COPY_SENSITIVE_RE = re.compile(
    r"""^\s*(?:COPY|ADD)\s+.*?(?:\.env|\.key|\.pem|\.p12|\.pfx|id_rsa|id_ed25519|\.secret)""",
    re.IGNORECASE,
)

# Pipe-to-shell installs (curl ... | bash) -- classic remote-code-execution smell.
_DOCKER_CURL_PIPE_RE = re.compile(
    r"""(?:curl|wget)\s+[^|]*\|\s*(?:bash|sh|zsh|python|perl|ruby|node)""",
    re.IGNORECASE,
)

# Known trusted base images; entries are compared by prefix, so e.g. "python"
# also matches "python:3.12-slim" (and, as a side effect, any image name that
# merely starts with "python").
_DOCKER_TRUSTED_BASES = {
    "python", "node", "golang", "ruby", "openjdk", "amazoncorretto",
    "alpine", "ubuntu", "debian", "centos", "fedora", "archlinux",
    "nginx", "httpd", "redis", "postgres", "mysql", "mongo", "memcached",
    "mcr.microsoft.com/", "gcr.io/", "ghcr.io/", "docker.io/library/",
    "registry.access.redhat.com/",
}
170
+
171
+
172
+ # ---------------------------------------------------------------------------
173
+ # Finding builder
174
+ # ---------------------------------------------------------------------------
175
+
176
+ def _make_finding(
177
+ file: str,
178
+ line: int,
179
+ severity: str,
180
+ description: str,
181
+ recommendation: str,
182
+ pattern: str = "dependency",
183
+ ) -> dict:
184
+ """Create a standardized finding dict.
185
+
186
+ Args:
187
+ file: Absolute path to the dependency file.
188
+ line: Line number where the issue was found (1-based, 0 if N/A).
189
+ severity: CRITICAL, HIGH, MEDIUM, or LOW.
190
+ description: Human-readable description of the issue.
191
+ recommendation: Actionable fix suggestion.
192
+ pattern: Finding sub-type for aggregation.
193
+
194
+ Returns:
195
+ Finding dict compatible with other 007 scanners.
196
+ """
197
+ return {
198
+ "type": "supply_chain",
199
+ "pattern": pattern,
200
+ "severity": severity,
201
+ "file": file,
202
+ "line": line,
203
+ "description": description,
204
+ "recommendation": recommendation,
205
+ }
206
+
207
+
208
+ # ---------------------------------------------------------------------------
209
+ # Python dependency analysis
210
+ # ---------------------------------------------------------------------------
211
+
212
def analyze_requirements_txt(filepath: Path, verbose: bool = False) -> dict:
    """Analyze a Python requirements.txt file.

    Backslash line continuations are folded into logical lines before
    parsing, so ``--hash=...`` options placed on continuation lines (the
    layout produced by ``pip-compile --generate-hashes``) are attributed to
    their package instead of being skipped as stand-alone option lines --
    previously such files were wrongly reported as having no hash
    verification at all.

    Args:
        filepath: Requirements file to inspect.
        verbose: When True, log read failures at debug level.

    Returns:
        Dict with keys: deps_total, deps_pinned, deps_hashed,
        deps_unpinned, findings.
    """
    findings: list[dict] = []
    file_str = str(filepath)
    deps_total = 0
    deps_pinned = 0
    deps_hashed = 0
    deps_unpinned: list[str] = []

    try:
        text = filepath.read_text(encoding="utf-8", errors="replace")
    except OSError as exc:
        if verbose:
            logger.debug("Cannot read %s: %s", filepath, exc)
        return {
            "deps_total": 0, "deps_pinned": 0, "deps_hashed": 0,
            "deps_unpinned": [], "findings": findings,
        }

    # Fold backslash continuations into logical lines, remembering the first
    # physical line number of each logical line for finding locations.
    logical_lines: list[tuple[int, str]] = []
    buffer = None
    buffer_start = 0
    for line_num, raw_line in enumerate(text.splitlines(), start=1):
        piece = raw_line.strip()
        if buffer is None:
            buffer_start = line_num
            joined = piece
        else:
            joined = f"{buffer} {piece}"
        if joined.endswith("\\"):
            buffer = joined[:-1].rstrip()
        else:
            logical_lines.append((buffer_start, joined))
            buffer = None
    if buffer is not None:
        # File ended mid-continuation -- keep what we accumulated.
        logical_lines.append((buffer_start, buffer))

    for line_num, line in logical_lines:
        # Skip comments, pip options (-r, -e, --find-links, ...) and blanks.
        if _PY_COMMENT_RE.match(line) or _PY_OPTION_RE.match(line) or _PY_BLANK_RE.match(line):
            continue

        # Hash options may appear anywhere on the (merged) logical line.
        has_hash = bool(_PY_HASH_RE.search(line))

        # Remove inline comments before extracting the requirement itself.
        line_no_comment = re.sub(r"""\s+#.*$""", "", line)

        pkg_match = _PY_PACKAGE_RE.match(line_no_comment)
        if not pkg_match:
            continue

        pkg_name = pkg_match.group(1).lower()
        deps_total += 1

        # Exact pin (package==X.Y...) vs anything looser.
        if _PY_PINNED_RE.match(line_no_comment):
            deps_pinned += 1
        else:
            deps_unpinned.append(pkg_name)
            findings.append(_make_finding(
                file=file_str,
                line=line_num,
                severity="HIGH",
                description=f"Dependency '{pkg_name}' is not pinned to an exact version",
                recommendation=f"Pin to exact version: {pkg_name}==<version>",
                pattern="unpinned_dependency",
            ))

        if has_hash:
            deps_hashed += 1

        # Packages with known deserialization / supply chain hazards.
        if pkg_name in _RISKY_PYTHON_PACKAGES:
            findings.append(_make_finding(
                file=file_str,
                line=line_num,
                severity="MEDIUM",
                description=f"Risky package '{pkg_name}': {_RISKY_PYTHON_PACKAGES[pkg_name]}",
                recommendation=f"Review usage of '{pkg_name}' and ensure safe configuration",
                pattern="risky_package",
            ))

    # File-wide: no hash verification anywhere.
    if deps_total > 0 and deps_hashed == 0:
        findings.append(_make_finding(
            file=file_str,
            line=0,
            severity="LOW",
            description="No hash verification used for any dependency",
            recommendation="Consider using --hash for supply chain integrity (pip install --require-hashes)",
            pattern="no_hash_verification",
        ))

    # File-wide: unusually large dependency surface.
    if deps_total > 100:
        findings.append(_make_finding(
            file=file_str,
            line=0,
            severity="LOW",
            description=f"High dependency count ({deps_total}). Large dependency trees increase supply chain risk",
            recommendation="Audit dependencies and remove unused packages. Consider dependency-free alternatives",
            pattern="high_dependency_count",
        ))

    return {
        "deps_total": deps_total,
        "deps_pinned": deps_pinned,
        "deps_hashed": deps_hashed,
        "deps_unpinned": deps_unpinned,
        "findings": findings,
    }
313
+
314
+
315
def analyze_pyproject_toml(filepath: Path, verbose: bool = False) -> dict:
    """Analyze a pyproject.toml for dependency information.

    Performs best-effort, line-oriented parsing without a TOML library
    (stdlib only).  Two layouts are recognized: quoted requirement strings
    (PEP 508 entries inside a dependencies array) and poetry-style
    ``name = "spec"`` assignments.

    Args:
        filepath: pyproject.toml file to inspect.
        verbose: When True, log read failures at debug level.

    Returns:
        Dict with keys: deps_total, deps_pinned, deps_unpinned, findings.
    """
    findings: list[dict] = []
    file_str = str(filepath)
    deps_total = 0
    deps_pinned = 0
    deps_unpinned: list[str] = []

    try:
        text = filepath.read_text(encoding="utf-8", errors="replace")
    except OSError as exc:
        if verbose:
            logger.debug("Cannot read %s: %s", filepath, exc)
        return {
            "deps_total": 0, "deps_pinned": 0,
            "deps_unpinned": [], "findings": findings,
        }

    # Best-effort: look for dependency lines in [project.dependencies] or
    # [tool.poetry.dependencies] sections.
    # NOTE(review): PEP 621 declares dependencies as an array under [project],
    # not as a [project.dependencies] table -- this header-based detection is
    # heuristic; confirm against real-world files.
    in_deps_section = False
    # Quoted requirement string: captures name and the trailing spec text.
    dep_line_re = re.compile(r"""^\s*['"]([A-Za-z0-9_][A-Za-z0-9._-]*)([^'"]*)['\"]""")
    # Any '[' line starts a new TOML table.
    section_re = re.compile(r"""^\s*\[""")

    for line_num, raw_line in enumerate(text.splitlines(), start=1):
        line = raw_line.strip()

        # Track sections: entering a dependencies header turns collection on;
        # any other '[' header while collecting turns it off.
        if re.match(r"""^\s*\[(?:project\.)?dependencies""", line, re.IGNORECASE):
            in_deps_section = True
            continue
        if re.match(r"""^\s*\[tool\.poetry\.dependencies""", line, re.IGNORECASE):
            in_deps_section = True
            continue
        if section_re.match(line) and in_deps_section:
            in_deps_section = False
            continue

        if not in_deps_section:
            continue

        m = dep_line_re.match(line)
        if not m:
            # Also check for key = "version" style (poetry).
            poetry_re = re.match(
                r"""^([A-Za-z0-9_][A-Za-z0-9._-]*)\s*=\s*['"]([^'"]*)['\"]""",
                line,
            )
            if poetry_re:
                pkg_name = poetry_re.group(1).lower()
                version_spec = poetry_re.group(2)
                # Poetry's 'python' entry is an interpreter constraint, not a dep.
                if pkg_name in ("python",):
                    continue
                deps_total += 1
                # Poetry treats a bare "X.Y..." spec as an exact pin.
                if re.match(r"""^\d+\.\d+""", version_spec):
                    deps_pinned += 1
                else:
                    deps_unpinned.append(pkg_name)
                    findings.append(_make_finding(
                        file=file_str,
                        line=line_num,
                        severity="MEDIUM",
                        description=f"Dependency '{pkg_name}' version spec '{version_spec}' is not an exact pin",
                        recommendation=f"Pin to exact version: {pkg_name} = \"<exact_version>\"",
                        pattern="unpinned_dependency",
                    ))
            continue

        # Quoted requirement-string entry (e.g. "requests>=2.0").
        pkg_name = m.group(1).lower()
        version_spec = m.group(2).strip()
        deps_total += 1

        if "==" in version_spec:
            deps_pinned += 1
        else:
            deps_unpinned.append(pkg_name)
            if version_spec:
                # Has a constraint, but a loose one (>=, ~=, ...).
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="MEDIUM",
                    description=f"Dependency '{pkg_name}' has loose version spec '{version_spec}'",
                    recommendation=f"Pin to exact version with ==",
                    pattern="unpinned_dependency",
                ))
            else:
                # No constraint at all -- any version may be installed.
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="HIGH",
                    description=f"Dependency '{pkg_name}' has no version constraint",
                    recommendation=f"Add exact version pin: {pkg_name}==<version>",
                    pattern="unpinned_dependency",
                ))

    return {
        "deps_total": deps_total,
        "deps_pinned": deps_pinned,
        "deps_unpinned": deps_unpinned,
        "findings": findings,
    }
422
+
423
+
424
def analyze_pipfile(filepath: Path, verbose: bool = False) -> dict:
    """Analyze a Pipfile for dependency information (best-effort INI-like parsing).

    Scans only the [packages] and [dev-packages] sections.  Two entry shapes
    are recognized: ``name = "spec"`` string assignments and
    ``name = {version = "...", ...}`` inline tables.

    Args:
        filepath: Pipfile to inspect.
        verbose: When True, log read failures at debug level.

    Returns:
        Dict with keys: deps_total, deps_pinned, deps_unpinned, findings.
    """
    findings: list[dict] = []
    file_str = str(filepath)
    deps_total = 0
    deps_pinned = 0
    deps_unpinned: list[str] = []

    try:
        text = filepath.read_text(encoding="utf-8", errors="replace")
    except OSError as exc:
        if verbose:
            logger.debug("Cannot read %s: %s", filepath, exc)
        return {
            "deps_total": 0, "deps_pinned": 0,
            "deps_unpinned": [], "findings": findings,
        }

    in_deps = False
    # Any '[' line starts a new section.
    section_re = re.compile(r"""^\s*\[""")

    for line_num, raw_line in enumerate(text.splitlines(), start=1):
        line = raw_line.strip()

        # Entering a packages section turns collection on; any other section
        # header while collecting turns it off.
        if re.match(r"""^\[(?:packages|dev-packages)\]""", line, re.IGNORECASE):
            in_deps = True
            continue
        if section_re.match(line) and in_deps:
            in_deps = False
            continue

        if not in_deps or not line or line.startswith("#"):
            continue

        # package = "version_spec" or package = {version = "...", ...}
        pkg_match = re.match(
            r"""^([A-Za-z0-9_][A-Za-z0-9._-]*)\s*=\s*['"]([^'"]*)['\"]""",
            line,
        )
        if pkg_match:
            pkg_name = pkg_match.group(1).lower()
            version_spec = pkg_match.group(2)
            deps_total += 1

            if version_spec == "*":
                # Wildcard accepts any version -- worst case for supply chain.
                deps_unpinned.append(pkg_name)
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="HIGH",
                    description=f"Dependency '{pkg_name}' uses wildcard version '*'",
                    recommendation=f"Pin to exact version: {pkg_name} = \"==<version>\"",
                    pattern="unpinned_dependency",
                ))
            elif version_spec.startswith("=="):
                deps_pinned += 1
            else:
                # Loose constraint (>=, ~=, ...).
                deps_unpinned.append(pkg_name)
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="MEDIUM",
                    description=f"Dependency '{pkg_name}' version '{version_spec}' is not exact",
                    recommendation=f"Pin to exact version with ==",
                    pattern="unpinned_dependency",
                ))
            continue

        # Dict-style: package = {version = "...", extras = [...]}
        dict_match = re.match(
            r"""^([A-Za-z0-9_][A-Za-z0-9._-]*)\s*=\s*\{""",
            line,
        )
        if dict_match:
            pkg_name = dict_match.group(1).lower()
            deps_total += 1
            # Crude check: exact pins contain '==' somewhere in the table.
            if '==' in line:
                deps_pinned += 1
            else:
                deps_unpinned.append(pkg_name)
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="MEDIUM",
                    description=f"Dependency '{pkg_name}' may not have exact version pin",
                    recommendation="Pin to exact version with ==",
                    pattern="unpinned_dependency",
                ))

    return {
        "deps_total": deps_total,
        "deps_pinned": deps_pinned,
        "deps_unpinned": deps_unpinned,
        "findings": findings,
    }
523
+
524
+
525
+ # ---------------------------------------------------------------------------
526
+ # Node.js dependency analysis
527
+ # ---------------------------------------------------------------------------
528
+
529
def analyze_package_json(filepath: Path, verbose: bool = False) -> dict:
    """Analyze a package.json for dependency security.

    Checks version pinning in dependencies/devDependencies, risky install
    lifecycle scripts, overall dependency count, and dev tools misplaced in
    production dependencies.

    Args:
        filepath: package.json file to inspect.
        verbose: When True, log read failures at debug level.

    Returns:
        Dict with keys: deps_total, deps_pinned, deps_unpinned,
        dev_deps_total, findings.
    """
    findings: list[dict] = []
    file_str = str(filepath)
    deps_total = 0
    deps_pinned = 0
    deps_unpinned: list[str] = []
    dev_deps_total = 0

    try:
        text = filepath.read_text(encoding="utf-8", errors="replace")
    except OSError as exc:
        if verbose:
            logger.debug("Cannot read %s: %s", filepath, exc)
        return {
            "deps_total": 0, "deps_pinned": 0, "deps_unpinned": [],
            "dev_deps_total": 0, "findings": findings,
        }

    try:
        data = json.loads(text)
    except json.JSONDecodeError as exc:
        # A manifest that doesn't parse is itself a finding, not a crash.
        findings.append(_make_finding(
            file=file_str,
            line=0,
            severity="MEDIUM",
            description=f"Invalid JSON in package.json: {exc}",
            recommendation="Fix JSON syntax errors in package.json",
            pattern="invalid_manifest",
        ))
        return {
            "deps_total": 0, "deps_pinned": 0, "deps_unpinned": [],
            "dev_deps_total": 0, "findings": findings,
        }

    # Valid JSON but not an object (e.g. a bare array) -- nothing to scan.
    if not isinstance(data, dict):
        return {
            "deps_total": 0, "deps_pinned": 0, "deps_unpinned": [],
            "dev_deps_total": 0, "findings": findings,
        }

    # Helper to find the approximate line number of a key in JSON text.
    def _find_line(key: str, section: str = "") -> int:
        """Best-effort line number lookup for a key in the file text.

        Returns the first line containing the quoted key, 0 if not found.
        NOTE(review): 'section' is accepted but unused -- a key appearing in
        several sections resolves to its first occurrence.
        """
        search_term = f'"{key}"'
        for i, file_line in enumerate(text.splitlines(), start=1):
            if search_term in file_line:
                return i
        return 0

    # Analyze dependencies (production and dev).
    for section_name in ("dependencies", "devDependencies"):
        deps = data.get(section_name, {})
        if not isinstance(deps, dict):
            continue

        is_dev = section_name == "devDependencies"

        for pkg_name, version_spec in deps.items():
            # Non-string specs (malformed manifest) are skipped silently.
            if not isinstance(version_spec, str):
                continue

            if is_dev:
                dev_deps_total += 1
            # deps_total counts production AND dev dependencies.
            deps_total += 1
            line_num = _find_line(pkg_name, section_name)

            if _NODE_EXACT_VERSION_RE.match(version_spec):
                deps_pinned += 1
            elif _NODE_LOOSE_INDICATORS.match(version_spec):
                deps_unpinned.append(pkg_name)
                # Loose dev deps matter less than loose production deps.
                severity = "MEDIUM" if is_dev else "HIGH"
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity=severity,
                    description=f"{'Dev d' if is_dev else 'D'}ependency '{pkg_name}' uses loose version '{version_spec}'",
                    recommendation=f"Pin to exact version: \"{pkg_name}\": \"{version_spec.lstrip('^~')}\"",
                    pattern="unpinned_dependency",
                ))
            else:
                # URLs, git refs, file paths, etc. -- flag as non-standard.
                deps_unpinned.append(pkg_name)
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="MEDIUM",
                    description=f"Dependency '{pkg_name}' uses non-standard version spec: '{version_spec}'",
                    recommendation="Consider pinning to an exact registry version",
                    pattern="non_standard_version",
                ))

    # Install-time lifecycle scripts run automatically on 'npm install' --
    # flag those that fetch or execute code.
    scripts = data.get("scripts", {})
    if isinstance(scripts, dict):
        for script_name, script_cmd in scripts.items():
            if not isinstance(script_cmd, str):
                continue

            if script_name in ("postinstall", "preinstall", "install") and _NODE_RISKY_SCRIPTS.search(script_cmd):
                line_num = _find_line(script_name)
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="CRITICAL",
                    description=f"Risky '{script_name}' lifecycle script: may execute arbitrary code",
                    recommendation=f"Review and audit the '{script_name}' script: {script_cmd[:120]}",
                    pattern="risky_lifecycle_script",
                ))

    # File-wide: unusually large dependency surface (includes dev deps).
    if deps_total > 100:
        findings.append(_make_finding(
            file=file_str,
            line=0,
            severity="LOW",
            description=f"High dependency count ({deps_total}). Large dependency trees increase supply chain risk",
            recommendation="Audit dependencies and remove unused packages",
            pattern="high_dependency_count",
        ))

    # Check if well-known dev tools are mixed into production dependencies.
    prod_deps = data.get("dependencies", {})
    dev_deps = data.get("devDependencies", {})
    if isinstance(prod_deps, dict) and isinstance(dev_deps, dict):
        _DEV_ONLY_PACKAGES = {
            "jest", "mocha", "chai", "sinon", "nyc", "istanbul",
            "eslint", "prettier", "nodemon", "ts-node",
            "webpack-dev-server", "storybook", "@storybook/react",
        }
        for pkg in prod_deps:
            if pkg.lower() in _DEV_ONLY_PACKAGES:
                line_num = _find_line(pkg)
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="LOW",
                    description=f"'{pkg}' is typically a devDependency but listed in dependencies",
                    recommendation=f"Move '{pkg}' to devDependencies to reduce production bundle size",
                    pattern="misplaced_dependency",
                ))

    return {
        "deps_total": deps_total,
        "deps_pinned": deps_pinned,
        "deps_unpinned": deps_unpinned,
        "dev_deps_total": dev_deps_total,
        "findings": findings,
    }
683
+
684
+
685
+ # ---------------------------------------------------------------------------
686
+ # Dockerfile analysis
687
+ # ---------------------------------------------------------------------------
688
+
689
def analyze_dockerfile(filepath: Path, verbose: bool = False) -> dict:
    """Analyze a Dockerfile for supply chain security issues.

    FROM parsing handles the full instruction shape
    ``FROM [--platform=...] <image> [AS <alias>]``:
    option tokens are skipped (previously ``--platform=...`` was mistaken
    for the image name), and build-stage aliases are tracked so later
    ``FROM <alias>`` stages are not misreported as unpinned/untrusted
    images.

    Args:
        filepath: Dockerfile to inspect.
        verbose: When True, log read failures at debug level.

    Returns:
        Dict with keys: base_images, findings.
    """
    findings: list[dict] = []
    file_str = str(filepath)
    base_images: list[str] = []
    has_user_directive = False
    stage_aliases: set[str] = set()

    try:
        text = filepath.read_text(encoding="utf-8", errors="replace")
    except OSError as exc:
        if verbose:
            logger.debug("Cannot read %s: %s", filepath, exc)
        return {"base_images": [], "findings": findings}

    for line_num, raw_line in enumerate(text.splitlines(), start=1):
        line = raw_line.strip()

        # Skip comments and blanks
        if not line or line.startswith("#"):
            continue

        # FROM analysis
        if _DOCKER_FROM_RE.match(line):
            # Tokenize and drop option flags (--platform=...) so the first
            # remaining token is the actual image reference.
            args = [tok for tok in line.split()[1:] if not tok.startswith("--")]
            if not args:
                continue
            image = args[0]
            image_core = image.lower()

            # Record the stage alias (FROM x AS alias) so later stages that
            # build FROM that alias are recognized as stage references.
            if len(args) >= 3 and args[1].lower() == "as":
                stage_aliases.add(args[2].lower())

            if image_core in stage_aliases:
                # Reference to an earlier build stage, not an image pull.
                continue

            base_images.append(image)

            if image_core == "scratch":
                # The empty image is inherently safe.
                pass
            elif "@sha256:" in image_core:
                # Digest pinning is the best practice -- no finding.
                pass
            elif ":" not in image_core or image_core.endswith(":latest"):
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="HIGH",
                    description=f"Base image '{image_core}' uses ':latest' or no version tag",
                    recommendation="Pin base image to a specific version tag (e.g., python:3.12-slim)",
                    pattern="unpinned_base_image",
                ))

            # Check for untrusted base images (prefix match against the
            # known-good list).
            is_trusted = any(
                image_core.startswith(prefix) or image_core.startswith(f"docker.io/library/{prefix}")
                for prefix in _DOCKER_TRUSTED_BASES
            )
            if not is_trusted and image_core != "scratch":
                findings.append(_make_finding(
                    file=file_str,
                    line=line_num,
                    severity="MEDIUM",
                    description=f"Base image '{image_core}' is from an unverified source",
                    recommendation="Use official images from Docker Hub or trusted registries",
                    pattern="untrusted_base_image",
                ))
            continue

        # USER directive
        if _DOCKER_USER_RE.match(line):
            has_user_directive = True

        # COPY/ADD of key material, certificates, or env files
        if _DOCKER_COPY_SENSITIVE_RE.match(line):
            findings.append(_make_finding(
                file=file_str,
                line=line_num,
                severity="CRITICAL",
                description="COPY/ADD of potentially sensitive file (keys, .env, certificates)",
                recommendation="Use Docker secrets or build args instead of copying sensitive files into images",
                pattern="sensitive_file_in_image",
            ))

        # curl | bash pattern
        if _DOCKER_CURL_PIPE_RE.search(line):
            findings.append(_make_finding(
                file=file_str,
                line=line_num,
                severity="CRITICAL",
                description="Pipe-to-shell pattern detected (curl|bash). Remote code execution risk",
                recommendation="Download scripts first, verify checksum, then execute",
                pattern="curl_pipe_bash",
            ))

    # No USER directive anywhere => the container runs as root by default.
    if base_images and not has_user_directive:
        findings.append(_make_finding(
            file=file_str,
            line=0,
            severity="MEDIUM",
            description="Dockerfile has no USER directive -- container runs as root by default",
            recommendation="Add 'USER nonroot' or 'USER 1000' before the final CMD/ENTRYPOINT",
            pattern="running_as_root",
        ))

    return {"base_images": base_images, "findings": findings}
796
+
797
+
798
def analyze_docker_compose(filepath: Path, verbose: bool = False) -> dict:
    """Best-effort scan of a docker-compose file for unpinned service images.

    Parses line by line (no YAML library): every ``image:`` entry is
    collected, and images with no tag or a ':latest' tag are flagged.

    Args:
        filepath: docker-compose file to inspect.
        verbose: When True, log read failures at debug level.

    Returns:
        Dict with keys: services, findings.
    """
    findings: list[dict] = []
    file_str = str(filepath)
    services: list[str] = []

    try:
        text = filepath.read_text(encoding="utf-8", errors="replace")
    except OSError as exc:
        if verbose:
            logger.debug("Cannot read %s: %s", filepath, exc)
        return {"services": [], "findings": findings}

    # Compile once; optional surrounding quotes are tolerated.
    image_re = re.compile(r"""^image:\s*['"]?(\S+?)['"]?\s*$""")

    for line_num, raw_line in enumerate(text.splitlines(), start=1):
        candidate = image_re.match(raw_line.strip())
        if candidate is None:
            continue

        image = candidate.group(1).lower()
        services.append(image)

        missing_tag = ":" not in image
        if missing_tag or image.endswith(":latest"):
            findings.append(_make_finding(
                file=file_str,
                line=line_num,
                severity="HIGH",
                description=f"Service image '{image}' uses ':latest' or no version tag",
                recommendation="Pin image to a specific version tag",
                pattern="unpinned_base_image",
            ))

    return {"services": services, "findings": findings}
840
+
841
+
842
+ # ---------------------------------------------------------------------------
843
+ # File discovery
844
+ # ---------------------------------------------------------------------------
845
+
846
def discover_dependency_files(target: Path) -> list[Path]:
    """Recursively collect dependency manifests under *target*.

    A file qualifies when its name is in ALL_DEP_FILES, matches the
    requirements*.txt pattern, or starts with a Docker file prefix.
    Directories listed in config.SKIP_DIRECTORIES are pruned from the walk.

    Args:
        target: Root directory to search.

    Returns:
        Paths of all discovered dependency files, in os.walk order.
    """

    def _is_dep_file(name: str) -> bool:
        """True when *name* looks like a dependency manifest."""
        if name in ALL_DEP_FILES:
            return True
        if _REQUIREMENTS_RE.match(name):
            return True
        lowered = name.lower()
        return any(lowered.startswith(prefix.lower()) for prefix in DOCKER_PREFIXES)

    matches: list[Path] = []
    for root, dirs, filenames in os.walk(target):
        # Prune skip-listed directories in place so os.walk never descends.
        dirs[:] = [d for d in dirs if d not in config.SKIP_DIRECTORIES]
        matches.extend(Path(root) / name for name in filenames if _is_dep_file(name))

    return matches
876
+
877
+
878
+ # ---------------------------------------------------------------------------
879
+ # Core scan logic
880
+ # ---------------------------------------------------------------------------
881
+
882
def scan_dependency_file(filepath: Path, verbose: bool = False) -> dict:
    """Route a dependency file to its appropriate analyzer.

    The shallow branches (lockfiles, setup scripts, unrecognized files) all
    return the same schema -- previously the final fallback returned only
    {"findings": []}, which could KeyError in callers indexing the
    'deps_total'/'deps_pinned'/'deps_unpinned' counters.

    Args:
        filepath: Dependency file discovered by discover_dependency_files().
        verbose: Passed through to the underlying analyzer.

    Returns:
        Analysis result dict; always includes 'findings'.  Python/Node
        analyzers also include dep counters; Docker analyzers return
        'base_images'/'services' instead.
    """
    fname = filepath.name.lower()

    # Shared shape for files we detect but do not deeply analyze.
    _shallow = {"deps_total": 0, "deps_pinned": 0, "deps_unpinned": [], "findings": []}

    # Python: requirements*.txt (note: matched against the original-case name)
    if _REQUIREMENTS_RE.match(filepath.name):
        return analyze_requirements_txt(filepath, verbose=verbose)

    # Python: pyproject.toml
    if fname == "pyproject.toml":
        return analyze_pyproject_toml(filepath, verbose=verbose)

    # Python: Pipfile
    if fname == "pipfile":
        return analyze_pipfile(filepath, verbose=verbose)

    # Python: Pipfile.lock, setup.py, setup.cfg -- detected, no deep analysis yet.
    if fname in ("pipfile.lock", "setup.py", "setup.cfg"):
        return _shallow

    # Node.js: package.json
    if fname == "package.json":
        return analyze_package_json(filepath, verbose=verbose)

    # Node.js lockfiles pin transitively by design -- nothing to flag.
    if fname in ("package-lock.json", "yarn.lock"):
        return _shallow

    # Docker: Dockerfile*
    if fname.startswith("dockerfile"):
        return analyze_dockerfile(filepath, verbose=verbose)

    # Docker: docker-compose*
    if fname.startswith("docker-compose"):
        return analyze_docker_compose(filepath, verbose=verbose)

    # Unrecognized: same consistent schema as the other shallow branches.
    return _shallow
924
+
925
+
926
+ # ---------------------------------------------------------------------------
927
+ # Scoring
928
+ # ---------------------------------------------------------------------------
929
+
930
# Points subtracted from the findings half of the score per finding severity.
SCORE_DEDUCTIONS = {
    "CRITICAL": 15,
    "HIGH": 7,
    "MEDIUM": 3,
    "LOW": 1,
    "INFO": 0,
}


def compute_supply_chain_score(findings: list[dict], pinning_pct: float) -> int:
    """Compute the supply chain security score (0-100).

    The score is the sum of two halves: pinning coverage contributes up to
    50 points (so a fully unpinned project caps at 50), and the remaining
    50 points are reduced by per-finding severity deductions.  A fully
    pinned project with no findings scores 100.

    Args:
        findings: All findings across all dependency files.
        pinning_pct: Percentage of dependencies that are pinned (0.0-100.0).

    Returns:
        Integer score clamped to the 0-100 range.
    """
    # Half of the score comes directly from pinning coverage.
    pinning_half = 0.5 * pinning_pct

    # The other half loses points per finding, floored at zero.
    penalty = sum(
        SCORE_DEDUCTIONS.get(item.get("severity", "INFO"), 0)
        for item in findings
    )
    findings_half = max(0.0, 50.0 - penalty)

    return max(0, min(100, round(pinning_half + findings_half)))
965
+
966
+
967
+ # ---------------------------------------------------------------------------
968
+ # Aggregation helpers
969
+ # ---------------------------------------------------------------------------
970
+
971
def aggregate_by_severity(findings: list[dict]) -> dict[str, int]:
    """Tally findings per known severity level.

    Every level from config.SEVERITY appears in the result (zero when
    unused); findings carrying an unrecognized severity are ignored.
    """
    tally = dict.fromkeys(config.SEVERITY, 0)
    for finding in findings:
        level = finding.get("severity", "INFO")
        if level in tally:
            tally[level] += 1
    return tally
979
+
980
+
981
def aggregate_by_pattern(findings: list[dict]) -> dict[str, int]:
    """Count findings per pattern type.

    Args:
        findings: Finding dicts; each contributes its "pattern" value,
            defaulting to "unknown" when the key is absent.

    Returns:
        Mapping of pattern name to occurrence count, ordered by first
        appearance (a plain dict, same as the previous hand-rolled tally).
    """
    # Counter replaces the manual get()+1 tally loop; local import keeps
    # this block self-contained since the file's import header is elsewhere.
    from collections import Counter

    return dict(Counter(f.get("pattern", "unknown") for f in findings))
988
+
989
+
990
+ # ---------------------------------------------------------------------------
991
+ # Report formatters
992
+ # ---------------------------------------------------------------------------
993
+
994
def format_text_report(
    target: str,
    dep_files: list[str],
    total_deps: int,
    total_pinned: int,
    pinning_pct: float,
    findings: list[dict],
    severity_counts: dict[str, int],
    pattern_counts: dict[str, int],
    score: int,
    verdict: dict,
    elapsed: float,
) -> str:
    """Build a human-readable text report.

    Sections, in order: banner, scan metadata, detected dependency files,
    a severity histogram, a findings-by-type table (top 20), detailed
    findings at MEDIUM severity or above grouped by severity then file,
    and a final score/verdict footer.

    Args:
        target: Resolved path of the scanned directory.
        dep_files: Paths of the dependency files that were detected.
        total_deps: Total dependency count across all files.
        total_pinned: How many of those dependencies are version-pinned.
        pinning_pct: Pinning coverage as a percentage (0.0-100.0).
        findings: All finding dicts. Detail rendering reads "file",
            "description" and "recommendation" directly (KeyError if
            absent); "severity", "line" and "pattern" are optional.
        severity_counts: Finding counts keyed by severity level.
        pattern_counts: Finding counts keyed by pattern type.
        score: Supply chain score (0-100).
        verdict: Verdict dict with "emoji", "label" and "description".
        elapsed: Scan duration in seconds.

    Returns:
        The complete report as a single newline-joined string.
    """
    lines: list[str] = []

    lines.append("=" * 72)
    lines.append(" 007 DEPENDENCY SCANNER -- SUPPLY CHAIN REPORT")
    lines.append("=" * 72)
    lines.append("")

    # Metadata
    lines.append(f" Target: {target}")
    lines.append(f" Timestamp: {config.get_timestamp()}")
    lines.append(f" Duration: {elapsed:.2f}s")
    lines.append(f" Dep files found: {len(dep_files)}")
    lines.append(f" Total deps: {total_deps}")
    lines.append(f" Pinned deps: {total_pinned}")
    lines.append(f" Pinning coverage: {pinning_pct:.1f}%")
    lines.append(f" Total findings: {len(findings)}")
    lines.append("")

    # Dependency files list
    if dep_files:
        lines.append("-" * 72)
        lines.append(" DEPENDENCY FILES DETECTED")
        lines.append("-" * 72)
        for df in sorted(dep_files):
            lines.append(f" {df}")
        lines.append("")

    # Severity breakdown
    lines.append("-" * 72)
    lines.append(" FINDINGS BY SEVERITY")
    lines.append("-" * 72)
    for sev in ("CRITICAL", "HIGH", "MEDIUM", "LOW", "INFO"):
        count = severity_counts.get(sev, 0)
        # Histogram bar is capped at 40 chars so huge counts cannot wrap.
        bar = "#" * min(count, 40)
        lines.append(f" {sev:<10} {count:>5} {bar}")
    lines.append("")

    # Pattern breakdown
    if pattern_counts:
        lines.append("-" * 72)
        lines.append(" FINDINGS BY TYPE")
        lines.append("-" * 72)
        # Most frequent patterns first; only the top 20 are displayed.
        sorted_patterns = sorted(pattern_counts.items(), key=lambda x: x[1], reverse=True)
        for pname, count in sorted_patterns[:20]:
            lines.append(f" {pname:<35} {count:>5}")
        lines.append("")

    # Detail findings grouped by severity
    # Only MEDIUM and above get detail lines; LOW/INFO still show in the
    # histogram above but are omitted here to keep the report readable.
    displayed = [f for f in findings if config.SEVERITY.get(f.get("severity", "INFO"), 0) >= config.SEVERITY["MEDIUM"]]

    if displayed:
        by_severity: dict[str, list[dict]] = {}
        for f in displayed:
            sev = f.get("severity", "INFO")
            by_severity.setdefault(sev, []).append(f)

        for sev in ("CRITICAL", "HIGH", "MEDIUM"):
            sev_findings = by_severity.get(sev, [])
            if not sev_findings:
                continue

            lines.append("-" * 72)
            lines.append(f" [{sev}] FINDINGS ({len(sev_findings)})")
            lines.append("-" * 72)

            # Group by file so each file's findings read together.
            by_file: dict[str, list[dict]] = {}
            for f in sev_findings:
                by_file.setdefault(f["file"], []).append(f)

            for fpath, file_findings in sorted(by_file.items()):
                lines.append(f" {fpath}")
                # Within a file, findings are ordered by line number;
                # findings without a line sort first (key defaults to 0).
                for f in sorted(file_findings, key=lambda x: x.get("line", 0)):
                    loc = f"L{f['line']}" if f.get("line") else " "
                    lines.append(f" {loc:>6} {f['description']}")
                    lines.append(f" -> {f['recommendation']}")
            lines.append("")
    else:
        lines.append(" No findings at MEDIUM severity or above.")
        lines.append("")

    # Score and verdict
    lines.append("=" * 72)
    lines.append(f" SUPPLY CHAIN SCORE: {score} / 100")
    lines.append(f" VERDICT: {verdict['emoji']} {verdict['label']}")
    lines.append(f" {verdict['description']}")
    lines.append("=" * 72)
    lines.append("")

    return "\n".join(lines)
1097
+
1098
+
1099
def build_json_report(
    target: str,
    dep_files: list[str],
    total_deps: int,
    total_pinned: int,
    pinning_pct: float,
    findings: list[dict],
    severity_counts: dict[str, int],
    pattern_counts: dict[str, int],
    score: int,
    verdict: dict,
    elapsed: float,
) -> dict:
    """Assemble the JSON-serializable report dict for the dependency scan.

    Keys are inserted in a deliberate order so serialized output stays
    stable across runs.
    """
    report: dict = {}
    report["scan"] = "dependency_scanner"
    report["target"] = target
    report["timestamp"] = config.get_timestamp()
    report["duration_seconds"] = round(elapsed, 3)
    report["dependency_files"] = dep_files
    report["total_dependencies"] = total_deps
    report["total_pinned"] = total_pinned
    report["pinning_coverage_pct"] = round(pinning_pct, 1)
    report["total_findings"] = len(findings)
    report["severity_counts"] = severity_counts
    report["pattern_counts"] = pattern_counts
    report["score"] = score
    # Only the display fields of the verdict are exported.
    report["verdict"] = {key: verdict[key] for key in ("label", "description", "emoji")}
    report["findings"] = findings
    return report
1133
+
1134
+
1135
+ # ---------------------------------------------------------------------------
1136
+ # Main entry point
1137
+ # ---------------------------------------------------------------------------
1138
+
1139
def run_scan(
    target_path: str,
    output_format: str = "text",
    verbose: bool = False,
) -> dict:
    """Execute the dependency scan and return the report dict.

    Also prints the report to stdout in the requested format, and writes
    an audit-log entry via config.log_audit_event.

    Args:
        target_path: Path to the directory to scan.
        output_format: 'text' or 'json'.
        verbose: Enable debug-level logging.

    Returns:
        JSON-compatible report dict.

    Raises:
        SystemExit: If the target path does not exist or is not a directory.
    """
    if verbose:
        logger.setLevel("DEBUG")

    config.ensure_directories()

    # Validate the target before doing any work; both failure modes exit
    # with status 1 rather than raising, since this doubles as a CLI.
    target = Path(target_path).resolve()
    if not target.exists():
        logger.error("Target path does not exist: %s", target)
        sys.exit(1)
    if not target.is_dir():
        logger.error("Target is not a directory: %s", target)
        sys.exit(1)

    logger.info("Starting dependency scan of %s", target)
    start_time = time.time()

    # Discover dependency files
    dep_file_paths = discover_dependency_files(target)
    dep_files = [str(p) for p in dep_file_paths]
    logger.info("Found %d dependency files", len(dep_files))

    # Analyze each dependency file, accumulating findings and pin counts.
    # Analyzers may omit any of these keys, hence the .get() defaults.
    all_findings: list[dict] = []
    total_deps = 0
    total_pinned = 0

    for fpath in dep_file_paths:
        if verbose:
            logger.debug("Analyzing: %s", fpath)

        result = scan_dependency_file(fpath, verbose=verbose)
        all_findings.extend(result.get("findings", []))
        total_deps += result.get("deps_total", 0)
        total_pinned += result.get("deps_pinned", 0)

    # Truncate findings if over limit. Note: all later aggregation and
    # scoring operate on the truncated list, by design.
    max_report = config.LIMITS["max_report_findings"]
    if len(all_findings) > max_report:
        logger.warning("Truncating findings from %d to %d", len(all_findings), max_report)
        all_findings = all_findings[:max_report]

    elapsed = time.time() - start_time

    # Calculate pinning percentage. Zero dependencies counts as full
    # coverage so an empty project is not penalized.
    pinning_pct = (total_pinned / total_deps * 100.0) if total_deps > 0 else 100.0

    # Aggregation
    severity_counts = aggregate_by_severity(all_findings)
    pattern_counts = aggregate_by_pattern(all_findings)
    score = compute_supply_chain_score(all_findings, pinning_pct)
    verdict = config.get_verdict(score)

    logger.info(
        "Dependency scan complete: %d files, %d deps, %d findings, "
        "pinning=%.1f%%, score=%d in %.2fs",
        len(dep_files), total_deps, len(all_findings),
        pinning_pct, score, elapsed,
    )

    # Audit log
    config.log_audit_event(
        action="dependency_scan",
        target=str(target),
        result=f"score={score}, findings={len(all_findings)}, verdict={verdict['label']}",
        details={
            "dependency_files": len(dep_files),
            "total_dependencies": total_deps,
            "total_pinned": total_pinned,
            "pinning_coverage_pct": round(pinning_pct, 1),
            "severity_counts": severity_counts,
            "pattern_counts": pattern_counts,
            "duration_seconds": round(elapsed, 3),
        },
    )

    # Build report (the JSON structure is also the return value, even in
    # text output mode).
    report = build_json_report(
        target=str(target),
        dep_files=dep_files,
        total_deps=total_deps,
        total_pinned=total_pinned,
        pinning_pct=pinning_pct,
        findings=all_findings,
        severity_counts=severity_counts,
        pattern_counts=pattern_counts,
        score=score,
        verdict=verdict,
        elapsed=elapsed,
    )

    # Output
    if output_format == "json":
        print(json.dumps(report, indent=2, ensure_ascii=False))
    else:
        print(format_text_report(
            target=str(target),
            dep_files=dep_files,
            total_deps=total_deps,
            total_pinned=total_pinned,
            pinning_pct=pinning_pct,
            findings=all_findings,
            severity_counts=severity_counts,
            pattern_counts=pattern_counts,
            score=score,
            verdict=verdict,
            elapsed=elapsed,
        ))

    return report
1265
+
1266
+
1267
+ # ---------------------------------------------------------------------------
1268
+ # CLI
1269
+ # ---------------------------------------------------------------------------
1270
+
1271
def _build_cli_parser() -> argparse.ArgumentParser:
    """Construct the command-line argument parser for the scanner CLI."""
    parser = argparse.ArgumentParser(
        description="007 Dependency Scanner -- Supply chain and dependency security analyzer.",
        epilog=(
            "Examples:\n"
            "  python dependency_scanner.py --target ./my-project\n"
            "  python dependency_scanner.py --target ./my-project --output json\n"
            "  python dependency_scanner.py --target ./my-project --verbose"
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--target",
        required=True,
        help="Path to the directory to scan (required).",
    )
    parser.add_argument(
        "--output",
        choices=["text", "json"],
        default="text",
        help="Output format: 'text' (default) or 'json'.",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        default=False,
        help="Enable verbose/debug logging.",
    )
    return parser


if __name__ == "__main__":
    cli_args = _build_cli_parser().parse_args()
    run_scan(
        target_path=cli_args.target,
        output_format=cli_args.output,
        verbose=cli_args.verbose,
    )