bps-kit 1.0.1 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/templates/.agents/agents/backend-specialist.md +263 -0
- package/templates/.agents/agents/code-archaeologist.md +106 -0
- package/templates/.agents/agents/database-architect.md +226 -0
- package/templates/.agents/agents/debugger.md +225 -0
- package/templates/.agents/agents/devops-engineer.md +242 -0
- package/templates/.agents/agents/documentation-writer.md +104 -0
- package/templates/.agents/agents/explorer-agent.md +73 -0
- package/templates/.agents/agents/frontend-specialist.md +593 -0
- package/templates/.agents/agents/game-developer.md +162 -0
- package/templates/.agents/agents/mobile-developer.md +377 -0
- package/templates/.agents/agents/orchestrator.md +416 -0
- package/templates/.agents/agents/penetration-tester.md +188 -0
- package/templates/.agents/agents/performance-optimizer.md +187 -0
- package/templates/.agents/agents/product-manager.md +112 -0
- package/templates/.agents/agents/product-owner.md +95 -0
- package/templates/.agents/agents/project-planner.md +406 -0
- package/templates/.agents/agents/qa-automation-engineer.md +103 -0
- package/templates/.agents/agents/security-auditor.md +170 -0
- package/templates/.agents/agents/seo-specialist.md +111 -0
- package/templates/.agents/agents/test-engineer.md +158 -0
- package/templates/.agents/rules/GEMINI.md +219 -0
- package/templates/.agents/scripts/auto_preview.py +148 -0
- package/templates/.agents/scripts/checklist.py +217 -0
- package/templates/.agents/scripts/session_manager.py +120 -0
- package/templates/.agents/scripts/verify_all.py +327 -0
- package/templates/.agents/workflows/brainstorm.md +113 -0
- package/templates/.agents/workflows/create.md +59 -0
- package/templates/.agents/workflows/debug.md +103 -0
- package/templates/.agents/workflows/deploy.md +176 -0
- package/templates/.agents/workflows/enhance.md +63 -0
- package/templates/.agents/workflows/orchestrate.md +237 -0
- package/templates/.agents/workflows/plan.md +89 -0
- package/templates/.agents/workflows/preview.md +81 -0
- package/templates/.agents/workflows/setup-brain.md +39 -0
- package/templates/.agents/workflows/status.md +86 -0
- package/templates/.agents/workflows/test.md +144 -0
- package/templates/.agents/workflows/ui-ux-pro-max.md +296 -0
- package/templates/skills_normal/api-patterns/scripts/api_validator.py +211 -0
- package/templates/skills_normal/database-design/scripts/schema_validator.py +172 -0
- package/templates/skills_normal/frontend-design/scripts/accessibility_checker.py +183 -0
- package/templates/skills_normal/frontend-design/scripts/ux_audit.py +722 -0
- package/templates/skills_normal/git-pushing/scripts/smart_commit.sh +19 -0
- package/templates/skills_normal/lint-and-validate/scripts/lint_runner.py +184 -0
- package/templates/skills_normal/lint-and-validate/scripts/type_coverage.py +173 -0
- package/templates/skills_normal/performance-profiling/scripts/lighthouse_audit.py +76 -0
- package/templates/skills_normal/senior-fullstack/scripts/code_quality_analyzer.py +114 -0
- package/templates/skills_normal/senior-fullstack/scripts/fullstack_scaffolder.py +114 -0
- package/templates/skills_normal/senior-fullstack/scripts/project_scaffolder.py +114 -0
- package/templates/skills_normal/seo-fundamentals/scripts/seo_checker.py +219 -0
- package/templates/skills_normal/testing-patterns/scripts/test_runner.py +219 -0
- package/templates/skills_normal/vulnerability-scanner/scripts/security_scan.py +458 -0
- package/templates/vault/007/scripts/config.py +472 -0
- package/templates/vault/007/scripts/full_audit.py +1306 -0
- package/templates/vault/007/scripts/quick_scan.py +481 -0
- package/templates/vault/007/scripts/requirements.txt +26 -0
- package/templates/vault/007/scripts/scanners/__init__.py +0 -0
- package/templates/vault/007/scripts/scanners/dependency_scanner.py +1305 -0
- package/templates/vault/007/scripts/scanners/injection_scanner.py +1104 -0
- package/templates/vault/007/scripts/scanners/secrets_scanner.py +1008 -0
- package/templates/vault/007/scripts/score_calculator.py +693 -0
- package/templates/vault/agent-orchestrator/scripts/match_skills.py +329 -0
- package/templates/vault/agent-orchestrator/scripts/orchestrate.py +304 -0
- package/templates/vault/agent-orchestrator/scripts/requirements.txt +1 -0
- package/templates/vault/agent-orchestrator/scripts/scan_registry.py +508 -0
- package/templates/vault/ai-studio-image/scripts/config.py +613 -0
- package/templates/vault/ai-studio-image/scripts/generate.py +630 -0
- package/templates/vault/ai-studio-image/scripts/prompt_engine.py +424 -0
- package/templates/vault/ai-studio-image/scripts/requirements.txt +4 -0
- package/templates/vault/ai-studio-image/scripts/templates.py +349 -0
- package/templates/vault/android_ui_verification/scripts/verify_ui.sh +32 -0
- package/templates/vault/apify-audience-analysis/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-brand-reputation-monitoring/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-competitor-intelligence/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-content-analytics/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-ecommerce/reference/scripts/package.json +3 -0
- package/templates/vault/apify-ecommerce/reference/scripts/run_actor.js +369 -0
- package/templates/vault/apify-influencer-discovery/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-lead-generation/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-market-research/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-trend-analysis/reference/scripts/run_actor.js +363 -0
- package/templates/vault/apify-ultimate-scraper/reference/scripts/run_actor.js +363 -0
- package/templates/vault/audio-transcriber/scripts/install-requirements.sh +190 -0
- package/templates/vault/audio-transcriber/scripts/transcribe.py +486 -0
- package/templates/vault/claude-monitor/scripts/api_bench.py +240 -0
- package/templates/vault/claude-monitor/scripts/config.py +69 -0
- package/templates/vault/claude-monitor/scripts/health_check.py +362 -0
- package/templates/vault/claude-monitor/scripts/monitor.py +296 -0
- package/templates/vault/content-creator/scripts/brand_voice_analyzer.py +185 -0
- package/templates/vault/content-creator/scripts/seo_optimizer.py +419 -0
- package/templates/vault/context-agent/scripts/active_context.py +227 -0
- package/templates/vault/context-agent/scripts/compressor.py +149 -0
- package/templates/vault/context-agent/scripts/config.py +69 -0
- package/templates/vault/context-agent/scripts/context_loader.py +155 -0
- package/templates/vault/context-agent/scripts/context_manager.py +302 -0
- package/templates/vault/context-agent/scripts/models.py +103 -0
- package/templates/vault/context-agent/scripts/project_registry.py +132 -0
- package/templates/vault/context-agent/scripts/requirements.txt +6 -0
- package/templates/vault/context-agent/scripts/search.py +115 -0
- package/templates/vault/context-agent/scripts/session_parser.py +206 -0
- package/templates/vault/context-agent/scripts/session_summary.py +319 -0
- package/templates/vault/context-guardian/scripts/context_snapshot.py +229 -0
- package/templates/vault/docx/ooxml/scripts/pack.py +159 -0
- package/templates/vault/docx/ooxml/scripts/unpack.py +29 -0
- package/templates/vault/docx/ooxml/scripts/validate.py +69 -0
- package/templates/vault/docx/ooxml/scripts/validation/__init__.py +15 -0
- package/templates/vault/docx/ooxml/scripts/validation/base.py +951 -0
- package/templates/vault/docx/ooxml/scripts/validation/docx.py +274 -0
- package/templates/vault/docx/ooxml/scripts/validation/pptx.py +315 -0
- package/templates/vault/docx/ooxml/scripts/validation/redlining.py +279 -0
- package/templates/vault/docx/scripts/__init__.py +1 -0
- package/templates/vault/docx/scripts/document.py +1276 -0
- package/templates/vault/docx/scripts/templates/comments.xml +3 -0
- package/templates/vault/docx/scripts/templates/commentsExtended.xml +3 -0
- package/templates/vault/docx/scripts/templates/commentsExtensible.xml +3 -0
- package/templates/vault/docx/scripts/templates/commentsIds.xml +3 -0
- package/templates/vault/docx/scripts/templates/people.xml +3 -0
- package/templates/vault/docx/scripts/utilities.py +374 -0
- package/templates/vault/docx-official/ooxml/scripts/pack.py +159 -0
- package/templates/vault/docx-official/ooxml/scripts/unpack.py +29 -0
- package/templates/vault/docx-official/ooxml/scripts/validate.py +69 -0
- package/templates/vault/docx-official/ooxml/scripts/validation/__init__.py +15 -0
- package/templates/vault/docx-official/ooxml/scripts/validation/base.py +951 -0
- package/templates/vault/docx-official/ooxml/scripts/validation/docx.py +274 -0
- package/templates/vault/docx-official/ooxml/scripts/validation/pptx.py +315 -0
- package/templates/vault/docx-official/ooxml/scripts/validation/redlining.py +279 -0
- package/templates/vault/docx-official/scripts/__init__.py +1 -0
- package/templates/vault/docx-official/scripts/document.py +1276 -0
- package/templates/vault/docx-official/scripts/templates/comments.xml +3 -0
- package/templates/vault/docx-official/scripts/templates/commentsExtended.xml +3 -0
- package/templates/vault/docx-official/scripts/templates/commentsExtensible.xml +3 -0
- package/templates/vault/docx-official/scripts/templates/commentsIds.xml +3 -0
- package/templates/vault/docx-official/scripts/templates/people.xml +3 -0
- package/templates/vault/docx-official/scripts/utilities.py +374 -0
- package/templates/vault/geo-fundamentals/scripts/geo_checker.py +289 -0
- package/templates/vault/helm-chart-scaffolding/scripts/validate-chart.sh +244 -0
- package/templates/vault/i18n-localization/scripts/i18n_checker.py +241 -0
- package/templates/vault/instagram/scripts/account_setup.py +233 -0
- package/templates/vault/instagram/scripts/analyze.py +221 -0
- package/templates/vault/instagram/scripts/api_client.py +444 -0
- package/templates/vault/instagram/scripts/auth.py +411 -0
- package/templates/vault/instagram/scripts/comments.py +160 -0
- package/templates/vault/instagram/scripts/config.py +111 -0
- package/templates/vault/instagram/scripts/db.py +467 -0
- package/templates/vault/instagram/scripts/export.py +138 -0
- package/templates/vault/instagram/scripts/governance.py +233 -0
- package/templates/vault/instagram/scripts/hashtags.py +114 -0
- package/templates/vault/instagram/scripts/insights.py +170 -0
- package/templates/vault/instagram/scripts/media.py +65 -0
- package/templates/vault/instagram/scripts/messages.py +103 -0
- package/templates/vault/instagram/scripts/profile.py +58 -0
- package/templates/vault/instagram/scripts/publish.py +449 -0
- package/templates/vault/instagram/scripts/requirements.txt +5 -0
- package/templates/vault/instagram/scripts/run_all.py +189 -0
- package/templates/vault/instagram/scripts/schedule.py +189 -0
- package/templates/vault/instagram/scripts/serve_api.py +234 -0
- package/templates/vault/instagram/scripts/templates.py +155 -0
- package/templates/vault/junta-leiloeiros/scripts/db.py +216 -0
- package/templates/vault/junta-leiloeiros/scripts/export.py +137 -0
- package/templates/vault/junta-leiloeiros/scripts/requirements.txt +15 -0
- package/templates/vault/junta-leiloeiros/scripts/run_all.py +190 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/__init__.py +4 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/base_scraper.py +209 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/generic_scraper.py +110 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucap.py +110 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/juceac.py +72 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/juceal.py +72 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/juceb.py +68 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucec.py +63 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucema.py +211 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucemg.py +218 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucep.py +70 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucepa.py +74 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucepar.py +80 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucepe.py +78 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucepi.py +69 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucer.py +256 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucerja.py +170 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucern.py +71 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucesc.py +89 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucesp.py +233 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucetins.py +134 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucis_df.py +63 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/jucisrs.py +299 -0
- package/templates/vault/junta-leiloeiros/scripts/scraper/states.py +99 -0
- package/templates/vault/junta-leiloeiros/scripts/serve_api.py +164 -0
- package/templates/vault/junta-leiloeiros/scripts/web_scraper_fallback.py +233 -0
- package/templates/vault/last30days/scripts/last30days.py +521 -0
- package/templates/vault/last30days/scripts/lib/__init__.py +1 -0
- package/templates/vault/last30days/scripts/lib/cache.py +152 -0
- package/templates/vault/last30days/scripts/lib/dates.py +124 -0
- package/templates/vault/last30days/scripts/lib/dedupe.py +120 -0
- package/templates/vault/last30days/scripts/lib/env.py +149 -0
- package/templates/vault/last30days/scripts/lib/http.py +152 -0
- package/templates/vault/last30days/scripts/lib/models.py +175 -0
- package/templates/vault/last30days/scripts/lib/normalize.py +160 -0
- package/templates/vault/last30days/scripts/lib/openai_reddit.py +230 -0
- package/templates/vault/last30days/scripts/lib/reddit_enrich.py +232 -0
- package/templates/vault/last30days/scripts/lib/render.py +383 -0
- package/templates/vault/last30days/scripts/lib/schema.py +336 -0
- package/templates/vault/last30days/scripts/lib/score.py +311 -0
- package/templates/vault/last30days/scripts/lib/ui.py +324 -0
- package/templates/vault/last30days/scripts/lib/websearch.py +401 -0
- package/templates/vault/last30days/scripts/lib/xai_x.py +217 -0
- package/templates/vault/leiloeiro-avaliacao/scripts/governance.py +106 -0
- package/templates/vault/leiloeiro-avaliacao/scripts/requirements.txt +1 -0
- package/templates/vault/leiloeiro-edital/scripts/governance.py +106 -0
- package/templates/vault/leiloeiro-edital/scripts/requirements.txt +1 -0
- package/templates/vault/leiloeiro-ia/scripts/governance.py +106 -0
- package/templates/vault/leiloeiro-ia/scripts/requirements.txt +1 -0
- package/templates/vault/leiloeiro-juridico/scripts/governance.py +106 -0
- package/templates/vault/leiloeiro-juridico/scripts/requirements.txt +1 -0
- package/templates/vault/leiloeiro-mercado/scripts/governance.py +106 -0
- package/templates/vault/leiloeiro-mercado/scripts/requirements.txt +1 -0
- package/templates/vault/leiloeiro-risco/scripts/governance.py +106 -0
- package/templates/vault/leiloeiro-risco/scripts/requirements.txt +1 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/database.ts +24 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/db.ts +35 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/index.ts +2 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/migrations.ts +31 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/db/schema.sql +8 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/index.ts +44 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/routes/todos.ts +155 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/backend/src/types/index.ts +35 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/App.css +384 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/App.tsx +81 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/api/todos.ts +57 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/ConfirmDialog.tsx +26 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/EmptyState.tsx +8 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/TodoForm.tsx +43 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/TodoItem.tsx +36 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/components/TodoList.tsx +27 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/hooks/useTodos.ts +81 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/index.css +48 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/main.tsx +10 -0
- package/templates/vault/loki-mode/examples/todo-app-generated/frontend/src/vite-env.d.ts +1 -0
- package/templates/vault/loki-mode/scripts/export-to-vibe-kanban.sh +178 -0
- package/templates/vault/loki-mode/scripts/loki-wrapper.sh +281 -0
- package/templates/vault/loki-mode/scripts/take-screenshots.js +55 -0
- package/templates/vault/matematico-tao/scripts/complexity_analyzer.py +544 -0
- package/templates/vault/matematico-tao/scripts/dependency_graph.py +538 -0
- package/templates/vault/mcp-builder/scripts/connections.py +151 -0
- package/templates/vault/mcp-builder/scripts/evaluation.py +373 -0
- package/templates/vault/mcp-builder/scripts/example_evaluation.xml +22 -0
- package/templates/vault/mcp-builder/scripts/requirements.txt +2 -0
- package/templates/vault/mobile-design/scripts/mobile_audit.py +670 -0
- package/templates/vault/notebooklm/scripts/__init__.py +81 -0
- package/templates/vault/notebooklm/scripts/ask_question.py +256 -0
- package/templates/vault/notebooklm/scripts/auth_manager.py +358 -0
- package/templates/vault/notebooklm/scripts/browser_session.py +255 -0
- package/templates/vault/notebooklm/scripts/browser_utils.py +107 -0
- package/templates/vault/notebooklm/scripts/cleanup_manager.py +302 -0
- package/templates/vault/notebooklm/scripts/config.py +44 -0
- package/templates/vault/notebooklm/scripts/notebook_manager.py +410 -0
- package/templates/vault/notebooklm/scripts/run.py +102 -0
- package/templates/vault/notebooklm/scripts/setup_environment.py +204 -0
- package/templates/vault/pdf/scripts/check_bounding_boxes.py +70 -0
- package/templates/vault/pdf/scripts/check_bounding_boxes_test.py +226 -0
- package/templates/vault/pdf/scripts/check_fillable_fields.py +12 -0
- package/templates/vault/pdf/scripts/convert_pdf_to_images.py +35 -0
- package/templates/vault/pdf/scripts/create_validation_image.py +41 -0
- package/templates/vault/pdf/scripts/extract_form_field_info.py +152 -0
- package/templates/vault/pdf/scripts/fill_fillable_fields.py +114 -0
- package/templates/vault/pdf/scripts/fill_pdf_form_with_annotations.py +108 -0
- package/templates/vault/pdf-official/scripts/check_bounding_boxes.py +70 -0
- package/templates/vault/pdf-official/scripts/check_bounding_boxes_test.py +226 -0
- package/templates/vault/pdf-official/scripts/check_fillable_fields.py +12 -0
- package/templates/vault/pdf-official/scripts/convert_pdf_to_images.py +35 -0
- package/templates/vault/pdf-official/scripts/create_validation_image.py +41 -0
- package/templates/vault/pdf-official/scripts/extract_form_field_info.py +152 -0
- package/templates/vault/pdf-official/scripts/fill_fillable_fields.py +114 -0
- package/templates/vault/pdf-official/scripts/fill_pdf_form_with_annotations.py +108 -0
- package/templates/vault/planning-with-files/scripts/check-complete.sh +44 -0
- package/templates/vault/planning-with-files/scripts/init-session.sh +120 -0
- package/templates/vault/pptx/ooxml/scripts/pack.py +159 -0
- package/templates/vault/pptx/ooxml/scripts/unpack.py +29 -0
- package/templates/vault/pptx/ooxml/scripts/validate.py +69 -0
- package/templates/vault/pptx/ooxml/scripts/validation/__init__.py +15 -0
- package/templates/vault/pptx/ooxml/scripts/validation/base.py +951 -0
- package/templates/vault/pptx/ooxml/scripts/validation/docx.py +274 -0
- package/templates/vault/pptx/ooxml/scripts/validation/pptx.py +315 -0
- package/templates/vault/pptx/ooxml/scripts/validation/redlining.py +279 -0
- package/templates/vault/pptx/scripts/html2pptx.js +979 -0
- package/templates/vault/pptx/scripts/inventory.py +1020 -0
- package/templates/vault/pptx/scripts/rearrange.py +231 -0
- package/templates/vault/pptx/scripts/replace.py +385 -0
- package/templates/vault/pptx/scripts/thumbnail.py +450 -0
- package/templates/vault/pptx-official/ooxml/scripts/pack.py +159 -0
- package/templates/vault/pptx-official/ooxml/scripts/unpack.py +29 -0
- package/templates/vault/pptx-official/ooxml/scripts/validate.py +69 -0
- package/templates/vault/pptx-official/ooxml/scripts/validation/__init__.py +15 -0
- package/templates/vault/pptx-official/ooxml/scripts/validation/base.py +951 -0
- package/templates/vault/pptx-official/ooxml/scripts/validation/docx.py +274 -0
- package/templates/vault/pptx-official/ooxml/scripts/validation/pptx.py +315 -0
- package/templates/vault/pptx-official/ooxml/scripts/validation/redlining.py +279 -0
- package/templates/vault/pptx-official/scripts/html2pptx.js +979 -0
- package/templates/vault/pptx-official/scripts/inventory.py +1020 -0
- package/templates/vault/pptx-official/scripts/rearrange.py +231 -0
- package/templates/vault/pptx-official/scripts/replace.py +385 -0
- package/templates/vault/pptx-official/scripts/thumbnail.py +450 -0
- package/templates/vault/product-manager-toolkit/scripts/customer_interview_analyzer.py +441 -0
- package/templates/vault/product-manager-toolkit/scripts/rice_prioritizer.py +296 -0
- package/templates/vault/prompt-engineering-patterns/scripts/optimize-prompt.py +279 -0
- package/templates/vault/scripts/.skill_cache.json +7538 -0
- package/templates/vault/scripts/skill_search.py +228 -0
- package/templates/vault/senior-architect/scripts/architecture_diagram_generator.py +114 -0
- package/templates/vault/senior-architect/scripts/dependency_analyzer.py +114 -0
- package/templates/vault/senior-architect/scripts/project_architect.py +114 -0
- package/templates/vault/shopify-development/scripts/requirements.txt +19 -0
- package/templates/vault/shopify-development/scripts/shopify_graphql.py +428 -0
- package/templates/vault/shopify-development/scripts/shopify_init.py +441 -0
- package/templates/vault/shopify-development/scripts/tests/test_shopify_init.py +379 -0
- package/templates/vault/skill-creator/scripts/init_skill.py +303 -0
- package/templates/vault/skill-creator/scripts/package_skill.py +110 -0
- package/templates/vault/skill-creator/scripts/quick_validate.py +95 -0
- package/templates/vault/skill-installer/scripts/detect_skills.py +318 -0
- package/templates/vault/skill-installer/scripts/install_skill.py +1708 -0
- package/templates/vault/skill-installer/scripts/package_skill.py +417 -0
- package/templates/vault/skill-installer/scripts/requirements.txt +1 -0
- package/templates/vault/skill-installer/scripts/validate_skill.py +430 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/__init__.py +13 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/code_quality.py +247 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/cross_skill.py +134 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/dependencies.py +121 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/documentation.py +189 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/governance_audit.py +153 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/performance.py +164 -0
- package/templates/vault/skill-sentinel/scripts/analyzers/security.py +189 -0
- package/templates/vault/skill-sentinel/scripts/config.py +158 -0
- package/templates/vault/skill-sentinel/scripts/cost_optimizer.py +146 -0
- package/templates/vault/skill-sentinel/scripts/db.py +354 -0
- package/templates/vault/skill-sentinel/scripts/governance.py +58 -0
- package/templates/vault/skill-sentinel/scripts/recommender.py +228 -0
- package/templates/vault/skill-sentinel/scripts/report_generator.py +224 -0
- package/templates/vault/skill-sentinel/scripts/requirements.txt +1 -0
- package/templates/vault/skill-sentinel/scripts/run_audit.py +290 -0
- package/templates/vault/skill-sentinel/scripts/scanner.py +271 -0
- package/templates/vault/stability-ai/scripts/config.py +266 -0
- package/templates/vault/stability-ai/scripts/generate.py +687 -0
- package/templates/vault/stability-ai/scripts/requirements.txt +4 -0
- package/templates/vault/stability-ai/scripts/styles.py +174 -0
- package/templates/vault/telegram/assets/boilerplate/nodejs/src/bot-client.ts +86 -0
- package/templates/vault/telegram/assets/boilerplate/nodejs/src/handlers.ts +79 -0
- package/templates/vault/telegram/assets/boilerplate/nodejs/src/index.ts +32 -0
- package/templates/vault/telegram/scripts/send_message.py +143 -0
- package/templates/vault/telegram/scripts/setup_project.py +103 -0
- package/templates/vault/telegram/scripts/test_bot.py +144 -0
- package/templates/vault/typescript-expert/scripts/ts_diagnostic.py +203 -0
- package/templates/vault/ui-ux-pro-max/scripts/__pycache__/core.cpython-314.pyc +0 -0
- package/templates/vault/ui-ux-pro-max/scripts/__pycache__/design_system.cpython-314.pyc +0 -0
- package/templates/vault/ui-ux-pro-max/scripts/core.py +257 -0
- package/templates/vault/ui-ux-pro-max/scripts/design_system.py +487 -0
- package/templates/vault/ui-ux-pro-max/scripts/search.py +76 -0
- package/templates/vault/videodb/scripts/ws_listener.py +204 -0
- package/templates/vault/web-artifacts-builder/scripts/bundle-artifact.sh +54 -0
- package/templates/vault/web-artifacts-builder/scripts/init-artifact.sh +322 -0
- package/templates/vault/web-artifacts-builder/scripts/shadcn-components.tar.gz +0 -0
- package/templates/vault/webapp-testing/scripts/with_server.py +106 -0
- package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/index.ts +125 -0
- package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/template-manager.ts +67 -0
- package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/types.ts +216 -0
- package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/webhook-handler.ts +173 -0
- package/templates/vault/whatsapp-cloud-api/assets/boilerplate/nodejs/src/whatsapp-client.ts +193 -0
- package/templates/vault/whatsapp-cloud-api/scripts/send_test_message.py +137 -0
- package/templates/vault/whatsapp-cloud-api/scripts/setup_project.py +118 -0
- package/templates/vault/whatsapp-cloud-api/scripts/validate_config.py +190 -0
- package/templates/vault/youtube-summarizer/scripts/extract-transcript.py +65 -0
- package/templates/vault/youtube-summarizer/scripts/install-dependencies.sh +28 -0
|
@@ -0,0 +1,521 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
last30days - Research a topic from the last 30 days on Reddit + X.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python3 last30days.py <topic> [options]
|
|
7
|
+
|
|
8
|
+
Options:
|
|
9
|
+
--mock Use fixtures instead of real API calls
|
|
10
|
+
--emit=MODE Output mode: compact|json|md|context|path (default: compact)
|
|
11
|
+
--sources=MODE Source selection: auto|reddit|x|both (default: auto)
|
|
12
|
+
--quick Faster research with fewer sources (8-12 each)
|
|
13
|
+
--deep Comprehensive research with more sources (50-70 Reddit, 40-60 X)
|
|
14
|
+
--debug Enable verbose debug logging
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import argparse
|
|
18
|
+
import json
|
|
19
|
+
import os
|
|
20
|
+
import sys
|
|
21
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
22
|
+
from datetime import datetime, timezone
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
|
|
25
|
+
# Add lib to path
|
|
26
|
+
SCRIPT_DIR = Path(__file__).parent.resolve()
|
|
27
|
+
sys.path.insert(0, str(SCRIPT_DIR))
|
|
28
|
+
|
|
29
|
+
from lib import (
|
|
30
|
+
dates,
|
|
31
|
+
dedupe,
|
|
32
|
+
env,
|
|
33
|
+
http,
|
|
34
|
+
models,
|
|
35
|
+
normalize,
|
|
36
|
+
openai_reddit,
|
|
37
|
+
reddit_enrich,
|
|
38
|
+
render,
|
|
39
|
+
schema,
|
|
40
|
+
score,
|
|
41
|
+
ui,
|
|
42
|
+
websearch,
|
|
43
|
+
xai_x,
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def load_fixture(name: str, base_dir=None) -> dict:
    """Load a JSON fixture file by name.

    Args:
        name: Fixture filename (e.g. "openai_sample.json").
        base_dir: Directory containing fixtures. Defaults to the sibling
            "fixtures" directory next to this script (SCRIPT_DIR.parent).

    Returns:
        The parsed JSON content, or {} when the fixture does not exist.
    """
    if base_dir is None:
        base_dir = SCRIPT_DIR.parent / "fixtures"
    fixture_path = Path(base_dir) / name
    if fixture_path.exists():
        # Pin the encoding: JSON fixtures are UTF-8 by spec, and the
        # platform-default locale encoding can differ (e.g. on Windows).
        with open(fixture_path, encoding="utf-8") as f:
            return json.load(f)
    return {}
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _search_reddit(
    topic: str,
    config: dict,
    selected_models: dict,
    from_date: str,
    to_date: str,
    depth: str,
    mock: bool,
) -> tuple:
    """Search Reddit via OpenAI (runs in thread).

    In mock mode the response comes from a local fixture; otherwise the
    OpenAI API is queried. If the first pass yields few results, a single
    retry is made with a simplified core-subject query and any new URLs
    are merged in.

    Returns:
        Tuple of (reddit_items, raw_openai, error)
    """
    error_msg = None

    if mock:
        response = load_fixture("openai_sample.json")
    else:
        response = None
        try:
            response = openai_reddit.search_reddit(
                config["OPENAI_API_KEY"],
                selected_models["openai"],
                topic,
                from_date,
                to_date,
                depth=depth,
            )
        except http.HTTPError as exc:
            response = {"error": str(exc)}
            error_msg = f"API error: {exc}"
        except Exception as exc:
            response = {"error": str(exc)}
            error_msg = f"{type(exc).__name__}: {exc}"

    items = openai_reddit.parse_reddit_response(response or {})

    # Thin first pass and no error: retry once with the simplified core
    # subject, merging in only results whose URL we have not seen yet.
    if len(items) < 5 and not mock and error_msg is None:
        core = openai_reddit._extract_core_subject(topic)
        if core.lower() != topic.lower():
            try:
                second_raw = openai_reddit.search_reddit(
                    config["OPENAI_API_KEY"],
                    selected_models["openai"],
                    core,
                    from_date,
                    to_date,
                    depth=depth,
                )
                seen_urls = {entry.get("url") for entry in items}
                for entry in openai_reddit.parse_reddit_response(second_raw):
                    if entry.get("url") not in seen_urls:
                        items.append(entry)
            except Exception:
                # Best-effort retry: keep whatever the first pass produced.
                pass

    return items, response, error_msg
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _search_x(
    topic: str,
    config: dict,
    selected_models: dict,
    from_date: str,
    to_date: str,
    depth: str,
    mock: bool,
) -> tuple:
    """Query xAI for recent X posts about *topic* (runs in a worker thread).

    Returns:
        Tuple of (x_items, raw_xai, error) where raw_xai is the raw API
        payload (or an {"error": ...} dict on failure) and error is None
        on success.
    """
    failure = None

    if mock:
        payload = load_fixture("xai_sample.json")
    else:
        try:
            payload = xai_x.search_x(
                config["XAI_API_KEY"],
                selected_models["xai"],
                topic,
                from_date,
                to_date,
                depth=depth,
            )
        except http.HTTPError as exc:
            payload = {"error": str(exc)}
            failure = f"API error: {exc}"
        except Exception as exc:
            # Defensive catch-all: convert any unexpected failure into an
            # error payload so the caller's thread never sees a raise.
            payload = {"error": str(exc)}
            failure = f"{type(exc).__name__}: {exc}"

    # Parse response (an error payload or None parses to an empty list)
    parsed_items = xai_x.parse_x_response(payload or {})

    return parsed_items, payload, failure
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def run_research(
    topic: str,
    sources: str,
    config: dict,
    selected_models: dict,
    from_date: str,
    to_date: str,
    depth: str = "default",
    mock: bool = False,
    progress: ui.ProgressDisplay = None,  # optional; None silences all UI updates
) -> tuple:
    """Run the research pipeline.

    Fires the Reddit and X searches in parallel worker threads, then
    sequentially enriches each Reddit hit with thread data.

    Args:
        topic: Subject to research.
        sources: Source-selection string ("both", "reddit", "x", "all",
            "web", "reddit-web", "x-web"); decides which searches run.
        config: API-key configuration mapping.
        selected_models: Per-provider model ids (keys "openai", "xai").
        from_date: Start of the date window.
        to_date: End of the date window.
        depth: Search depth preset ("quick", "default", "deep").
        mock: When True, load fixtures instead of calling APIs.
        progress: Progress display to update, or None.

    Returns:
        Tuple of (reddit_items, x_items, web_needed, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error)

    Note: web_needed is True when WebSearch should be performed by Claude.
    The script outputs a marker and Claude handles WebSearch in its session.
    """
    reddit_items = []
    x_items = []
    raw_openai = None
    raw_xai = None
    raw_reddit_enriched = []
    reddit_error = None
    x_error = None

    # Check if WebSearch is needed (always needed in web-only mode)
    web_needed = sources in ("all", "web", "reddit-web", "x-web")

    # Web-only mode: no API calls needed, Claude handles everything
    if sources == "web":
        if progress:
            progress.start_web_only()
            progress.end_web_only()
        return reddit_items, x_items, True, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error

    # Determine which searches to run
    run_reddit = sources in ("both", "reddit", "all", "reddit-web")
    run_x = sources in ("both", "x", "all", "x-web")

    # Run Reddit and X searches in parallel
    reddit_future = None
    x_future = None

    with ThreadPoolExecutor(max_workers=2) as executor:
        # Submit both searches
        if run_reddit:
            if progress:
                progress.start_reddit()
            reddit_future = executor.submit(
                _search_reddit, topic, config, selected_models,
                from_date, to_date, depth, mock
            )

        if run_x:
            if progress:
                progress.start_x()
            x_future = executor.submit(
                _search_x, topic, config, selected_models,
                from_date, to_date, depth, mock
            )

        # Collect results. The workers convert API failures into (items,
        # raw, error) tuples themselves; the except blocks below only catch
        # failures raised by the future machinery itself.
        if reddit_future:
            try:
                reddit_items, raw_openai, reddit_error = reddit_future.result()
                if reddit_error and progress:
                    progress.show_error(f"Reddit error: {reddit_error}")
            except Exception as e:
                reddit_error = f"{type(e).__name__}: {e}"
                if progress:
                    progress.show_error(f"Reddit error: {e}")
            if progress:
                progress.end_reddit(len(reddit_items))

        if x_future:
            try:
                x_items, raw_xai, x_error = x_future.result()
                if x_error and progress:
                    progress.show_error(f"X error: {x_error}")
            except Exception as e:
                x_error = f"{type(e).__name__}: {e}"
                if progress:
                    progress.show_error(f"X error: {e}")
            if progress:
                progress.end_x(len(x_items))

    # Enrich Reddit items with real data (sequential, but with error handling per-item)
    if reddit_items:
        if progress:
            progress.start_reddit_enrich(1, len(reddit_items))

        for i, item in enumerate(reddit_items):
            if progress and i > 0:
                progress.update_reddit_enrich(i + 1, len(reddit_items))

            try:
                if mock:
                    mock_thread = load_fixture("reddit_thread_sample.json")
                    reddit_items[i] = reddit_enrich.enrich_reddit_item(item, mock_thread)
                else:
                    reddit_items[i] = reddit_enrich.enrich_reddit_item(item)
            except Exception as e:
                # Log but don't crash - keep the unenriched item
                if progress:
                    progress.show_error(f"Enrich failed for {item.get('url', 'unknown')}: {e}")

            # Record whatever we have (enriched or original) for raw output
            raw_reddit_enriched.append(reddit_items[i])

        if progress:
            progress.end_reddit_enrich()

    return reddit_items, x_items, web_needed, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
def main():
    """CLI entry point: parse args, validate sources, run the research
    pipeline, post-process results, and emit the report."""
    parser = argparse.ArgumentParser(
        description="Research a topic from the last 30 days on Reddit + X"
    )
    parser.add_argument("topic", nargs="?", help="Topic to research")
    parser.add_argument("--mock", action="store_true", help="Use fixtures")
    parser.add_argument(
        "--emit",
        choices=["compact", "json", "md", "context", "path"],
        default="compact",
        help="Output mode",
    )
    parser.add_argument(
        "--sources",
        choices=["auto", "reddit", "x", "both"],
        default="auto",
        help="Source selection",
    )
    parser.add_argument(
        "--quick",
        action="store_true",
        help="Faster research with fewer sources (8-12 each)",
    )
    parser.add_argument(
        "--deep",
        action="store_true",
        help="Comprehensive research with more sources (50-70 Reddit, 40-60 X)",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable verbose debug logging",
    )
    parser.add_argument(
        "--include-web",
        action="store_true",
        help="Include general web search alongside Reddit/X (lower weighted)",
    )

    args = parser.parse_args()

    # Enable debug logging if requested
    if args.debug:
        os.environ["LAST30DAYS_DEBUG"] = "1"
        # Re-import http to pick up debug flag
        from lib import http as http_module
        http_module.DEBUG = True

    # Determine depth (--quick and --deep are mutually exclusive)
    if args.quick and args.deep:
        print("Error: Cannot use both --quick and --deep", file=sys.stderr)
        sys.exit(1)
    elif args.quick:
        depth = "quick"
    elif args.deep:
        depth = "deep"
    else:
        depth = "default"

    if not args.topic:
        print("Error: Please provide a topic to research.", file=sys.stderr)
        print("Usage: python3 last30days.py <topic> [options]", file=sys.stderr)
        sys.exit(1)

    # Load config
    config = env.get_config()

    # Check available sources
    available = env.get_available_sources(config)

    # Mock mode can work without keys
    if args.mock:
        if args.sources == "auto":
            sources = "both"
        else:
            sources = args.sources
    else:
        # Validate requested sources against available
        sources, error = env.validate_sources(args.sources, available, args.include_web)
        if error:
            # If it's a warning about WebSearch fallback, print but continue
            if "WebSearch fallback" in error:
                print(f"Note: {error}", file=sys.stderr)
            else:
                print(f"Error: {error}", file=sys.stderr)
                sys.exit(1)

    # Get date range
    from_date, to_date = dates.get_date_range(30)

    # Check what keys are missing for promo messaging
    missing_keys = env.get_missing_keys(config)

    # Initialize progress display
    progress = ui.ProgressDisplay(args.topic, show_banner=True)

    # Show promo for missing keys BEFORE research
    if missing_keys != 'none':
        progress.show_promo(missing_keys)

    # Select models
    if args.mock:
        # Use mock models
        mock_openai_models = load_fixture("models_openai_sample.json").get("data", [])
        mock_xai_models = load_fixture("models_xai_sample.json").get("data", [])
        selected_models = models.get_models(
            {
                # NOTE(review): **config is spread AFTER the "mock" keys, so any
                # real key values present in config override the mocks here —
                # confirm this ordering is intended.
                "OPENAI_API_KEY": "mock",
                "XAI_API_KEY": "mock",
                **config,
            },
            mock_openai_models,
            mock_xai_models,
        )
    else:
        selected_models = models.get_models(config)

    # Determine mode string (human-readable label recorded in the report)
    if sources == "all":
        mode = "all"  # reddit + x + web
    elif sources == "both":
        mode = "both"  # reddit + x
    elif sources == "reddit":
        mode = "reddit-only"
    elif sources == "reddit-web":
        mode = "reddit-web"
    elif sources == "x":
        mode = "x-only"
    elif sources == "x-web":
        mode = "x-web"
    elif sources == "web":
        mode = "web-only"
    else:
        mode = sources

    # Run research
    reddit_items, x_items, web_needed, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error = run_research(
        args.topic,
        sources,
        config,
        selected_models,
        from_date,
        to_date,
        depth,
        args.mock,
        progress,
    )

    # Processing phase
    progress.start_processing()

    # Normalize items
    normalized_reddit = normalize.normalize_reddit_items(reddit_items, from_date, to_date)
    normalized_x = normalize.normalize_x_items(x_items, from_date, to_date)

    # Hard date filter: exclude items with verified dates outside the range
    # This is the safety net - even if prompts let old content through, this filters it
    filtered_reddit = normalize.filter_by_date_range(normalized_reddit, from_date, to_date)
    filtered_x = normalize.filter_by_date_range(normalized_x, from_date, to_date)

    # Score items
    scored_reddit = score.score_reddit_items(filtered_reddit)
    scored_x = score.score_x_items(filtered_x)

    # Sort items
    sorted_reddit = score.sort_items(scored_reddit)
    sorted_x = score.sort_items(scored_x)

    # Dedupe items
    deduped_reddit = dedupe.dedupe_reddit(sorted_reddit)
    deduped_x = dedupe.dedupe_x(sorted_x)

    progress.end_processing()

    # Create report
    report = schema.create_report(
        args.topic,
        from_date,
        to_date,
        mode,
        selected_models.get("openai"),
        selected_models.get("xai"),
    )
    report.reddit = deduped_reddit
    report.x = deduped_x
    report.reddit_error = reddit_error
    report.x_error = x_error

    # Generate context snippet
    report.context_snippet_md = render.render_context_snippet(report)

    # Write outputs
    render.write_outputs(report, raw_openai, raw_xai, raw_reddit_enriched)

    # Show completion
    if sources == "web":
        progress.show_web_only_complete()
    else:
        progress.show_complete(len(deduped_reddit), len(deduped_x))

    # Output result
    output_result(report, args.emit, web_needed, args.topic, from_date, to_date, missing_keys)
|
|
479
|
+
|
|
480
|
+
|
|
481
|
+
def output_result(
    report: schema.Report,
    emit_mode: str,
    web_needed: bool = False,
    topic: str = "",
    from_date: str = "",
    to_date: str = "",
    missing_keys: str = "none",
):
    """Print the report in the requested format, then (when web_needed)
    print the hand-off block instructing Claude to run WebSearch."""
    emitters = {
        "compact": lambda: render.render_compact(report, missing_keys=missing_keys),
        "json": lambda: json.dumps(report.to_dict(), indent=2),
        "md": lambda: render.render_full_report(report),
        "context": lambda: report.context_snippet_md,
        "path": lambda: render.get_context_path(),
    }
    emitter = emitters.get(emit_mode)
    if emitter is not None:
        print(emitter())

    # WebSearch hand-off: emitted as one block so it stays contiguous.
    if web_needed:
        divider = "=" * 60
        handoff = [
            "",
            divider,
            "### WEBSEARCH REQUIRED ###",
            divider,
            f"Topic: {topic}",
            f"Date range: {from_date} to {to_date}",
            "",
            "Claude: Use your WebSearch tool to find 8-15 relevant web pages.",
            "EXCLUDE: reddit.com, x.com, twitter.com (already covered above)",
            "INCLUDE: blogs, docs, news, tutorials from the last 30 days",
            "",
            "After searching, synthesize WebSearch results WITH the Reddit/X",
            "results above. WebSearch items should rank LOWER than comparable",
            "Reddit/X items (they lack engagement metrics).",
            divider,
        ]
        print("\n".join(handoff))
|
|
518
|
+
|
|
519
|
+
|
|
520
|
+
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# last30days library modules
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
"""Caching utilities for last30days skill."""
|
|
2
|
+
|
|
3
|
+
import hashlib
|
|
4
|
+
import json
|
|
5
|
+
import os
|
|
6
|
+
from datetime import datetime, timezone
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any, Optional
|
|
9
|
+
|
|
10
|
+
CACHE_DIR = Path.home() / ".cache" / "last30days"
|
|
11
|
+
DEFAULT_TTL_HOURS = 24
|
|
12
|
+
MODEL_CACHE_TTL_DAYS = 7
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def ensure_cache_dir():
    """Create the cache directory (including parents) if it does not exist.

    Safe to call repeatedly; existing directories are left untouched.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_cache_key(topic: str, from_date: str, to_date: str, sources: str) -> str:
    """Derive a short, stable cache key for one query.

    The key is the first 16 hex characters of the SHA-256 digest of the
    pipe-joined parameters, so identical queries share a cache file.
    """
    joined = "|".join((topic, from_date, to_date, sources))
    digest = hashlib.sha256(joined.encode())
    return digest.hexdigest()[:16]
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def get_cache_path(cache_key: str) -> Path:
    """Return the on-disk JSON file that backs *cache_key*."""
    filename = f"{cache_key}.json"
    return CACHE_DIR / filename
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def is_cache_valid(cache_path: Path, ttl_hours: int = DEFAULT_TTL_HOURS) -> bool:
    """Check if cache file exists and is within TTL.

    Args:
        cache_path: Cache file to check.
        ttl_hours: Maximum allowed age in hours.

    Returns:
        True when the file exists, its mtime is readable, and its age is
        strictly below *ttl_hours*; False otherwise.
    """
    # Consistency fix: reuse get_cache_age_hours instead of duplicating the
    # same mtime arithmetic here. It returns None for a missing or
    # unreadable file, which maps to "invalid".
    age_hours = get_cache_age_hours(cache_path)
    return age_hours is not None and age_hours < ttl_hours
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def load_cache(cache_key: str, ttl_hours: int = DEFAULT_TTL_HOURS) -> Optional[dict]:
    """Load data from cache if valid.

    Args:
        cache_key: Key produced by get_cache_key().
        ttl_hours: Maximum allowed cache age in hours.

    Returns:
        The cached dict, or None when the entry is missing, stale, or
        unparseable.
    """
    # Consistency fix: delegate to load_cache_with_age so the
    # validity/parse logic lives in exactly one place; the age component
    # is simply discarded here.
    data, _age = load_cache_with_age(cache_key, ttl_hours)
    return data
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def get_cache_age_hours(cache_path: Path) -> Optional[float]:
    """Return how many hours old *cache_path* is, or None if unreadable.

    A missing file raises FileNotFoundError (an OSError subclass) from
    stat(), so both "absent" and "unreadable" collapse to None.
    """
    try:
        mtime_ts = cache_path.stat().st_mtime
    except OSError:
        return None
    modified = datetime.fromtimestamp(mtime_ts, tz=timezone.utc)
    elapsed = datetime.now(timezone.utc) - modified
    return elapsed.total_seconds() / 3600
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def load_cache_with_age(cache_key: str, ttl_hours: int = DEFAULT_TTL_HOURS) -> tuple:
    """Load cached data together with its age.

    Returns:
        (data, age_hours) on a valid cache hit, otherwise (None, None).
    """
    path = get_cache_path(cache_key)
    if not is_cache_valid(path, ttl_hours):
        return None, None

    hours_old = get_cache_age_hours(path)

    try:
        raw = path.read_text()
        return json.loads(raw), hours_old
    except (json.JSONDecodeError, OSError):
        # Corrupt or vanished file between the validity check and the read.
        return None, None
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def save_cache(cache_key: str, data: dict):
    """Persist *data* under *cache_key*; write failures are swallowed.

    Caching is best-effort: a full disk or bad permissions must not break
    the research run.
    """
    ensure_cache_dir()
    target = get_cache_path(cache_key)

    try:
        with target.open('w') as fh:
            json.dump(data, fh)
    except OSError:
        pass
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def clear_cache():
    """Delete every cached *.json file; per-file errors are ignored."""
    if not CACHE_DIR.exists():
        return
    for cache_file in CACHE_DIR.glob("*.json"):
        try:
            cache_file.unlink()
        except OSError:
            # A file vanishing mid-iteration or a permission error is fine.
            pass
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
# Model selection cache (longer TTL).
# Kept separate from per-query caches so the 7-day TTL
# (MODEL_CACHE_TTL_DAYS) can apply independently of DEFAULT_TTL_HOURS.
MODEL_CACHE_FILE = CACHE_DIR / "model_selection.json"
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def load_model_cache() -> dict:
    """Return the cached provider->model mapping, or {} if stale or unreadable."""
    ttl_hours = MODEL_CACHE_TTL_DAYS * 24
    if not is_cache_valid(MODEL_CACHE_FILE, ttl_hours):
        return {}

    try:
        with MODEL_CACHE_FILE.open('r') as fh:
            return json.load(fh)
    except (json.JSONDecodeError, OSError):
        return {}
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def save_model_cache(data: dict):
    """Write the provider->model mapping to disk (best-effort)."""
    ensure_cache_dir()
    try:
        with MODEL_CACHE_FILE.open('w') as fh:
            json.dump(data, fh)
    except OSError:
        # Never fail the caller over a cache write.
        pass
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def get_cached_model(provider: str) -> Optional[str]:
    """Return the cached model id for *provider*, or None when not cached."""
    return load_model_cache().get(provider)
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def set_cached_model(provider: str, model: str):
    """Record *model* as the selection for *provider* and persist it."""
    selections = load_model_cache()
    selections[provider] = model
    # Timestamp aids debugging; note it shares the dict with provider keys.
    selections['updated_at'] = datetime.now(timezone.utc).isoformat()
    save_model_cache(selections)
|