@laitszkin/apollo-toolkit 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. package/AGENTS.md +62 -0
  2. package/CHANGELOG.md +100 -0
  3. package/LICENSE +21 -0
  4. package/README.md +144 -0
  5. package/align-project-documents/SKILL.md +94 -0
  6. package/align-project-documents/agents/openai.yaml +4 -0
  7. package/analyse-app-logs/LICENSE +21 -0
  8. package/analyse-app-logs/README.md +126 -0
  9. package/analyse-app-logs/SKILL.md +121 -0
  10. package/analyse-app-logs/agents/openai.yaml +4 -0
  11. package/analyse-app-logs/references/investigation-checklist.md +58 -0
  12. package/analyse-app-logs/references/log-signal-patterns.md +52 -0
  13. package/answering-questions-with-research/SKILL.md +46 -0
  14. package/answering-questions-with-research/agents/openai.yaml +4 -0
  15. package/bin/apollo-toolkit.js +7 -0
  16. package/commit-and-push/LICENSE +21 -0
  17. package/commit-and-push/README.md +26 -0
  18. package/commit-and-push/SKILL.md +70 -0
  19. package/commit-and-push/agents/openai.yaml +4 -0
  20. package/commit-and-push/references/branch-naming.md +15 -0
  21. package/commit-and-push/references/commit-messages.md +19 -0
  22. package/deep-research-topics/LICENSE +21 -0
  23. package/deep-research-topics/README.md +43 -0
  24. package/deep-research-topics/SKILL.md +84 -0
  25. package/deep-research-topics/agents/openai.yaml +4 -0
  26. package/develop-new-features/LICENSE +21 -0
  27. package/develop-new-features/README.md +52 -0
  28. package/develop-new-features/SKILL.md +105 -0
  29. package/develop-new-features/agents/openai.yaml +4 -0
  30. package/develop-new-features/references/testing-e2e.md +35 -0
  31. package/develop-new-features/references/testing-integration.md +42 -0
  32. package/develop-new-features/references/testing-property-based.md +44 -0
  33. package/develop-new-features/references/testing-unit.md +37 -0
  34. package/discover-edge-cases/CHANGELOG.md +19 -0
  35. package/discover-edge-cases/LICENSE +21 -0
  36. package/discover-edge-cases/README.md +87 -0
  37. package/discover-edge-cases/SKILL.md +124 -0
  38. package/discover-edge-cases/agents/openai.yaml +4 -0
  39. package/discover-edge-cases/references/architecture-edge-cases.md +41 -0
  40. package/discover-edge-cases/references/code-edge-cases.md +46 -0
  41. package/docs-to-voice/.env.example +106 -0
  42. package/docs-to-voice/CHANGELOG.md +71 -0
  43. package/docs-to-voice/LICENSE +21 -0
  44. package/docs-to-voice/README.md +118 -0
  45. package/docs-to-voice/SKILL.md +107 -0
  46. package/docs-to-voice/agents/openai.yaml +4 -0
  47. package/docs-to-voice/scripts/docs_to_voice.py +1385 -0
  48. package/docs-to-voice/scripts/docs_to_voice.sh +11 -0
  49. package/docs-to-voice/tests/test_docs_to_voice_api_max_chars.py +210 -0
  50. package/docs-to-voice/tests/test_docs_to_voice_sentence_timeline.py +115 -0
  51. package/docs-to-voice/tests/test_docs_to_voice_settings.py +43 -0
  52. package/docs-to-voice/tests/test_docs_to_voice_speech_rate.py +57 -0
  53. package/enhance-existing-features/CHANGELOG.md +35 -0
  54. package/enhance-existing-features/LICENSE +21 -0
  55. package/enhance-existing-features/README.md +54 -0
  56. package/enhance-existing-features/SKILL.md +120 -0
  57. package/enhance-existing-features/agents/openai.yaml +4 -0
  58. package/enhance-existing-features/references/e2e-tests.md +25 -0
  59. package/enhance-existing-features/references/integration-tests.md +30 -0
  60. package/enhance-existing-features/references/property-based-tests.md +33 -0
  61. package/enhance-existing-features/references/unit-tests.md +29 -0
  62. package/feature-propose/LICENSE +21 -0
  63. package/feature-propose/README.md +23 -0
  64. package/feature-propose/SKILL.md +107 -0
  65. package/feature-propose/agents/openai.yaml +4 -0
  66. package/feature-propose/references/enhancement-features.md +25 -0
  67. package/feature-propose/references/important-features.md +25 -0
  68. package/feature-propose/references/mvp-features.md +25 -0
  69. package/feature-propose/references/performance-features.md +25 -0
  70. package/financial-research/SKILL.md +208 -0
  71. package/financial-research/agents/openai.yaml +4 -0
  72. package/financial-research/assets/weekly_market_report_template.md +45 -0
  73. package/fix-github-issues/SKILL.md +98 -0
  74. package/fix-github-issues/agents/openai.yaml +4 -0
  75. package/fix-github-issues/scripts/list_issues.py +148 -0
  76. package/fix-github-issues/tests/test_list_issues.py +127 -0
  77. package/generate-spec/LICENSE +21 -0
  78. package/generate-spec/README.md +61 -0
  79. package/generate-spec/SKILL.md +96 -0
  80. package/generate-spec/agents/openai.yaml +4 -0
  81. package/generate-spec/references/templates/checklist.md +78 -0
  82. package/generate-spec/references/templates/spec.md +55 -0
  83. package/generate-spec/references/templates/tasks.md +35 -0
  84. package/generate-spec/scripts/create-specs +123 -0
  85. package/harden-app-security/CHANGELOG.md +27 -0
  86. package/harden-app-security/LICENSE +21 -0
  87. package/harden-app-security/README.md +46 -0
  88. package/harden-app-security/SKILL.md +127 -0
  89. package/harden-app-security/agents/openai.yaml +4 -0
  90. package/harden-app-security/references/agent-attack-catalog.md +117 -0
  91. package/harden-app-security/references/common-software-attack-catalog.md +168 -0
  92. package/harden-app-security/references/red-team-extreme-scenarios.md +81 -0
  93. package/harden-app-security/references/risk-checklist.md +78 -0
  94. package/harden-app-security/references/security-test-patterns-agent.md +101 -0
  95. package/harden-app-security/references/security-test-patterns-finance.md +88 -0
  96. package/harden-app-security/references/test-snippets.md +73 -0
  97. package/improve-observability/SKILL.md +114 -0
  98. package/improve-observability/agents/openai.yaml +4 -0
  99. package/learn-skill-from-conversations/CHANGELOG.md +15 -0
  100. package/learn-skill-from-conversations/LICENSE +22 -0
  101. package/learn-skill-from-conversations/README.md +47 -0
  102. package/learn-skill-from-conversations/SKILL.md +85 -0
  103. package/learn-skill-from-conversations/agents/openai.yaml +4 -0
  104. package/learn-skill-from-conversations/scripts/extract_recent_conversations.py +369 -0
  105. package/learn-skill-from-conversations/tests/test_extract_recent_conversations.py +176 -0
  106. package/learning-error-book/SKILL.md +112 -0
  107. package/learning-error-book/agents/openai.yaml +4 -0
  108. package/learning-error-book/assets/error_book_template.md +66 -0
  109. package/learning-error-book/scripts/render_markdown_to_pdf.py +367 -0
  110. package/lib/cli.js +338 -0
  111. package/lib/installer.js +225 -0
  112. package/maintain-project-constraints/SKILL.md +109 -0
  113. package/maintain-project-constraints/agents/openai.yaml +4 -0
  114. package/maintain-skill-catalog/README.md +18 -0
  115. package/maintain-skill-catalog/SKILL.md +66 -0
  116. package/maintain-skill-catalog/agents/openai.yaml +4 -0
  117. package/novel-to-short-video/CHANGELOG.md +53 -0
  118. package/novel-to-short-video/LICENSE +21 -0
  119. package/novel-to-short-video/README.md +63 -0
  120. package/novel-to-short-video/SKILL.md +233 -0
  121. package/novel-to-short-video/agents/openai.yaml +4 -0
  122. package/novel-to-short-video/references/plan-template.md +71 -0
  123. package/novel-to-short-video/references/roles-json.md +41 -0
  124. package/open-github-issue/LICENSE +21 -0
  125. package/open-github-issue/README.md +97 -0
  126. package/open-github-issue/SKILL.md +119 -0
  127. package/open-github-issue/agents/openai.yaml +4 -0
  128. package/open-github-issue/scripts/open_github_issue.py +380 -0
  129. package/open-github-issue/tests/test_open_github_issue.py +159 -0
  130. package/open-source-pr-workflow/CHANGELOG.md +32 -0
  131. package/open-source-pr-workflow/LICENSE +21 -0
  132. package/open-source-pr-workflow/README.md +23 -0
  133. package/open-source-pr-workflow/SKILL.md +123 -0
  134. package/open-source-pr-workflow/agents/openai.yaml +4 -0
  135. package/openai-text-to-image-storyboard/.env.example +10 -0
  136. package/openai-text-to-image-storyboard/CHANGELOG.md +49 -0
  137. package/openai-text-to-image-storyboard/LICENSE +21 -0
  138. package/openai-text-to-image-storyboard/README.md +99 -0
  139. package/openai-text-to-image-storyboard/SKILL.md +107 -0
  140. package/openai-text-to-image-storyboard/agents/openai.yaml +4 -0
  141. package/openai-text-to-image-storyboard/scripts/generate_storyboard_images.py +763 -0
  142. package/package.json +36 -0
  143. package/record-spending/SKILL.md +113 -0
  144. package/record-spending/agents/openai.yaml +4 -0
  145. package/record-spending/references/account-format.md +33 -0
  146. package/record-spending/references/workbook-layout.md +84 -0
  147. package/resolve-review-comments/SKILL.md +122 -0
  148. package/resolve-review-comments/agents/openai.yaml +4 -0
  149. package/resolve-review-comments/references/adoption-criteria.md +23 -0
  150. package/resolve-review-comments/scripts/review_threads.py +425 -0
  151. package/resolve-review-comments/tests/test_review_threads.py +74 -0
  152. package/review-change-set/LICENSE +21 -0
  153. package/review-change-set/README.md +55 -0
  154. package/review-change-set/SKILL.md +103 -0
  155. package/review-change-set/agents/openai.yaml +4 -0
  156. package/review-codebases/LICENSE +21 -0
  157. package/review-codebases/README.md +67 -0
  158. package/review-codebases/SKILL.md +109 -0
  159. package/review-codebases/agents/openai.yaml +4 -0
  160. package/scripts/install_skills.ps1 +283 -0
  161. package/scripts/install_skills.sh +262 -0
  162. package/scripts/validate_openai_agent_config.py +194 -0
  163. package/scripts/validate_skill_frontmatter.py +110 -0
  164. package/specs-to-project-docs/LICENSE +21 -0
  165. package/specs-to-project-docs/README.md +57 -0
  166. package/specs-to-project-docs/SKILL.md +111 -0
  167. package/specs-to-project-docs/agents/openai.yaml +4 -0
  168. package/specs-to-project-docs/references/templates/architecture.md +29 -0
  169. package/specs-to-project-docs/references/templates/configuration.md +29 -0
  170. package/specs-to-project-docs/references/templates/developer-guide.md +33 -0
  171. package/specs-to-project-docs/references/templates/docs-index.md +39 -0
  172. package/specs-to-project-docs/references/templates/features.md +25 -0
  173. package/specs-to-project-docs/references/templates/getting-started.md +38 -0
  174. package/specs-to-project-docs/references/templates/readme.md +49 -0
  175. package/systematic-debug/LICENSE +21 -0
  176. package/systematic-debug/README.md +81 -0
  177. package/systematic-debug/SKILL.md +59 -0
  178. package/systematic-debug/agents/openai.yaml +4 -0
  179. package/text-to-short-video/.env.example +36 -0
  180. package/text-to-short-video/LICENSE +21 -0
  181. package/text-to-short-video/README.md +82 -0
  182. package/text-to-short-video/SKILL.md +221 -0
  183. package/text-to-short-video/agents/openai.yaml +4 -0
  184. package/text-to-short-video/scripts/enforce_video_aspect_ratio.py +350 -0
  185. package/version-release/CHANGELOG.md +53 -0
  186. package/version-release/LICENSE +21 -0
  187. package/version-release/README.md +28 -0
  188. package/version-release/SKILL.md +94 -0
  189. package/version-release/agents/openai.yaml +4 -0
  190. package/version-release/references/branch-naming.md +15 -0
  191. package/version-release/references/changelog-writing.md +8 -0
  192. package/version-release/references/commit-messages.md +19 -0
  193. package/version-release/references/readme-writing.md +12 -0
  194. package/version-release/references/semantic-versioning.md +12 -0
  195. package/video-production/CHANGELOG.md +104 -0
  196. package/video-production/LICENSE +18 -0
  197. package/video-production/README.md +68 -0
  198. package/video-production/SKILL.md +213 -0
  199. package/video-production/agents/openai.yaml +4 -0
  200. package/video-production/references/plan-template.md +54 -0
  201. package/video-production/references/roles-json.md +41 -0
  202. package/weekly-financial-event-report/SKILL.md +195 -0
  203. package/weekly-financial-event-report/agents/openai.yaml +4 -0
  204. package/weekly-financial-event-report/assets/financial_event_report_template.md +53 -0
@@ -0,0 +1,25 @@
1
+ # E2E Testing Guide
2
+
3
+ ## Purpose
4
+ - Verify critical user-visible paths at end-to-end level.
5
+ - Increase confidence in real behavior after cross-layer integration.
6
+
7
+ ## Required when
8
+ - If changes impact key user-visible flows, add or update E2E tests.
9
+ - E2E must still be evaluated even when specs are not used; if not applicable, record an explicit rationale.
10
+
11
+ ## E2E decision rules
12
+ - Prefer E2E for high-risk, high-impact, multi-step flow changes.
13
+ - Integration tests may replace E2E when E2E is too costly, unstable, or hard to maintain.
14
+ - When replacing E2E, provide equivalent risk coverage and record replacement cases plus reasons.
15
+
16
+ ## Design guidance
17
+ - Focus on minimal critical path coverage; avoid over-expansion.
18
+ - Use stable test data and reproducible flows.
19
+ - Prioritize business outcomes over brittle UI details.
20
+ - Prefer one critical success path and one highest-value denial/failure path over many shallow happy-path journeys.
21
+ - Assert business-visible outcomes, not just DOM presence: final state, permission denial, user-facing error, persisted result, or prevented duplicate action.
22
+
23
+ ## Recording rules
24
+ - Specs flow: record E2E or replacement strategy with outcomes in `checklist.md`.
25
+ - Non-specs flow: explain E2E execution or replacement testing with rationale in the response.
@@ -0,0 +1,30 @@
1
+ # Integration Testing Guide
2
+
3
+ ## Purpose
4
+ - Verify correctness of cross-layer/cross-module collaboration.
5
+ - Focus especially on user-critical logic chains.
6
+ - Validate business outcomes across the full changed chain, not just connectivity.
7
+
8
+ ## Required when
9
+ - Any change affecting service/repository/API handlers/event flows should add or update integration tests.
10
+ - Integration tests for user-critical logic chains are required even when specs are not used.
11
+
12
+ ## Coverage focus
13
+ - Key data flow from entrypoint to output.
14
+ - Cross-module contract and configuration interaction.
15
+ - Common failure patterns (timeout, data inconsistency, external dependency failure).
16
+ - External dependency state changes and fallback/compensation behavior.
17
+ - Adversarial/abuse paths such as invalid transitions, replay, duplication, forged identifiers, or out-of-order events when relevant.
18
+
19
+ ## Design guidance
20
+ - Prefer near-real dependencies inside the application boundary; mock/fake external services unless the real service contract itself is under test.
21
+ - Build scenario matrices for external states such as success, timeout, retries exhausted, partial data, stale data, duplicate callbacks, inconsistent responses, and permission failures.
22
+ - Keep test data reconstructable and cleanable.
23
+ - When workflows can partially commit, assert rollback/compensation/no-partial-write behavior instead of only final status codes.
24
+ - Assert business outcomes across boundaries: persisted state, emitted events, deduplication, retry accounting, audit trail, or intentional absence of writes/notifications.
25
+ - Add at least one regression-style integration test for the highest-risk chain whenever the change fixes a bug or touches a historically fragile path.
26
+ - Each test case should map to an explainable risk.
27
+
28
+ ## Recording rules
29
+ - Specs flow: record IT cases and outcomes in `checklist.md`.
30
+ - Non-specs flow: list user-critical integration tests, mocked external scenarios, adversarial cases, and outcomes in the response.
@@ -0,0 +1,33 @@
1
+ # Property-based Testing Guide
2
+
3
+ ## Purpose
4
+ - Verify invariants across large input combinations.
5
+ - Validate business rules by generating or exhaustively enumerating meaningful input spaces and checking outputs against expected business behavior.
6
+ - Catch combinatorial, adversarial, and boundary behavior that fixed examples often miss.
7
+
8
+ ## Required when
9
+ - If changes include logic with describable invariants (calculation, transformation, sorting, aggregation, serialization), add/update property-based tests.
10
+ - If changes include business rules that can be expressed as allowed outputs, forbidden outputs, valid transitions, rejection rules, or safety constraints, add/update property-based tests.
11
+ - If logic depends on external services but the service can be replaced with a mock/fake to generate service states, property-based tests should cover those state combinations too.
12
+ - If not applicable, record `N/A` with a concrete reason.
13
+
14
+ ## Common properties
15
+ - Round-trip: `decode(encode(x)) == x`
16
+ - Idempotency: repeated execution does not change the result
17
+ - Monotonicity/conservation/set invariance
18
+ - Generated invalid or unauthorized inputs always fail with an expected result/error class
19
+ - Generated state transitions always end in an allowed business state
20
+ - Under generated mocked service states, the business logic chain preserves fallback/retry/compensation rules
21
+
22
+ ## Design guidance
23
+ - Properties must be machine-verifiable, whether they are invariants, allow-lists, rejection rules, or business-output predicates.
24
+ - Generator strategy should include normal cases, boundaries, extremes, malformed inputs, and suspicious/adversarial combinations.
25
+ - Prefer modeling the business rule directly: generate inputs, run the logic, then assert output/error/state transition matches the rule.
26
+ - When the behavior is stateful, prefer state-machine or sequence-based properties over isolated single-call generators.
27
+ - When exact outputs are hard to predict, use metamorphic properties (for example reordering, retrying, deduplicating, or replaying inputs should preserve an allowed relation).
28
+ - For external-service-dependent logic, mock/fake the service and generate multiple service states (success, timeout, empty, partial, stale, inconsistent, duplicate, rejected).
29
+ - Control execution cost while preserving reproducibility, and preserve failing seeds/examples for regression coverage.
30
+
31
+ ## Recording rules
32
+ - If specs are used, record cases and outcomes in `checklist.md`.
33
+ - If specs are not used, record cases, external-state coverage, adversarial coverage, or `N/A` reasons in the response.
@@ -0,0 +1,29 @@
1
+ # Unit Testing Guide
2
+
3
+ ## Purpose
4
+ - Verify correctness of a single function/module and localize failures quickly.
5
+ - Cover both success and failure paths for the smallest changed behavior unit.
6
+
7
+ ## Required when
8
+ - Any non-trivial logic change should add or update unit tests.
9
+ - Unit test evaluation is required even when specs are not used.
10
+
11
+ ## Coverage focus
12
+ - Core logic branches and boundary values.
13
+ - Error handling, validation failures, and incompatible states.
14
+ - Function paths with highest regression risk.
15
+
16
+ ## Design guidance
17
+ - Isolate external dependencies (mock/stub/fake).
18
+ - Keep tests small and focused: one behavior per test.
19
+ - Do not stop at happy-path assertions; verify exact errors, rejected states, and intentional lack of side effects when the unit should block an action.
20
+ - Where the input space is small and discrete, exhaustively enumerate business inputs and expected outputs.
21
+ - Prefer table-driven cases when many small business permutations share the same oracle.
22
+ - Add regression tests for bug-prone or high-risk logic so previously broken behavior cannot silently return.
23
+ - If the unit owns authorization, invalid transition, idempotency, or concurrency decisions, test those denials explicitly.
24
+ - Keep tests reproducible and fast.
25
+ - Avoid assertion-light smoke tests and snapshot-only coverage unless the snapshot has a strict business oracle behind it.
26
+
27
+ ## Recording rules
28
+ - If specs are used, record mapped test cases and results in `checklist.md`.
29
+ - If specs are not used, list test IDs and results in the response.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 tszkinlai
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,23 @@
1
+ # feature-propose
2
+
3
+ `feature-propose` is a Codex skill for product-oriented feature planning in existing codebases.
4
+ It guides the agent to:
5
+
6
+ 1. Understand the product from real code evidence.
7
+ 2. Classify user-facing functions into MVP / Important / Enhancement / Performance.
8
+ 3. Propose numbered feature recommendations with clear implementation direction.
9
+ 4. Publish accepted proposals through `open-github-issue` with reason and suggested architecture.
10
+ 5. Record accepted feature proposals in `AGENTS.md`.
11
+ 6. Remove implemented proposals from `AGENTS.md` after delivery.
12
+
13
+ ## Repository layout
14
+
15
+ - `SKILL.md`: skill metadata and workflow instructions.
16
+ - `agents/openai.yaml`: agent-facing registration metadata.
17
+ - `references/`: feature classification references used during proposal generation.
18
+
19
+ ## Usage
20
+
21
+ Use this skill when a user asks to analyze an existing application and propose prioritized features from a PM perspective.
22
+
23
+ Accepted proposals should be ready for GitHub tracking: each one includes a clear reason to prioritize and a suggested architecture that can be handed to `open-github-issue`.
@@ -0,0 +1,107 @@
1
+ ---
2
+ name: feature-propose
3
+ description: Professional product-management workflow for proposing features from an existing codebase. Use when the user asks to understand an application, classify features from a user perspective into MVP/Important/Enhancement/Performance tiers, ask 3-5 clarifying questions when needed, propose numbered feature recommendations, publish accepted proposals through `open-github-issue`, record accepted items in AGENTS.md, and remove implemented items from AGENTS.md.
4
+ ---
5
+
6
+ # Feature Propose
7
+
8
+ ## Dependencies
9
+
10
+ - Required: none.
11
+ - Conditional: `open-github-issue` for accepted features that should be published as GitHub issues.
12
+ - Optional: none.
13
+ - Fallback: If issue publication is skipped or unavailable, keep accepted proposals synchronized in `AGENTS.md` and report the publication state explicitly.
14
+
15
+ ## Standards
16
+
17
+ - Evidence: Understand the existing product from code, tests, and repo docs before proposing features.
18
+ - Execution: Classify current functions first, ask questions only when necessary, then propose numbered features across the four priority tiers.
19
+ - Quality: Keep proposals minimal, user-value driven, and tied to concrete modules, acceptance criteria, and prioritization reasons.
20
+ - Output: Return current understanding, function classification, proposed features, confirmation request, and publication status when applicable.
21
+
22
+ ## Overview
23
+
24
+ Act as a professional PM: build a complete understanding of the current product from code, classify capabilities by user value, propose prioritized features, publish accepted proposals through `open-github-issue`, persist accepted proposals in `AGENTS.md`, and keep the list clean by removing implemented items.
25
+
26
+ ## References
27
+
28
+ Load these references as needed during classification:
29
+
30
+ - `references/mvp-features.md`
31
+ - `references/important-features.md`
32
+ - `references/enhancement-features.md`
33
+ - `references/performance-features.md`
34
+
35
+ ## Workflow
36
+
37
+ ### 1) Explore the codebase before proposing anything
38
+
39
+ - Read repo-level guidance first (`AGENTS.md`, `README`, major docs).
40
+ - Map architecture, entrypoints, user-facing flows, data models, and external integrations.
41
+ - Identify implemented features, obvious gaps, and technical constraints from code and tests.
42
+ - Summarize findings before moving to prioritization.
43
+ - Refuse to guess when facts are missing; gather more evidence from code.
44
+
45
+ ### 2) Build a user-perspective function inventory
46
+
47
+ - List the current user-facing functions as numbered items.
48
+ - For each function, include: user goal, current behavior, pain point/opportunity, and key file references.
49
+ - Classify each function into exactly one primary type using the reference definitions.
50
+ - If a function spans multiple types, keep one primary type and note the secondary type briefly.
51
+
52
+ ### 3) Ask clarifying questions only when necessary
53
+
54
+ - Ask 3-5 targeted questions only when uncertainty blocks accurate prioritization.
55
+ - Focus questions on target users, business goal, release horizon, success metrics, and hard constraints.
56
+ - If context is already sufficient, skip questions and continue directly.
57
+
58
+ ### 4) Propose features in four numbered groups
59
+
60
+ - Present features in this order:
61
+ 1. MVP features
62
+ 2. Important features
63
+ 3. Enhancement features
64
+ 4. Performance features
65
+ - Number every proposed feature (for example: `1`, `2`, `3`...) so acceptance can reference numbers.
66
+ - For each feature include:
67
+ - User problem
68
+ - Expected user value
69
+ - Reason to prioritize now
70
+ - Suggested architecture
71
+ - Affected modules/files
72
+ - Acceptance criteria
73
+ - Keep proposals focused and minimal; avoid over-engineering.
74
+
75
+ ### 5) Persist accepted features to AGENTS.md, publish them, and clean up after implementation
76
+
77
+ - Ask the user to accept/reject/edit features by number.
78
+ - Once accepted, update repo-root `AGENTS.md` with a dedicated section:
79
+ - `## Accepted Feature Proposals`
80
+ - Append accepted features as a numbered list with:
81
+ - Date (`YYYY-MM-DD`)
82
+ - Type (`MVP`, `Important`, `Enhancement`, `Performance`)
83
+ - Short feature statement
84
+ - Preserve existing `AGENTS.md` content and style; do not rewrite unrelated sections.
85
+ - If `AGENTS.md` does not exist, ask before creating it.
86
+ - For each accepted feature, invoke `open-github-issue` exactly once with feature-proposal content.
87
+ - Default to publishing accepted features unless the user explicitly says not to create GitHub issues.
88
+ - Pass these fields to `open-github-issue`:
89
+ - `issue-type`: `feature`
90
+ - `title`: short feature statement
91
+ - `proposal`: feature summary
92
+ - `reason`: why this feature should exist now
93
+ - `suggested-architecture`: minimal architecture and module plan
94
+ - `repo`: target repository in `owner/repo` format when known
95
+ - Reuse the returned `mode`, `issue_url`, and `publish_error` in the response.
96
+ - After the related feature is implemented, remove that feature entry from `## Accepted Feature Proposals` in `AGENTS.md`.
97
+ - Remove only implemented items; keep unimplemented accepted items untouched.
98
+
99
+ ## Output template
100
+
101
+ Use this structure when responding:
102
+
103
+ 1. `Current understanding` (codebase findings)
104
+ 2. `Function classification` (current functions mapped to 4 types)
105
+ 3. `Proposed features` (numbered)
106
+ 4. `Confirmation request` (ask user to accept/edit/reject by number)
107
+ 5. `Publication status` (only after accepted features are published through `open-github-issue`)
@@ -0,0 +1,4 @@
1
+ interface:
2
+ display_name: "feature-propose"
3
+ short_description: "Propose prioritized product features"
4
+ default_prompt: "Use $feature-propose to explore a codebase, classify user-facing functions, propose numbered MVP/Important/Enhancement/Performance features, and publish accepted proposals through $open-github-issue."
@@ -0,0 +1,25 @@
1
+ # Enhancement Features Definition
2
+
3
+ ## What it is
4
+
5
+ Enhancement features are quality-of-life improvements that make the product better, easier, or more delightful but are not essential for core success.
6
+
7
+ ## Classification rules
8
+
9
+ Classify a feature as Enhancement when most of the following are true:
10
+
11
+ 1. Users can complete key workflows without it.
12
+ 2. It improves usability, convenience, or flexibility.
13
+ 3. Value is real but moderate in urgency.
14
+ 4. It is suitable for iterative delivery after higher-priority tiers.
15
+
16
+ ## What it is not
17
+
18
+ - Capabilities needed to make the product functionally viable
19
+ - Urgent items with major business risk if delayed
20
+ - Pure system-level performance remediations tied to SLO breaches
21
+
22
+ ## PM check questions
23
+
24
+ - Is this primarily a better experience rather than a missing core capability?
25
+ - Would delaying it by one release still keep the product viable?
@@ -0,0 +1,25 @@
1
+ # Important Features Definition
2
+
3
+ ## What it is
4
+
5
+ Important features are high-value capabilities that significantly improve adoption, trust, retention, or operational reliability after MVP is usable.
6
+
7
+ ## Classification rules
8
+
9
+ Classify a feature as Important when most of the following are true:
10
+
11
+ 1. The core workflow works without it, but user success is materially weaker.
12
+ 2. It removes frequent friction for a large share of users.
13
+ 3. It meaningfully impacts business goals (activation, retention, conversion, safety).
14
+ 4. It should follow soon after MVP in roadmap priority.
15
+
16
+ ## What it is not
17
+
18
+ - Purely cosmetic changes with limited user impact
19
+ - Deep optimization work without measurable user/business effect
20
+ - Experimental ideas with unclear value
21
+
22
+ ## PM check questions
23
+
24
+ - Does this feature produce measurable product outcome improvement?
25
+ - Is it broadly valuable instead of only niche convenience?
@@ -0,0 +1,25 @@
1
+ # MVP Features Definition
2
+
3
+ ## What it is
4
+
5
+ MVP features are the minimum set of capabilities required for users to complete the product's core job successfully.
6
+
7
+ ## Classification rules
8
+
9
+ Classify a feature as MVP when most of the following are true:
10
+
11
+ 1. Without it, primary users cannot complete the main workflow.
12
+ 2. It removes a critical blocker, risk, or failure in the core journey.
13
+ 3. It is required to validate product-market fit or core value hypothesis.
14
+ 4. It should be delivered as a minimal, testable slice.
15
+
16
+ ## What it is not
17
+
18
+ - Nice-to-have improvements that do not block core usage
19
+ - Pure polish changes without direct core-journey impact
20
+ - Long-tail edge cases not needed for initial viability
21
+
22
+ ## PM check questions
23
+
24
+ - If this feature is removed, can users still complete the main job?
25
+ - Does this feature directly determine whether the product is usable at all?
@@ -0,0 +1,25 @@
1
+ # Performance Features Definition
2
+
3
+ ## What it is
4
+
5
+ Performance features focus on speed, scalability, reliability, and efficiency, ensuring the system meets user expectations and operational targets.
6
+
7
+ ## Classification rules
8
+
9
+ Classify a feature as Performance when most of the following are true:
10
+
11
+ 1. It targets latency, throughput, resource usage, uptime, or fault tolerance.
12
+ 2. User experience is degraded due to measurable performance/reliability issues.
13
+ 3. It is driven by SLO/SLA, scaling limits, or operational risk.
14
+ 4. Success can be validated with concrete technical metrics.
15
+
16
+ ## What it is not
17
+
18
+ - New user-facing business workflows
19
+ - General UI enhancements without measurable performance effect
20
+ - Architectural rewrites without clear performance hypotheses
21
+
22
+ ## PM check questions
23
+
24
+ - Which metric improves (for example: p95 latency, error rate, cost/request)?
25
+ - What threshold or target proves the feature is successful?
@@ -0,0 +1,208 @@
1
+ ---
2
+ name: financial-research
3
+ description: Research the most important tradeable instruments to watch for the coming week by reviewing the most recent completed local week of financial and economic news, then produce a standardized PDF market report. Use when the user asks for a weekly market briefing, macro recap, market sentiment summary, key financial news review, or a next-week trading watchlist.
4
+ ---
5
+
6
+ # Financial Research
7
+
8
+ ## Dependencies
9
+
10
+ - Required: `pdf` for the final deliverable.
11
+ - Conditional: none.
12
+ - Optional: none.
13
+ - Fallback: If `pdf` is unavailable, stop and report the missing dependency instead of inventing another export workflow.
14
+
15
+ ## Standards
16
+
17
+ - Evidence: Resolve the current local date and timezone first, then research the most recent completed 7-day local window with authoritative current sources.
18
+ - Execution: Collect weekly market evidence, distill the macro picture, select 3-8 watchlist instruments, and hand the final report to `pdf`.
19
+ - Quality: Use Chinese-compatible rendering requirements, verify exact dates and values, and complete screenshot-based PDF QA before finishing.
20
+ - Output: Save only the final PDF under the month folder using the standardized market-research naming scheme.
21
+
22
+ ## Overview
23
+
24
+ Create an evidence-based weekly market report for the next trading week. Start from the current local date and time, review the most recent completed local week of financial and economic news, distill the key market signals, and deliver a standardized PDF report in Chinese by default.
25
+
26
+ ## Behavior Contract
27
+
28
+ GIVEN the user needs research on the instruments worth watching in the coming week
29
+ WHEN the agent uses this skill proactively or the user calls this skill directly
30
+ THEN the agent must check the current local date, time, and timezone first
31
+ AND define the research window as the most recent completed local 7-day period ending yesterday
32
+ AND search financial and economic news from that window
33
+ AND identify the week's macro conditions, overall market sentiment, key news, and the most important instruments to monitor next week
34
+ AND save the report under a month-based folder
35
+ AND keep only the final PDF report as the persistent deliverable
36
+ AND generate a standardized PDF report with Chinese-compatible characters
37
+ AND default the report language to Chinese unless the user explicitly requests another language.
38
+
39
+ ## Required Inputs
40
+
41
+ Before drafting the report, confirm these facts from context or current sources:
42
+
43
+ - Current local date, time, and timezone
44
+ - Any user-specified geography, market, asset class, or language preference
45
+ - If the user did not narrow the scope, cover the major global cross-asset picture
46
+
47
+ Do not guess missing facts that materially change the report scope.
48
+
49
+ ## Research Window Rules
50
+
51
+ 1. Always resolve the user's local date first.
52
+ 2. Use the most recent completed 7-day local window that ends yesterday.
53
+ 3. State exact calendar dates in the report.
54
+ 4. Example:
55
+ - if the local date is Sunday, research the previous Sunday through Saturday
56
+ - if the local date is Friday, research the previous Friday through Thursday
57
+
58
+ ## Source Rules
59
+
60
+ - Use current web research for time-sensitive facts.
61
+ - Prefer primary or authoritative sources first:
62
+ - central banks
63
+ - government statistical agencies
64
+ - regulators
65
+ - exchange operators
66
+ - company filings or official releases
67
+ - Use high-quality financial reporting only to supplement, triangulate, or surface primary sources.
68
+ - Record the publication date or event date for all important claims.
69
+ - Separate verified facts from inference.
70
+
71
+ ## Workflow
72
+
73
+ ### 1) Lock the scope and timing
74
+
75
+ - Check the current local time before searching.
76
+ - Write down the exact research window with start date, end date, and timezone.
77
+ - If the user names a region or market, prioritize that scope while keeping enough macro context to explain cross-asset spillovers.
78
+
79
+ ### 2) Collect the week's market evidence
80
+
81
+ - Search the week's financial and economic news across the relevant markets.
82
+ - Cover at least the drivers that materially moved markets during the window, such as:
83
+ - central bank decisions and guidance
84
+ - inflation, employment, growth, and liquidity data
85
+ - major fiscal, regulatory, or geopolitical developments
86
+ - important earnings or sector shocks when they changed broader risk appetite
87
+ - large moves in rates, equities, foreign exchange, commodities, or crypto when relevant
88
+ - Build an evidence table before writing the report. For each item capture:
89
+ - date
90
+ - source
91
+ - event
92
+ - affected assets
93
+ - why it mattered during the week
94
+ - why it may matter next week
95
+
96
+ ### 3) Distill the market picture
97
+
98
+ - Produce a concise macro summary of the week.
99
+ - Judge the overall market sentiment from confirmed cross-asset behavior rather than headlines alone.
100
+ - Explain whether the tone was risk-on, risk-off, mixed, or regime-shifting, and why.
101
+ - Highlight conflicts in the evidence instead of forcing a single narrative.
102
+
103
+ ### 4) Select the instruments worth watching next week
104
+
105
+ - Rank the most important tradeable instruments by expected relevance for the coming week.
106
+ - Default to 3-8 instruments unless the user requests a different number.
107
+ - Prefer liquid, recognizable instruments:
108
+ - major indices or ETFs
109
+ - major FX pairs
110
+ - government bond futures or benchmark yields
111
+ - major commodities
112
+ - liquid crypto pairs only when they were materially relevant during the week
113
+ - For each selected instrument, include:
114
+ - instrument name and ticker or pair when available
115
+ - asset class
116
+ - this week's key driver
117
+ - what to watch next week
118
+ - base case or monitoring thesis
119
+ - main upside/downside risk or invalidation condition
120
+
121
+ ### 5) Write the standardized report
122
+
123
+ - Start from `assets/weekly_market_report_template.md`.
124
+ - Localize headings to the requested output language.
125
+ - Default to Chinese if the user did not specify a language.
126
+ - Use Chinese-compatible characters, punctuation, and fonts.
127
+ - On macOS, do not assume a font family is available just because it worked elsewhere.
128
+ - On macOS, prefer a locally verified CJK font in this order when the PDF workflow allows explicit font selection:
129
+ - `/System/Library/Fonts/Hiragino Sans GB.ttc`
130
+ - `/System/Library/Fonts/Supplemental/Songti.ttc`
131
+ - `/Library/Fonts/Arial Unicode.ttf`
132
+ - `/System/Library/Fonts/STHeiti Medium.ttc`
133
+ - `/System/Library/Fonts/STHeiti Light.ttc`
134
+ - Do not hardcode fonts that are missing or known to render poorly on the current macOS host.
135
+ - Do not assume `PingFang` exists on every macOS environment.
136
+ - If the `pdf` skill already has a verified CJK-safe default on the current machine, reuse that default instead of overriding it.
137
+ - Avoid emoji, decorative symbols, and unusual glyphs that often break in PDF rendering.
138
+ - Only include exact price levels, yields, or percentages when they were verified from current sources.
139
+
140
+ ### 6) Render the final PDF
141
+
142
+ - Convert the completed report into PDF through the `pdf` skill.
143
+ - Ensure the chosen font and renderer support Chinese text and common Markdown symbols.
144
+ - On macOS, verify the selected font path exists before rendering.
145
+ - If the renderer cannot safely output Chinese text, fix the font or rendering path before finishing.
146
+
147
+ ### 7) Perform visual QA before finishing
148
+
149
+ - Open the rendered PDF locally before completing the task.
150
+ - Inspect at least:
151
+ - the first page
152
+ - one section with a table
153
+ - one section with dense paragraph text
154
+ - Capture temporary screenshots of the rendered PDF pages as a final QA check before considering the report complete.
155
+ - Verify the temporary screenshots for:
156
+ - correct Chinese glyph rendering
157
+ - no missing characters or tofu boxes
158
+ - reasonable line wrapping
159
+ - table borders and columns staying readable
160
+ - page margins and spacing looking clean
161
+ - title, headings, and body hierarchy looking visually balanced
162
+ - If the layout or glyph rendering is wrong or unattractive, fix the font, spacing, or content structure and render again before finishing.
163
+ - Do not treat the task as complete until this screenshot-based visual check passes.
164
+
165
+ ## File Layout Rules
166
+
167
+ - Store reports inside a month-based folder named `YYYY-MM`.
168
+ - If the user gives a base output directory, create or reuse `YYYY-MM` beneath it.
169
+ - If the user does not give a base output directory, use the current working directory as the base.
170
+ - Keep only the final PDF report as the persistent output. Do not leave Markdown, DOCX, or temporary export files behind after rendering.
171
+ - Remove temporary working files before finishing.
172
+ - The QA screenshots are temporary and must be deleted after the visual check unless the user explicitly asks to keep them.
173
+
174
+ ## Report Naming Rules
175
+
176
+ - The visible report title must use this exact pattern:
177
+ - `[YYYY/M/D-YYYY/M/D]-market-research`
178
+ - Build the date range from the exact research window start and end dates.
179
+ - Example:
180
+ - `[2025/12/7-2025/12/13]-market-research`
181
+ - Because `/` is not safe in macOS filenames, do not use the visible title string directly as the filename.
182
+ - The PDF filename must be the filesystem-safe variant:
183
+ - `[YYYY-M-D-YYYY-M-D]-market-research.pdf`
184
+ - Example:
185
+ - `[2025-12-7-2025-12-13]-market-research.pdf`
186
+
187
+ ## Standard Report Requirements
188
+
189
+ The report must contain these sections in order:
190
+
191
+ 1. Report title and scope
192
+ 2. Generated time and research window
193
+ 3. Executive summary
194
+ 4. Weekly macro recap
195
+ 5. Overall market sentiment
196
+ 6. Key news and why each item mattered
197
+ 7. Instruments to watch next week
198
+ 8. Next-week watchpoints or catalyst calendar
199
+ 9. Risks and limitations
200
+
201
+ ## Output Rules
202
+
203
+ - The default deliverable is a PDF.
204
+ - Save the final file at `YYYY-MM/[YYYY-M-D-YYYY-M-D]-market-research.pdf`.
205
+ - The default language is Chinese unless the user explicitly asks for another language.
206
+ - Keep the report evidence-based, concise, and decision-useful.
207
+ - Call out uncertainty, disputed interpretations, and missing data explicitly.
208
+ - Do not present speculative trade recommendations as facts.
@@ -0,0 +1,4 @@
1
+ interface:
2
+ display_name: "Financial Research"
3
+ short_description: "Research the latest completed market week and produce a PDF watchlist"
4
+ default_prompt: "Use $financial-research to lock the user's local timezone and most recent completed 7-day window ending yesterday, gather authoritative financial and economic sources for that period, identify the key instruments to watch next week, and produce the final report as a PDF in Chinese by default via $pdf."
@@ -0,0 +1,45 @@
1
+ # {{report_title}}
2
+
3
+ - Generated at: {{generated_at}}
4
+ - Research window: {{window_start}} to {{window_end}} ({{timezone}})
5
+ - Output file: {{report_filename}}
6
+ - Verified font: {{font_name_or_path}}
7
+ - Scope: {{scope}}
8
+ - Language: {{language}}
9
+
10
+ ## {{executive_summary_heading}}
11
+
12
+ {{executive_summary}}
13
+
14
+ ## {{macro_recap_heading}}
15
+
16
+ {{macro_recap}}
17
+
18
+ ## {{market_sentiment_heading}}
19
+
20
+ - Overall tone: {{overall_tone}}
21
+ - Main drivers: {{main_drivers}}
22
+ - Cross-asset confirmation: {{cross_asset_confirmation}}
23
+ - What changed versus the prior regime: {{regime_shift}}
24
+
25
+ ## {{key_news_heading}}
26
+
27
+ | Date | Event | Why it mattered this week | Why it matters next week | Main assets |
28
+ | --- | --- | --- | --- | --- |
29
+ | {{date_1}} | {{event_1}} | {{impact_1}} | {{forward_link_1}} | {{assets_1}} |
30
+
31
+ ## {{watchlist_heading}}
32
+
33
+ | Instrument | Asset class | This week's driver | What to watch next week | Base case | Main risk |
34
+ | --- | --- | --- | --- | --- | --- |
35
+ | {{instrument_1}} | {{asset_class_1}} | {{driver_1}} | {{watchpoint_1}} | {{base_case_1}} | {{risk_1}} |
36
+
37
+ ## {{next_week_watchpoints_heading}}
38
+
39
+ | Date | Event | Why it matters | Instruments most exposed |
40
+ | --- | --- | --- | --- |
41
+ | {{next_date_1}} | {{next_event_1}} | {{next_impact_1}} | {{next_assets_1}} |
42
+
43
+ ## {{risks_and_limitations_heading}}
44
+
45
+ {{risks_and_limitations}}