opencodekit 0.20.7 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +1 -1
- package/dist/template/.opencode/AGENTS.md +60 -0
- package/dist/template/.opencode/agent/build.md +3 -2
- package/dist/template/.opencode/agent/explore.md +14 -14
- package/dist/template/.opencode/agent/general.md +1 -1
- package/dist/template/.opencode/agent/plan.md +1 -1
- package/dist/template/.opencode/agent/review.md +1 -1
- package/dist/template/.opencode/agent/vision.md +0 -9
- package/dist/template/.opencode/memory.db +0 -0
- package/dist/template/.opencode/memory.db-shm +0 -0
- package/dist/template/.opencode/memory.db-wal +0 -0
- package/dist/template/.opencode/opencode.json +83 -614
- package/dist/template/.opencode/opencodex-fast.jsonc +1 -1
- package/dist/template/.opencode/package.json +1 -1
- package/dist/template/.opencode/plugin/copilot-auth.ts +27 -12
- package/dist/template/.opencode/plugin/prompt-leverage.ts +193 -0
- package/dist/template/.opencode/plugin/prompt-leverage.ts.bak +228 -0
- package/dist/template/.opencode/plugin/sdk/copilot/copilot-provider.ts +14 -2
- package/dist/template/.opencode/plugin/sdk/copilot/index.ts +2 -2
- package/dist/template/.opencode/plugin/sdk/copilot/responses/convert-to-openai-responses-input.ts +335 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/map-openai-responses-finish-reason.ts +22 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-config.ts +18 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-error.ts +22 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-api-types.ts +214 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-language-model.ts +1770 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-prepare-tools.ts +173 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-settings.ts +1 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/code-interpreter.ts +87 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/file-search.ts +127 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/image-generation.ts +114 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/local-shell.ts +64 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/web-search-preview.ts +103 -0
- package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/web-search.ts +102 -0
- package/dist/template/.opencode/pnpm-lock.yaml +791 -9
- package/dist/template/.opencode/skill/api-and-interface-design/SKILL.md +162 -0
- package/dist/template/.opencode/skill/beads/SKILL.md +10 -9
- package/dist/template/.opencode/skill/beads/references/MULTI_AGENT.md +10 -10
- package/dist/template/.opencode/skill/ci-cd-and-automation/SKILL.md +202 -0
- package/dist/template/.opencode/skill/code-search-patterns/SKILL.md +253 -0
- package/dist/template/.opencode/skill/code-simplification/SKILL.md +211 -0
- package/dist/template/.opencode/skill/condition-based-waiting/SKILL.md +12 -0
- package/dist/template/.opencode/skill/defense-in-depth/SKILL.md +16 -6
- package/dist/template/.opencode/skill/deprecation-and-migration/SKILL.md +189 -0
- package/dist/template/.opencode/skill/development-lifecycle/SKILL.md +12 -48
- package/dist/template/.opencode/skill/documentation-and-adrs/SKILL.md +220 -0
- package/dist/template/.opencode/skill/gh-address-comments/SKILL.md +29 -0
- package/dist/template/.opencode/skill/gh-address-comments/scripts/fetch_comments.py +237 -0
- package/dist/template/.opencode/skill/gh-fix-ci/SKILL.md +38 -0
- package/dist/template/.opencode/skill/gh-fix-ci/scripts/inspect_pr_checks.py +509 -0
- package/dist/template/.opencode/skill/incremental-implementation/SKILL.md +191 -0
- package/dist/template/.opencode/skill/performance-optimization/SKILL.md +236 -0
- package/dist/template/.opencode/skill/prompt-leverage/SKILL.md +90 -0
- package/dist/template/.opencode/skill/prompt-leverage/references/framework.md +91 -0
- package/dist/template/.opencode/skill/prompt-leverage/scripts/augment_prompt.py +157 -0
- package/dist/template/.opencode/skill/receiving-code-review/SKILL.md +11 -0
- package/dist/template/.opencode/skill/screenshot/SKILL.md +48 -0
- package/dist/template/.opencode/skill/screenshot/scripts/ensure_macos_permissions.sh +54 -0
- package/dist/template/.opencode/skill/screenshot/scripts/macos_display_info.swift +22 -0
- package/dist/template/.opencode/skill/screenshot/scripts/macos_permissions.swift +40 -0
- package/dist/template/.opencode/skill/screenshot/scripts/macos_window_info.swift +126 -0
- package/dist/template/.opencode/skill/screenshot/scripts/take_screenshot.ps1 +163 -0
- package/dist/template/.opencode/skill/screenshot/scripts/take_screenshot.py +585 -0
- package/dist/template/.opencode/skill/security-and-hardening/SKILL.md +296 -0
- package/dist/template/.opencode/skill/security-threat-model/SKILL.md +36 -0
- package/dist/template/.opencode/skill/security-threat-model/references/prompt-template.md +255 -0
- package/dist/template/.opencode/skill/security-threat-model/references/security-controls-and-assets.md +32 -0
- package/dist/template/.opencode/skill/skill-installer/SKILL.md +58 -0
- package/dist/template/.opencode/skill/skill-installer/scripts/github_utils.py +21 -0
- package/dist/template/.opencode/skill/skill-installer/scripts/install-skill-from-github.py +313 -0
- package/dist/template/.opencode/skill/skill-installer/scripts/list-skills.py +106 -0
- package/dist/template/.opencode/skill/structured-edit/SKILL.md +10 -0
- package/dist/template/.opencode/skill/swarm-coordination/SKILL.md +66 -1
- package/package.json +1 -1
- package/dist/template/.opencode/skill/beads-bridge/SKILL.md +0 -321
- package/dist/template/.opencode/skill/code-navigation/SKILL.md +0 -130
- package/dist/template/.opencode/skill/mqdh/SKILL.md +0 -171
- package/dist/template/.opencode/skill/obsidian/SKILL.md +0 -192
- package/dist/template/.opencode/skill/obsidian/mcp.json +0 -22
- package/dist/template/.opencode/skill/pencil/SKILL.md +0 -72
- package/dist/template/.opencode/skill/ralph/SKILL.md +0 -296
- package/dist/template/.opencode/skill/tilth-cli/SKILL.md +0 -207
- package/dist/template/.opencode/skill/tool-priority/SKILL.md +0 -299
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: performance-optimization
|
|
3
|
+
description: Use when profiling, optimizing, or adding performance budgets to applications — covers measure-first workflow, Core Web Vitals, common anti-patterns, and performance regression prevention
|
|
4
|
+
version: 1.0.0
|
|
5
|
+
tags: [performance, code-quality]
|
|
6
|
+
dependencies: []
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
# Performance Optimization
|
|
10
|
+
|
|
11
|
+
> **Replaces** premature optimization and gut-feeling tuning with measurement-driven improvements that target actual bottlenecks
|
|
12
|
+
|
|
13
|
+
## When to Use
|
|
14
|
+
|
|
15
|
+
- Application is measurably slow (user reports, metrics, profiler data)
|
|
16
|
+
- Setting up performance budgets for a new project
|
|
17
|
+
- Reviewing code for common performance anti-patterns
|
|
18
|
+
- Performance regression detected in CI or monitoring
|
|
19
|
+
|
|
20
|
+
## When NOT to Use
|
|
21
|
+
|
|
22
|
+
- No evidence of a performance problem — premature optimization wastes time
|
|
23
|
+
- Micro-optimizations that save nanoseconds in non-hot paths
|
|
24
|
+
- Choosing "fast" over "correct" — correctness first, always
|
|
25
|
+
|
|
26
|
+
## Overview
|
|
27
|
+
|
|
28
|
+
Performance optimization is an empirical process. Measure, identify, fix, verify. Never optimize without profiling first.
|
|
29
|
+
|
|
30
|
+
**Core principle:** Measure before optimizing. Optimize the bottleneck, not the code you happen to be reading. Verify the improvement with numbers.
|
|
31
|
+
|
|
32
|
+
## Measure-First Workflow
|
|
33
|
+
|
|
34
|
+
```
|
|
35
|
+
1. MEASURE — Profile to identify actual bottlenecks (not guessed ones)
|
|
36
|
+
2. IDENTIFY — Find the specific hot path or resource constraint
|
|
37
|
+
3. FIX — Apply targeted optimization to the bottleneck
|
|
38
|
+
4. VERIFY — Measure again to confirm improvement
|
|
39
|
+
5. GUARD — Add budget/benchmark to prevent regression
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
**Rule:** Skip to step 3 only if you have measurement data that justifies the optimization.
|
|
43
|
+
|
|
44
|
+
## Performance Targets
|
|
45
|
+
|
|
46
|
+
### Core Web Vitals (Web)
|
|
47
|
+
|
|
48
|
+
| Metric | Good | Needs Improvement | Poor |
|
|
49
|
+
| ------------------------------- | ------- | ----------------- | ------- |
|
|
50
|
+
| LCP (Largest Contentful Paint) | ≤ 2.5s | ≤ 4.0s | > 4.0s |
|
|
51
|
+
| INP (Interaction to Next Paint) | ≤ 200ms | ≤ 500ms | > 500ms |
|
|
52
|
+
| CLS (Cumulative Layout Shift) | ≤ 0.1 | ≤ 0.25 | > 0.25 |
|
|
53
|
+
|
|
54
|
+
### General Targets
|
|
55
|
+
|
|
56
|
+
| Context | Target | Measure |
|
|
57
|
+
| ------------------- | ------------ | ---------------------- |
|
|
58
|
+
| API response (p95) | < 200ms | Server-side timing |
|
|
59
|
+
| CLI command startup | < 500ms | `time` or `perf_hooks` |
|
|
60
|
+
| Build time | < 60s | CI pipeline metrics |
|
|
61
|
+
| Bundle size (JS) | < 200KB gzip | Bundler output |
|
|
62
|
+
| Database query | < 50ms | Query EXPLAIN + timing |
|
|
63
|
+
|
|
64
|
+
## Common Anti-Patterns & Fixes
|
|
65
|
+
|
|
66
|
+
### N+1 Queries
|
|
67
|
+
|
|
68
|
+
```typescript
|
|
69
|
+
// ❌ N+1: One query per user
|
|
70
|
+
const users = await db.query("SELECT * FROM users");
|
|
71
|
+
for (const user of users) {
|
|
72
|
+
user.posts = await db.query("SELECT * FROM posts WHERE user_id = ?", [user.id]);
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
// ✅ Batch: Two queries total
|
|
76
|
+
const users = await db.query("SELECT * FROM users");
|
|
77
|
+
const userIds = users.map((u) => u.id);
|
|
78
|
+
const posts = await db.query("SELECT * FROM posts WHERE user_id IN (?)", [userIds]);
|
|
79
|
+
// Group posts by user_id in application code
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
### Unbounded Fetching
|
|
83
|
+
|
|
84
|
+
```typescript
|
|
85
|
+
// ❌ Fetch everything
|
|
86
|
+
const allItems = await db.query("SELECT * FROM items");
|
|
87
|
+
|
|
88
|
+
// ✅ Paginate
|
|
89
|
+
const items = await db.query("SELECT * FROM items LIMIT ? OFFSET ?", [pageSize, offset]);
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
### Missing Memoization
|
|
93
|
+
|
|
94
|
+
```typescript
|
|
95
|
+
// ❌ Recompute on every render
|
|
96
|
+
function ExpensiveList({ items }) {
|
|
97
|
+
const sorted = items.sort((a, b) => complexSort(a, b)); // sorts on every render
|
|
98
|
+
return <List items={sorted} />;
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
// ✅ Memoize expensive computation
|
|
102
|
+
function ExpensiveList({ items }) {
|
|
103
|
+
const sorted = useMemo(
|
|
104
|
+
() => [...items].sort((a, b) => complexSort(a, b)),
|
|
105
|
+
[items]
|
|
106
|
+
);
|
|
107
|
+
return <List items={sorted} />;
|
|
108
|
+
}
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
### Large Bundle Size
|
|
112
|
+
|
|
113
|
+
```typescript
|
|
114
|
+
// ❌ Import entire library
|
|
115
|
+
import _ from "lodash";
|
|
116
|
+
const result = _.debounce(fn, 300);
|
|
117
|
+
|
|
118
|
+
// ✅ Import only what you need
|
|
119
|
+
import debounce from "lodash/debounce";
|
|
120
|
+
const result = debounce(fn, 300);
|
|
121
|
+
|
|
122
|
+
// ✅✅ Use native (no dependency)
|
|
123
|
+
function debounce(fn, ms) {
|
|
124
|
+
/* ... */
|
|
125
|
+
}
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
### Missing Image Optimization
|
|
129
|
+
|
|
130
|
+
```html
|
|
131
|
+
<!-- ❌ Unoptimized -->
|
|
132
|
+
<img src="hero.png" />
|
|
133
|
+
|
|
134
|
+
<!-- ✅ Optimized -->
|
|
135
|
+
<img
|
|
136
|
+
src="hero.webp"
|
|
137
|
+
srcset="hero-400.webp 400w, hero-800.webp 800w, hero-1200.webp 1200w"
|
|
138
|
+
sizes="(max-width: 600px) 400px, (max-width: 1024px) 800px, 1200px"
|
|
139
|
+
loading="lazy"
|
|
140
|
+
decoding="async"
|
|
141
|
+
width="1200"
|
|
142
|
+
height="630"
|
|
143
|
+
alt="Hero image"
|
|
144
|
+
/>
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
### Unnecessary Re-renders (React)
|
|
148
|
+
|
|
149
|
+
```typescript
|
|
150
|
+
// ❌ New object every render causes child re-render
|
|
151
|
+
function Parent() {
|
|
152
|
+
return <Child style={{ color: 'red' }} onClick={() => doThing()} />;
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
// ✅ Stable references
|
|
156
|
+
const style = { color: 'red' };
|
|
157
|
+
function Parent() {
|
|
158
|
+
const handleClick = useCallback(() => doThing(), []);
|
|
159
|
+
return <Child style={style} onClick={handleClick} />;
|
|
160
|
+
}
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
## Profiling Tools
|
|
164
|
+
|
|
165
|
+
| Context | Tool | What It Shows |
|
|
166
|
+
| ---------------- | -------------------------------- | --------------------------------- |
|
|
167
|
+
| Web (browser) | Chrome DevTools Performance | Paint, scripting, layout, network |
|
|
168
|
+
| Web (field data) | CrUX, PageSpeed Insights | Real-user Core Web Vitals |
|
|
169
|
+
| Node.js | `node --prof` + `--prof-process` | V8 profiling ticks per function |
|
|
170
|
+
| Node.js | `clinic.js` | Flamegraphs, event loop delays |
|
|
171
|
+
| React | React DevTools Profiler | Component render times |
|
|
172
|
+
| SQL | `EXPLAIN ANALYZE` | Query execution plan |
|
|
173
|
+
| Bundle | `source-map-explorer` | Module size breakdown |
|
|
174
|
+
| Network | `lighthouse` | Loading performance audit |
|
|
175
|
+
|
|
176
|
+
## Performance Budget
|
|
177
|
+
|
|
178
|
+
### Setting Budgets
|
|
179
|
+
|
|
180
|
+
```json
|
|
181
|
+
{
|
|
182
|
+
"budgets": [
|
|
183
|
+
{ "metric": "js-bundle", "max": "200KB", "warn": "150KB" },
|
|
184
|
+
{ "metric": "css-bundle", "max": "50KB", "warn": "40KB" },
|
|
185
|
+
{ "metric": "lcp", "max": "2500ms", "warn": "2000ms" },
|
|
186
|
+
{ "metric": "api-p95", "max": "200ms", "warn": "150ms" }
|
|
187
|
+
]
|
|
188
|
+
}
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
### Enforcing Budgets in CI
|
|
192
|
+
|
|
193
|
+
```yaml
|
|
194
|
+
- name: Check bundle size
|
|
195
|
+
run: |
|
|
196
|
+
npx bundlesize --config .bundlesizerc.json
|
|
197
|
+
|
|
198
|
+
- name: Lighthouse audit
|
|
199
|
+
run: |
|
|
200
|
+
npx lighthouse $URL --output json --chrome-flags="--headless"
|
|
201
|
+
# Parse and assert against budgets
|
|
202
|
+
```
|
|
203
|
+
|
|
204
|
+
## Common Rationalizations
|
|
205
|
+
|
|
206
|
+
| Excuse | Rebuttal |
|
|
207
|
+
| --------------------------------- | ---------------------------------------------------------------------------------- |
|
|
208
|
+
| "It's fast enough on my machine" | Test on low-end devices and slow networks. Your machine isn't representative. |
|
|
209
|
+
| "We'll optimize later" | Performance debt compounds. Set budgets now, optimize when they're breached. |
|
|
210
|
+
| "This micro-optimization matters" | Profile first. If it's not in the hot path, it doesn't matter. |
|
|
211
|
+
| "Users won't notice 200ms" | Studies show 100ms delays reduce conversions. Users notice more than you think. |
|
|
212
|
+
| "Adding metrics is overhead" | The overhead of measurement is trivial compared to the cost of blind optimization. |
|
|
213
|
+
| "Caching will fix it" | Caching masks problems. Fix the root cause, then add caching as defense. |
|
|
214
|
+
|
|
215
|
+
## Red Flags — STOP
|
|
216
|
+
|
|
217
|
+
- Optimizing without profiling data
|
|
218
|
+
- Adding caching to mask a fundamentally slow operation
|
|
219
|
+
- Micro-optimizing code that runs once per request
|
|
220
|
+
- Bundle size growing without review
|
|
221
|
+
- No performance budget or monitoring in place
|
|
222
|
+
- Using `SELECT *` in production queries
|
|
223
|
+
|
|
224
|
+
## Verification
|
|
225
|
+
|
|
226
|
+
- [ ] Bottleneck identified with profiling data (not intuition)
|
|
227
|
+
- [ ] Optimization shows measurable improvement in profiler
|
|
228
|
+
- [ ] Performance budget is set and enforced in CI
|
|
229
|
+
- [ ] No regressions in existing benchmarks
|
|
230
|
+
- [ ] Optimization doesn't sacrifice correctness or readability
|
|
231
|
+
|
|
232
|
+
## See Also
|
|
233
|
+
|
|
234
|
+
- **react-best-practices** — React-specific performance patterns (server components, bundle optimization)
|
|
235
|
+
- **ci-cd-and-automation** — Enforcing performance budgets in CI
|
|
236
|
+
- **code-simplification** — Simplifying code often improves performance as a side effect
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: prompt-leverage
|
|
3
|
+
description: >-
|
|
4
|
+
Strengthen raw user prompts into execution-ready instruction sets. Use when processing
|
|
5
|
+
user input to upgrade it with clear objective, context, work style, tool rules, output
|
|
6
|
+
contract, verification, and done criteria before planning or execution.
|
|
7
|
+
metadata:
|
|
8
|
+
dependencies: []
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
# Prompt Leverage
|
|
12
|
+
|
|
13
|
+
Strengthen the user's current prompt into a stronger working instruction set without changing the underlying intent. Preserve the task, fill in missing execution structure, and add only enough scaffolding to improve reliability.
|
|
14
|
+
|
|
15
|
+
This skill acts as a **pre-processing layer** — it runs on user input BEFORE planning/execution to ensure every prompt is execution-ready.
|
|
16
|
+
|
|
17
|
+
## Workflow
|
|
18
|
+
|
|
19
|
+
1. Read the raw prompt and identify the real job to be done.
|
|
20
|
+
2. Infer the task type: coding, research, writing, analysis, planning, or review.
|
|
21
|
+
3. Rebuild the prompt with the framework blocks in `references/framework.md`.
|
|
22
|
+
4. Keep the result proportional: do not over-specify a simple task.
|
|
23
|
+
5. Return both the improved prompt and a short explanation of what changed when useful.
|
|
24
|
+
|
|
25
|
+
## Transformation Rules
|
|
26
|
+
|
|
27
|
+
- Preserve the user's objective, constraints, and tone unless they conflict.
|
|
28
|
+
- Prefer adding missing structure over rewriting everything stylistically.
|
|
29
|
+
- Add context requirements only when they improve correctness.
|
|
30
|
+
- Add tool rules only when tool use materially affects correctness.
|
|
31
|
+
- Add verification and completion criteria for non-trivial tasks.
|
|
32
|
+
- Keep prompts compact enough to be practical in repeated use.
|
|
33
|
+
|
|
34
|
+
## Framework Blocks
|
|
35
|
+
|
|
36
|
+
Use these blocks selectively.
|
|
37
|
+
|
|
38
|
+
- `Objective`: state the task and what success looks like.
|
|
39
|
+
- `Context`: list sources, files, constraints, and unknowns.
|
|
40
|
+
- `Work Style`: set depth, breadth, care, and first-principles expectations.
|
|
41
|
+
- `Tool Rules`: state when tools, browsing, or file inspection are required.
|
|
42
|
+
- `Output Contract`: define structure, formatting, and level of detail.
|
|
43
|
+
- `Verification`: require checks for correctness, edge cases, and better alternatives.
|
|
44
|
+
- `Done Criteria`: define when the agent should stop.
|
|
45
|
+
|
|
46
|
+
## Output Modes
|
|
47
|
+
|
|
48
|
+
Choose one mode based on the user request.
|
|
49
|
+
|
|
50
|
+
- `Inline upgrade`: provide the upgraded prompt only.
|
|
51
|
+
- `Upgrade + rationale`: provide the prompt plus a brief list of improvements.
|
|
52
|
+
- `Template extraction`: convert the prompt into a reusable fill-in-the-blank template.
|
|
53
|
+
- `Hook spec`: explain how to apply the framework automatically before execution.
|
|
54
|
+
|
|
55
|
+
## Quality Bar
|
|
56
|
+
|
|
57
|
+
Before finalizing, check the upgraded prompt:
|
|
58
|
+
|
|
59
|
+
- still matches the original intent
|
|
60
|
+
- does not add unnecessary ceremony
|
|
61
|
+
- includes the right verification level for the task
|
|
62
|
+
- gives the agent a clear definition of done
|
|
63
|
+
|
|
64
|
+
If the prompt is already strong, say so and make only minimal edits.
|
|
65
|
+
|
|
66
|
+
## Intensity Levels
|
|
67
|
+
|
|
68
|
+
Use the minimum level that matches the task.
|
|
69
|
+
|
|
70
|
+
- `Light`: simple edits, formatting, quick rewrites.
|
|
71
|
+
- `Standard`: typical coding, research, and drafting tasks.
|
|
72
|
+
- `Deep`: debugging, architecture, complex research, or high-stakes outputs.
|
|
73
|
+
|
|
74
|
+
## Task-Type Adjustments
|
|
75
|
+
|
|
76
|
+
### Coding
|
|
77
|
+
|
|
78
|
+
- Emphasize repo context, file inspection, smallest correct change, validation, and edge cases.
|
|
79
|
+
|
|
80
|
+
### Research
|
|
81
|
+
|
|
82
|
+
- Emphasize source quality, evidence gathering, synthesis, uncertainty, and citations.
|
|
83
|
+
|
|
84
|
+
### Writing
|
|
85
|
+
|
|
86
|
+
- Emphasize audience, tone, structure, constraints, and revision criteria.
|
|
87
|
+
|
|
88
|
+
### Review
|
|
89
|
+
|
|
90
|
+
- Emphasize fresh-eyes critique, failure modes, alternatives, and explicit severity.
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
# Prompt Leverage Framework
|
|
2
|
+
|
|
3
|
+
Reference for combining source ideas into a practical execution framework.
|
|
4
|
+
|
|
5
|
+
## Source Synthesis
|
|
6
|
+
|
|
7
|
+
- **Agent Flywheel** contributes behavior controls: intensity, wider search, deeper analysis, fresh eyes, first-principles thinking, and future-self clarity.
|
|
8
|
+
- **OpenAI prompt guidance** contributes execution controls: clear objectives, explicit output contracts, tool persistence, dependency checks, verification loops, and completion criteria.
|
|
9
|
+
|
|
10
|
+
Treat the final framework as:
|
|
11
|
+
|
|
12
|
+
`Objective -> Context -> Work Style -> Tool Rules -> Output Contract -> Verification -> Done`
|
|
13
|
+
|
|
14
|
+
## Block Definitions
|
|
15
|
+
|
|
16
|
+
### Objective
|
|
17
|
+
|
|
18
|
+
State the task in one or two lines. Define success in observable terms.
|
|
19
|
+
|
|
20
|
+
### Context
|
|
21
|
+
|
|
22
|
+
Specify relevant files, URLs, constraints, assumptions, and information boundaries. Say when the agent must retrieve facts instead of guessing.
|
|
23
|
+
|
|
24
|
+
### Work Style
|
|
25
|
+
|
|
26
|
+
Control how the agent approaches the task.
|
|
27
|
+
|
|
28
|
+
- Go broad first when system understanding matters.
|
|
29
|
+
- Go deep where risk or complexity is highest.
|
|
30
|
+
- Use first-principles reasoning before changing things.
|
|
31
|
+
- Re-check with fresh eyes for non-trivial tasks.
|
|
32
|
+
|
|
33
|
+
### Tool Rules
|
|
34
|
+
|
|
35
|
+
Define when browsing, file inspection, tests, or external tools are required. Prevent skipping prerequisite checks.
|
|
36
|
+
|
|
37
|
+
### Output Contract
|
|
38
|
+
|
|
39
|
+
Define exact structure, tone, formatting, depth, and any required sections or schemas.
|
|
40
|
+
|
|
41
|
+
### Verification
|
|
42
|
+
|
|
43
|
+
Require checks for correctness, grounding, completeness, side effects, and better alternatives.
|
|
44
|
+
|
|
45
|
+
### Done Criteria
|
|
46
|
+
|
|
47
|
+
Define what must be true before the agent stops.
|
|
48
|
+
|
|
49
|
+
## Intensity Levels
|
|
50
|
+
|
|
51
|
+
Use the minimum level that matches the task.
|
|
52
|
+
|
|
53
|
+
- `Light`: simple edits, formatting, quick rewrites.
|
|
54
|
+
- `Standard`: typical coding, research, and drafting tasks.
|
|
55
|
+
- `Deep`: debugging, architecture, complex research, or high-stakes outputs.
|
|
56
|
+
|
|
57
|
+
## Task-Type Adjustments
|
|
58
|
+
|
|
59
|
+
### Coding
|
|
60
|
+
|
|
61
|
+
- Emphasize repo context, file inspection, smallest correct change, validation, and edge cases.
|
|
62
|
+
|
|
63
|
+
### Research
|
|
64
|
+
|
|
65
|
+
- Emphasize source quality, evidence gathering, synthesis, uncertainty, and citations.
|
|
66
|
+
|
|
67
|
+
### Writing
|
|
68
|
+
|
|
69
|
+
- Emphasize audience, tone, structure, constraints, and revision criteria.
|
|
70
|
+
|
|
71
|
+
### Review
|
|
72
|
+
|
|
73
|
+
- Emphasize fresh-eyes critique, failure modes, alternatives, and explicit severity.
|
|
74
|
+
|
|
75
|
+
## Prompt Upgrade Heuristics
|
|
76
|
+
|
|
77
|
+
- Add missing blocks only when they materially improve execution.
|
|
78
|
+
- Do not turn a one-line request into a giant spec unless the task is genuinely complex.
|
|
79
|
+
- Preserve user language where possible so the upgraded prompt still feels native.
|
|
80
|
+
- Prefer concrete completion criteria over vague quality adjectives.
|
|
81
|
+
|
|
82
|
+
## Upgrade Rubric
|
|
83
|
+
|
|
84
|
+
An upgraded prompt is good when it:
|
|
85
|
+
|
|
86
|
+
1. preserves original intent
|
|
87
|
+
2. reduces ambiguity
|
|
88
|
+
3. sets the right depth and care level
|
|
89
|
+
4. defines the expected output clearly
|
|
90
|
+
5. includes an appropriate verification step
|
|
91
|
+
6. tells the agent when to stop
|
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Automated prompt upgrader using the Prompt Leverage Framework."""
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import argparse
|
|
7
|
+
import re
|
|
8
|
+
from textwrap import dedent
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
# Keyword lookup used to classify a raw prompt into a task category.
# Insertion order matters: detect_task breaks score ties in favor of
# earlier entries, so "coding" wins over later categories on a tie.
TASK_KEYWORDS = {
    "coding": [
        "code", "bug", "repo", "refactor", "test", "implement", "fix",
        "function", "api",
    ],
    "research": [
        "research", "compare", "find", "latest", "sources", "analyze market",
        "look up",
    ],
    "writing": ["write", "rewrite", "draft", "email", "memo", "blog", "copy", "tone"],
    "review": ["review", "audit", "critique", "inspect", "evaluate", "assess"],
    "planning": ["plan", "roadmap", "strategy", "framework", "outline"],
    "analysis": ["analyze", "explain", "break down", "diagnose", "root cause"],
}
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def detect_task(prompt: str) -> str:
    """Classify a prompt into a task type via keyword matching.

    Each keyword is matched as a whole word (or whole phrase) rather than a
    raw substring, so e.g. "fix" no longer fires on "prefix", "plan" no
    longer fires on "airplane", and "api" no longer fires on "rapid".
    The category with the most keyword hits wins; score ties resolve to
    the earliest entry in TASK_KEYWORDS.

    Returns:
        The best-scoring task key from TASK_KEYWORDS, or "analysis" as the
        fallback when no keyword matches at all.
    """
    lowered = prompt.lower()
    scores = {
        task: sum(
            1
            for keyword in keywords
            # \b anchors keep single-word keywords from matching inside
            # longer words; re.escape guards any regex metacharacters.
            if re.search(rf"\b{re.escape(keyword)}\b", lowered)
        )
        for task, keywords in TASK_KEYWORDS.items()
    }
    best_task, best_score = max(scores.items(), key=lambda item: item[1])
    return best_task if best_score > 0 else "analysis"
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def infer_intensity(prompt: str, task: str) -> str:
    """Pick an effort level ("Deep", "Standard", or "Light") for a prompt.

    High-stakes wording in the prompt always escalates to "Deep";
    otherwise coding, research, and review tasks default to "Standard"
    and every other task type to "Light".
    """
    # Phrases signalling the user wants extra care regardless of task type.
    deep_signals = (
        "careful",
        "deep",
        "thorough",
        "high stakes",
        "production",
        "critical",
    )
    lowered = prompt.lower()
    for signal in deep_signals:
        if signal in lowered:
            return "Deep"
    return "Standard" if task in {"coding", "research", "review"} else "Light"
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def build_tool_rules(task: str) -> str:
    """Return the tool-use rule text matched to a task type.

    Tasks without a dedicated rule fall back to a generic
    use-tools-only-when-helpful instruction.
    """
    if task == "coding":
        return (
            "Inspect the relevant files and dependencies first. Validate the "
            "final change with the narrowest useful checks before broadening "
            "scope."
        )
    if task == "research":
        return (
            "Retrieve evidence from reliable sources before concluding. "
            "Do not guess facts that can be checked."
        )
    if task == "review":
        return (
            "Read enough surrounding context to understand intent before "
            "critiquing. Distinguish confirmed issues from plausible risks."
        )
    return (
        "Use tools or extra context only when they materially improve "
        "correctness or completeness."
    )
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def build_output_contract(task: str) -> str:
|
|
84
|
+
"""Build task-specific output contract."""
|
|
85
|
+
contracts = {
|
|
86
|
+
"coding": "Return the result in a practical execution format: concise summary, concrete changes or code, validation notes, and any remaining risks.",
|
|
87
|
+
"research": "Return a structured synthesis with key findings, supporting evidence, uncertainty where relevant, and a concise bottom line.",
|
|
88
|
+
"writing": "Return polished final copy in the requested tone and format. If useful, include a short rationale for major editorial choices.",
|
|
89
|
+
"review": "Return findings grouped by severity or importance, explain why each matters, and suggest the smallest credible next step.",
|
|
90
|
+
}
|
|
91
|
+
return contracts.get(
|
|
92
|
+
task,
|
|
93
|
+
"Return a clear, well-structured response matched to the task, with no unnecessary verbosity.",
|
|
94
|
+
)
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def upgrade_prompt(raw_prompt: str, task: str | None) -> str:
    """Rewrite a raw prompt into the structured Prompt Leverage format.

    Collapses runs of whitespace in the raw prompt, classifies the task
    (unless an explicit ``task`` is supplied), infers an effort level, and
    renders the framework blocks (Objective, Context, Work Style, Tool
    Rules, Output Contract, Verification, Done Criteria) around it.
    """
    condensed = re.sub(r"\s+", " ", raw_prompt).strip()
    task_type = task if task else detect_task(condensed)
    effort = infer_intensity(condensed, task_type)
    tool_rule = build_tool_rules(task_type)
    contract = build_output_contract(task_type)

    # All lines share the same leading indent, so dedent() removes it and
    # strip() drops the surrounding blank lines from the triple quotes.
    template = f"""
    Objective:
    - Complete this task: {condensed}
    - Optimize for a correct, useful result rather than a merely plausible one.

    Context:
    - Preserve the user's original intent and constraints.
    - Surface any key assumptions if required information is missing.

    Work Style:
    - Task type: {task_type}
    - Effort level: {effort}
    - Understand the problem broadly enough to avoid narrow mistakes, then go deep where the risk or complexity is highest.
    - Use first-principles reasoning before proposing changes.
    - For non-trivial work, review the result once with fresh eyes before finalizing.

    Tool Rules:
    - {tool_rule}

    Output Contract:
    - {contract}

    Verification:
    - Check correctness, completeness, and edge cases.
    - Improve obvious weaknesses if a better approach is available within scope.

    Done Criteria:
    - Stop only when the response satisfies the task, matches the requested format, and passes the verification step.
    """
    return dedent(template).strip()
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the prompt upgrader.

    Accepts one positional raw prompt and an optional --task override;
    when --task is omitted the task type is auto-detected downstream.
    """
    cli = argparse.ArgumentParser(
        description="Upgrade a raw prompt into a framework-backed execution prompt."
    )
    cli.add_argument("prompt", help="Raw prompt text to upgrade.")
    cli.add_argument(
        "--task",
        # sorted(dict) iterates keys, same as sorted(dict.keys()).
        choices=sorted(TASK_KEYWORDS),
        help="Optional explicit task type (auto-detected if not provided).",
    )
    return cli.parse_args()
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def main() -> None:
    """CLI entry point: upgrade the prompt from argv and print the result."""
    options = parse_args()
    print(upgrade_prompt(options.prompt, options.task))


if __name__ == "__main__":
    main()
|
|
@@ -20,6 +20,17 @@ dependencies: []
|
|
|
20
20
|
- You already verified and accepted the feedback and are ready to implement
|
|
21
21
|
- You need to request a review (use requesting-code-review)
|
|
22
22
|
|
|
23
|
+
## Common Rationalizations
|
|
24
|
+
|
|
25
|
+
| Rationalization | Rebuttal |
|
|
26
|
+
| ------------------------------------------------- | -------------------------------------------------------------------------------------- |
|
|
27
|
+
| "The reviewer is experienced, they must be right" | Experience doesn't mean they have YOUR codebase context. Verify against reality |
|
|
28
|
+
| "It's faster to just implement it than to verify" | A wrong implementation costs more than the 2 minutes to check |
|
|
29
|
+
| "Pushing back will create conflict" | Technical correctness > social comfort. Shipping wrong code creates bigger conflict |
|
|
30
|
+
| "I'll fix it and verify later" | "Later" means after the wrong change is merged and depended upon |
|
|
31
|
+
| "The suggestion is small, no need to verify" | Small changes break things too. One wrong import can crash a module |
|
|
32
|
+
| "I understood the feedback, no need to restate" | Restating catches misunderstandings BEFORE you waste time implementing the wrong thing |
|
|
33
|
+
|
|
23
34
|
## Overview
|
|
24
35
|
|
|
25
36
|
Code review requires technical evaluation, not emotional performance.
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: screenshot
|
|
3
|
+
description: Use when the user explicitly asks for desktop/system screenshots or when browser/tool-specific capture is unavailable.
|
|
4
|
+
version: 1.0.0
|
|
5
|
+
tags: [debugging, ui, automation]
|
|
6
|
+
dependencies: []
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
# screenshot
|
|
10
|
+
|
|
11
|
+
Capture screenshots at OS level for desktop apps, windows, regions, or full screen.
|
|
12
|
+
|
|
13
|
+
## When to Use
|
|
14
|
+
|
|
15
|
+
- User asks for screenshot of desktop/app/window/region
|
|
16
|
+
- You need non-browser captures (native app, OS UI, Electron shell)
|
|
17
|
+
- Browser capture tools are unavailable or insufficient
|
|
18
|
+
|
|
19
|
+
## When NOT to Use
|
|
20
|
+
|
|
21
|
+
- Browser-only capture where Playwright/DevTools is enough
|
|
22
|
+
- Design-file capture where Figma skills are available
|
|
23
|
+
|
|
24
|
+
## Save Location Rules
|
|
25
|
+
|
|
26
|
+
1. If user gives a path, save there.
|
|
27
|
+
2. If user asks generally for a screenshot, use OS default screenshot location.
|
|
28
|
+
3. If screenshot is for agent inspection, save to temp location.
|
|
29
|
+
|
|
30
|
+
## Scripts
|
|
31
|
+
|
|
32
|
+
- `scripts/take_screenshot.py` (macOS/Linux)
|
|
33
|
+
- `scripts/take_screenshot.ps1` (Windows)
|
|
34
|
+
- `scripts/ensure_macos_permissions.sh` (macOS preflight)
|
|
35
|
+
|
|
36
|
+
## Quick Start
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
# macOS/Linux default capture
|
|
40
|
+
python3 .opencode/skill/screenshot/scripts/take_screenshot.py
|
|
41
|
+
|
|
42
|
+
# capture app window(s) on macOS to temp
|
|
43
|
+
bash .opencode/skill/screenshot/scripts/ensure_macos_permissions.sh && \
|
|
44
|
+
python3 .opencode/skill/screenshot/scripts/take_screenshot.py --app "Codex" --mode temp
|
|
45
|
+
|
|
46
|
+
# region capture
|
|
47
|
+
python3 .opencode/skill/screenshot/scripts/take_screenshot.py --mode temp --region 100,200,800,600
|
|
48
|
+
```
|