opencodekit 0.20.8 → 0.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. package/dist/index.js +1 -1
  2. package/dist/template/.opencode/AGENTS.md +12 -0
  3. package/dist/template/.opencode/memory.db +0 -0
  4. package/dist/template/.opencode/memory.db-shm +0 -0
  5. package/dist/template/.opencode/memory.db-wal +0 -0
  6. package/dist/template/.opencode/opencode.json +83 -609
  7. package/dist/template/.opencode/opencodex-fast.jsonc +1 -1
  8. package/dist/template/.opencode/package.json +1 -1
  9. package/dist/template/.opencode/plugin/copilot-auth.ts +27 -12
  10. package/dist/template/.opencode/plugin/prompt-leverage.ts +193 -0
  11. package/dist/template/.opencode/plugin/prompt-leverage.ts.bak +228 -0
  12. package/dist/template/.opencode/plugin/sdk/copilot/copilot-provider.ts +14 -2
  13. package/dist/template/.opencode/plugin/sdk/copilot/index.ts +2 -2
  14. package/dist/template/.opencode/plugin/sdk/copilot/responses/convert-to-openai-responses-input.ts +335 -0
  15. package/dist/template/.opencode/plugin/sdk/copilot/responses/map-openai-responses-finish-reason.ts +22 -0
  16. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-config.ts +18 -0
  17. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-error.ts +22 -0
  18. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-api-types.ts +214 -0
  19. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-language-model.ts +1770 -0
  20. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-prepare-tools.ts +173 -0
  21. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-settings.ts +1 -0
  22. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/code-interpreter.ts +87 -0
  23. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/file-search.ts +127 -0
  24. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/image-generation.ts +114 -0
  25. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/local-shell.ts +64 -0
  26. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/web-search-preview.ts +103 -0
  27. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/web-search.ts +102 -0
  28. package/dist/template/.opencode/skill/gh-address-comments/SKILL.md +29 -0
  29. package/dist/template/.opencode/skill/gh-address-comments/scripts/fetch_comments.py +237 -0
  30. package/dist/template/.opencode/skill/gh-fix-ci/SKILL.md +38 -0
  31. package/dist/template/.opencode/skill/gh-fix-ci/scripts/inspect_pr_checks.py +509 -0
  32. package/dist/template/.opencode/skill/prompt-leverage/SKILL.md +90 -0
  33. package/dist/template/.opencode/skill/prompt-leverage/references/framework.md +91 -0
  34. package/dist/template/.opencode/skill/prompt-leverage/scripts/augment_prompt.py +157 -0
  35. package/dist/template/.opencode/skill/screenshot/SKILL.md +48 -0
  36. package/dist/template/.opencode/skill/screenshot/scripts/ensure_macos_permissions.sh +54 -0
  37. package/dist/template/.opencode/skill/screenshot/scripts/macos_display_info.swift +22 -0
  38. package/dist/template/.opencode/skill/screenshot/scripts/macos_permissions.swift +40 -0
  39. package/dist/template/.opencode/skill/screenshot/scripts/macos_window_info.swift +126 -0
  40. package/dist/template/.opencode/skill/screenshot/scripts/take_screenshot.ps1 +163 -0
  41. package/dist/template/.opencode/skill/screenshot/scripts/take_screenshot.py +585 -0
  42. package/dist/template/.opencode/skill/security-threat-model/SKILL.md +36 -0
  43. package/dist/template/.opencode/skill/security-threat-model/references/prompt-template.md +255 -0
  44. package/dist/template/.opencode/skill/security-threat-model/references/security-controls-and-assets.md +32 -0
  45. package/dist/template/.opencode/skill/skill-installer/SKILL.md +58 -0
  46. package/dist/template/.opencode/skill/skill-installer/scripts/github_utils.py +21 -0
  47. package/dist/template/.opencode/skill/skill-installer/scripts/install-skill-from-github.py +313 -0
  48. package/dist/template/.opencode/skill/skill-installer/scripts/list-skills.py +106 -0
  49. package/package.json +1 -1
@@ -0,0 +1,509 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import json
6
+ import re
7
+ import subprocess
8
+ import sys
9
+ from pathlib import Path
10
+ from shutil import which
11
+ from typing import Any, Iterable, Sequence
12
+
13
# gh `conclusion` values that mean a check did not succeed.
FAILURE_CONCLUSIONS = {"failure", "cancelled", "timed_out", "action_required"}

# gh `state` / `status` values that mean a check did not succeed.
FAILURE_STATES = {"failure", "error", "cancelled", "timed_out", "action_required"}

# gh `bucket` values (newer gh releases) that mean a check failed.
FAILURE_BUCKETS = {"fail"}

# Case-insensitive substrings used to locate the failure point in a log.
FAILURE_MARKERS = (
    "error",
    "fail",
    "failed",
    "traceback",
    "exception",
    "assert",
    "panic",
    "fatal",
    "timeout",
    "segmentation fault",
)

# Default sizing for the extracted snippet / context window.
DEFAULT_MAX_LINES = 160
DEFAULT_CONTEXT_LINES = 30

# Messages gh prints while run logs are still being uploaded.
PENDING_LOG_MARKERS = (
    "still in progress",
    "log will be available when it is complete",
)
49
+
50
+
51
class GhResult:
    """Captured outcome of a `gh` invocation: exit code plus decoded streams."""

    def __init__(self, returncode: int, stdout: str, stderr: str):
        # Plain value object; attributes mirror subprocess.CompletedProcess.
        self.returncode, self.stdout, self.stderr = returncode, stdout, stderr
56
+
57
+
58
def run_gh_command(args: Sequence[str], cwd: Path) -> GhResult:
    """Run `gh *args` in *cwd*, capturing decoded text output."""
    completed = subprocess.run(
        ["gh", *args],
        cwd=cwd,
        text=True,
        capture_output=True,
    )
    return GhResult(completed.returncode, completed.stdout, completed.stderr)
66
+
67
+
68
def run_gh_command_raw(args: Sequence[str], cwd: Path) -> tuple[int, bytes, str]:
    """Run `gh *args`, returning (returncode, raw stdout bytes, decoded stderr).

    stdout is left as bytes because job-log endpoints may return binary (zip)
    payloads; stderr is decoded leniently for error reporting.
    """
    completed = subprocess.run(["gh", *args], cwd=cwd, capture_output=True)
    return (
        completed.returncode,
        completed.stdout,
        completed.stderr.decode(errors="replace"),
    )
76
+
77
+
78
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line interface."""
    parser = argparse.ArgumentParser(
        description=(
            "Inspect failing GitHub PR checks, fetch GitHub Actions logs, and extract a "
            "failure snippet."
        ),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Flag table keeps the CLI surface easy to scan in one place.
    flag_specs: list[tuple[str, dict[str, Any]]] = [
        ("--repo", {"default": ".", "help": "Path inside the target Git repository."}),
        ("--pr", {"default": None, "help": "PR number or URL (defaults to current branch PR)."}),
        ("--max-lines", {"type": int, "default": DEFAULT_MAX_LINES}),
        ("--context", {"type": int, "default": DEFAULT_CONTEXT_LINES}),
        ("--json", {"action": "store_true", "help": "Emit JSON instead of text output."}),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
94
+
95
+
96
def main() -> int:
    """Entry point: analyze every failing check on a PR and report snippets.

    Exit codes: 0 when no failing checks; 1 on setup errors or when failing
    checks were found (so CI wrappers can branch on the result).
    """
    args = parse_args()

    repo_root = find_git_root(Path(args.repo))
    if repo_root is None:
        print("Error: not inside a Git repository.", file=sys.stderr)
        return 1

    if not ensure_gh_available(repo_root):
        return 1

    pr_value = resolve_pr(args.pr, repo_root)
    if pr_value is None:
        return 1

    checks = fetch_checks(pr_value, repo_root)
    if checks is None:
        return 1

    failing = [check for check in checks if is_failing(check)]
    if not failing:
        print(f"PR #{pr_value}: no failing checks detected.")
        return 0

    results = [
        analyze_check(
            check,
            repo_root=repo_root,
            max_lines=max(1, args.max_lines),
            context=max(1, args.context),
        )
        for check in failing
    ]

    if args.json:
        print(json.dumps({"pr": pr_value, "results": results}, indent=2))
    else:
        render_results(pr_value, results)

    # Non-zero: failing checks exist even though analysis itself succeeded.
    return 1
136
+
137
+
138
def find_git_root(start: Path) -> Path | None:
    """Return the top-level directory of the Git repo containing *start*.

    None when *start* is not inside a working tree.
    """
    proc = subprocess.run(
        ["git", "rev-parse", "--show-toplevel"],
        cwd=start,
        text=True,
        capture_output=True,
    )
    return Path(proc.stdout.strip()) if proc.returncode == 0 else None
148
+
149
+
150
def ensure_gh_available(repo_root: Path) -> bool:
    """Verify the gh CLI is installed and authenticated; print errors if not."""
    if which("gh") is None:
        print("Error: gh is not installed or not on PATH.", file=sys.stderr)
        return False
    status = run_gh_command(["auth", "status"], cwd=repo_root)
    if status.returncode != 0:
        detail = (status.stderr or status.stdout or "").strip()
        print(detail or "Error: gh not authenticated.", file=sys.stderr)
        return False
    return True
160
+
161
+
162
def resolve_pr(pr_value: str | None, repo_root: Path) -> str | None:
    """Return the PR identifier to inspect.

    An explicit *pr_value* wins; otherwise the current branch's PR number is
    looked up via `gh pr view`. None (after printing an error) on failure.
    """
    if pr_value:
        return pr_value
    view = run_gh_command(["pr", "view", "--json", "number"], cwd=repo_root)
    if view.returncode != 0:
        detail = (view.stderr or view.stdout or "").strip()
        print(detail or "Error: unable to resolve PR.", file=sys.stderr)
        return None
    try:
        payload = json.loads(view.stdout or "{}")
    except json.JSONDecodeError:
        print("Error: unable to parse PR JSON.", file=sys.stderr)
        return None
    number = payload.get("number")
    if not number:
        print("Error: no PR number found.", file=sys.stderr)
        return None
    return str(number)
180
+
181
+
182
def fetch_checks(pr_value: str, repo_root: Path) -> list[dict[str, Any]] | None:
    """Fetch the PR's checks via `gh pr checks`, adapting to the gh version.

    Different gh releases expose different JSON fields; when the primary field
    set is rejected, the error message's "Available fields:" list is parsed and
    the query is retried with the known fallback fields that are supported.
    Returns the parsed list, or None (after printing an error).
    """
    primary_fields = ["name", "state", "conclusion", "detailsUrl", "startedAt", "completedAt"]
    result = run_gh_command(
        ["pr", "checks", pr_value, "--json", ",".join(primary_fields)],
        cwd=repo_root,
    )
    if result.returncode != 0:
        message = "\n".join(filter(None, [result.stderr, result.stdout])).strip()
        available_fields = parse_available_fields(message)
        if not available_fields:
            # Not a field-selection problem: surface the raw gh error.
            print(message or "Error: gh pr checks failed.", file=sys.stderr)
            return None
        fallback_fields = [
            "name",
            "state",
            "bucket",
            "link",
            "startedAt",
            "completedAt",
            "workflow",
        ]
        selected_fields = [field for field in fallback_fields if field in available_fields]
        if not selected_fields:
            print("Error: no usable fields available for gh pr checks.", file=sys.stderr)
            return None
        result = run_gh_command(
            ["pr", "checks", pr_value, "--json", ",".join(selected_fields)],
            cwd=repo_root,
        )
        if result.returncode != 0:
            retry_detail = (result.stderr or result.stdout or "").strip()
            print(retry_detail or "Error: gh pr checks failed.", file=sys.stderr)
            return None
    try:
        data = json.loads(result.stdout or "[]")
    except json.JSONDecodeError:
        print("Error: unable to parse checks JSON.", file=sys.stderr)
        return None
    if not isinstance(data, list):
        print("Error: unexpected checks JSON shape.", file=sys.stderr)
        return None
    return data
225
+
226
+
227
def is_failing(check: dict[str, Any]) -> bool:
    """True when any of the check's failure signals is set.

    Conclusion, state/status, and bucket are checked in turn because the
    available fields vary between gh versions.
    """
    if normalize_field(check.get("conclusion")) in FAILURE_CONCLUSIONS:
        return True
    if normalize_field(check.get("state") or check.get("status")) in FAILURE_STATES:
        return True
    return normalize_field(check.get("bucket")) in FAILURE_BUCKETS
236
+
237
+
238
def analyze_check(
    check: dict[str, Any],
    repo_root: Path,
    max_lines: int,
    context: int,
) -> dict[str, Any]:
    """Fetch logs and run metadata for one failing check and summarize it.

    The returned dict always carries name/detailsUrl/runId/jobId plus a
    "status" of "external", "log_pending", "log_unavailable", or "ok".
    """
    url = check.get("detailsUrl") or check.get("link") or ""
    run_id = extract_run_id(url)
    job_id = extract_job_id(url)
    summary: dict[str, Any] = {
        "name": check.get("name", ""),
        "detailsUrl": url,
        "runId": run_id,
        "jobId": job_id,
    }

    # Checks hosted outside GitHub Actions expose no run id we can query.
    if run_id is None:
        summary["status"] = "external"
        summary["note"] = "No GitHub Actions run id detected in detailsUrl."
        return summary

    metadata = fetch_run_metadata(run_id, repo_root)
    log_text, log_error, log_status = fetch_check_log(
        run_id=run_id,
        job_id=job_id,
        repo_root=repo_root,
    )

    if log_status == "pending":
        summary["status"] = "log_pending"
        summary["note"] = log_error or "Logs are not available yet."
        if metadata:
            summary["run"] = metadata
        return summary

    if log_error:
        summary["status"] = "log_unavailable"
        summary["error"] = log_error
        if metadata:
            summary["run"] = metadata
        return summary

    summary["status"] = "ok"
    summary["run"] = metadata or {}
    summary["logSnippet"] = extract_failure_snippet(log_text, max_lines=max_lines, context=context)
    summary["logTail"] = tail_lines(log_text, max_lines)
    return summary
286
+
287
+
288
def extract_run_id(url: str) -> str | None:
    """Extract the GitHub Actions run id from a check details URL, if any."""
    if not url:
        return None
    # Prefer the canonical /actions/runs/<id> form; fall back to bare /runs/<id>.
    hits = (re.search(p, url) for p in (r"/actions/runs/(\d+)", r"/runs/(\d+)"))
    first = next((m for m in hits if m), None)
    return first.group(1) if first else None
296
+
297
+
298
def extract_job_id(url: str) -> str | None:
    """Extract the job id from a details URL.

    Prefers the fully-qualified /actions/runs/<run>/job/<id> form before the
    looser /job/<id> fallback.
    """
    if not url:
        return None
    for pattern in (r"/actions/runs/\d+/job/(\d+)", r"/job/(\d+)"):
        hit = re.search(pattern, url)
        if hit:
            return hit.group(1)
    return None
308
+
309
+
310
def fetch_run_metadata(run_id: str, repo_root: Path) -> dict[str, Any] | None:
    """Fetch summary metadata for a workflow run; None on any failure.

    Failures are swallowed deliberately — metadata is decorative and the
    caller degrades gracefully without it.
    """
    fields = [
        "conclusion",
        "status",
        "workflowName",
        "name",
        "event",
        "headBranch",
        "headSha",
        "url",
    ]
    result = run_gh_command(["run", "view", run_id, "--json", ",".join(fields)], cwd=repo_root)
    if result.returncode != 0:
        return None
    try:
        payload = json.loads(result.stdout or "{}")
    except json.JSONDecodeError:
        return None
    return payload if isinstance(payload, dict) else None
331
+
332
+
333
def fetch_check_log(
    run_id: str,
    job_id: str | None,
    repo_root: Path,
) -> tuple[str, str, str]:
    """Fetch a check's log text with a job-log fallback.

    Returns (log_text, error_message, status) where status is "ok",
    "pending" (logs not uploaded yet), or "error".
    """
    run_log, run_error = fetch_run_log(run_id, repo_root)
    if not run_error:
        return run_log, "", "ok"

    run_pending = is_log_pending_message(run_error)
    if run_pending and job_id:
        # The aggregate run log lags; an individual job log may already exist.
        job_log, job_error = fetch_job_log(job_id, repo_root)
        if job_log:
            return job_log, "", "ok"
        if job_error:
            job_status = "pending" if is_log_pending_message(job_error) else "error"
            return "", job_error, job_status
        return "", run_error, "pending"

    return "", run_error, "pending" if run_pending else "error"
356
+
357
+
358
def fetch_run_log(run_id: str, repo_root: Path) -> tuple[str, str]:
    """Return (log_text, error) from `gh run view --log`; error is '' on success."""
    result = run_gh_command(["run", "view", run_id, "--log"], cwd=repo_root)
    if result.returncode == 0:
        return result.stdout, ""
    detail = (result.stderr or result.stdout or "").strip()
    return "", detail or "gh run view failed"
364
+
365
+
366
def fetch_job_log(job_id: str, repo_root: Path) -> tuple[str, str]:
    """Fetch a single job's log through the REST API; returns (log_text, error)."""
    repo_slug = fetch_repo_slug(repo_root)
    if not repo_slug:
        return "", "Error: unable to resolve repository name for job logs."
    endpoint = f"/repos/{repo_slug}/actions/jobs/{job_id}/logs"
    code, body, err_text = run_gh_command_raw(["api", endpoint], cwd=repo_root)
    if code != 0:
        detail = (err_text or body.decode(errors="replace")).strip()
        return "", detail or "gh api job logs failed"
    # The endpoint sometimes redirects to a zip archive of all step logs.
    if is_zip_payload(body):
        return "", "Job logs returned a zip archive; unable to parse."
    return body.decode(errors="replace"), ""
378
+
379
+
380
def fetch_repo_slug(repo_root: Path) -> str | None:
    """Return the repository's "owner/name" slug, or None when unavailable."""
    result = run_gh_command(["repo", "view", "--json", "nameWithOwner"], cwd=repo_root)
    if result.returncode != 0:
        return None
    try:
        payload = json.loads(result.stdout or "{}")
    except json.JSONDecodeError:
        return None
    slug = payload.get("nameWithOwner")
    return str(slug) if slug else None
392
+
393
+
394
def normalize_field(value: Any) -> str:
    """Lower-cased, whitespace-stripped string form of *value*; '' for None."""
    return "" if value is None else str(value).strip().lower()
398
+
399
+
400
def parse_available_fields(message: str) -> list[str]:
    """Parse the field names gh lists after "Available fields:" in an error.

    Returns [] when the marker is absent; otherwise every non-blank stripped
    line following the marker.
    """
    marker = "Available fields:"
    if marker not in message:
        return []
    collected: list[str] = []
    past_marker = False
    for raw_line in message.splitlines():
        if marker in raw_line:
            past_marker = True
            continue
        if not past_marker:
            continue
        name = raw_line.strip()
        if name:
            collected.append(name)
    return collected
416
+
417
+
418
def is_log_pending_message(message: str) -> bool:
    """True when a gh error indicates the run's logs are not uploaded yet."""
    lowered = message.lower()
    for marker in PENDING_LOG_MARKERS:
        if marker in lowered:
            return True
    return False
421
+
422
+
423
def is_zip_payload(payload: bytes) -> bool:
    """Detect a zip archive by its leading 'PK' magic bytes."""
    return payload[:2] == b"PK"
425
+
426
+
427
def extract_failure_snippet(log_text: str, max_lines: int, context: int) -> str:
    """Return up to *max_lines* lines centered on the last failure marker.

    Falls back to the log tail when no marker line is found. The window spans
    *context* lines before AND after the marker line; the previous
    `marker_index + context` bound was exclusive, so the trailing side was one
    line short of the leading side.
    """
    lines = log_text.splitlines()
    if not lines:
        return ""

    marker_index = find_failure_index(lines)
    if marker_index is None:
        # No recognizable marker: the tail is the best available signal.
        return "\n".join(lines[-max_lines:])

    start = max(0, marker_index - context)
    # +1 because slice ends are exclusive; keeps the window symmetric.
    end = min(len(lines), marker_index + context + 1)
    window = lines[start:end]
    if len(window) > max_lines:
        window = window[-max_lines:]
    return "\n".join(window)
442
+
443
+
444
def find_failure_index(lines: Sequence[str]) -> int | None:
    """Index of the LAST line containing any failure marker, or None.

    Scans from the end because the final failure is usually the actionable one.
    """
    for offset, line in enumerate(reversed(lines)):
        lowered = line.lower()
        if any(marker in lowered for marker in FAILURE_MARKERS):
            return len(lines) - 1 - offset
    return None
450
+
451
+
452
def tail_lines(text: str, max_lines: int) -> str:
    """Last *max_lines* lines of *text*; '' when max_lines is non-positive."""
    if max_lines <= 0:
        return ""
    return "\n".join(text.splitlines()[-max_lines:])
457
+
458
+
459
def render_results(pr_number: str, results: Iterable[dict[str, Any]]) -> None:
    """Pretty-print the per-check analysis results to stdout."""
    results_list = list(results)
    print(f"PR #{pr_number}: {len(results_list)} failing checks analyzed.")
    separator = "-" * 60
    for entry in results_list:
        print(separator)
        print(f"Check: {entry.get('name', '')}")
        if entry.get("detailsUrl"):
            print(f"Details: {entry['detailsUrl']}")
        if entry.get("runId"):
            print(f"Run ID: {entry['runId']}")
        if entry.get("jobId"):
            print(f"Job ID: {entry['jobId']}")
        print(f"Status: {entry.get('status', 'unknown')}")

        run_meta = entry.get("run", {})
        if run_meta:
            workflow = run_meta.get("workflowName") or run_meta.get("name") or ""
            conclusion = run_meta.get("conclusion") or run_meta.get("status") or ""
            print(f"Workflow: {workflow} ({conclusion})")
            branch = run_meta.get("headBranch", "")
            sha = (run_meta.get("headSha") or "")[:12]
            if branch or sha:
                print(f"Branch/SHA: {branch} {sha}")
            if run_meta.get("url"):
                print(f"Run URL: {run_meta['url']}")

        if entry.get("note"):
            print(f"Note: {entry['note']}")

        if entry.get("error"):
            print(f"Error fetching logs: {entry['error']}")
            continue  # no snippet to show for log-fetch failures

        snippet = entry.get("logSnippet") or ""
        if snippet:
            print("Failure snippet:")
            print(indent_block(snippet, prefix="  "))
        else:
            print("No snippet available.")
    # Closing rule after the final entry.
    print(separator)
502
+
503
+
504
def indent_block(text: str, prefix: str = "  ") -> str:
    """Prepend *prefix* to every line of *text*."""
    return "\n".join(prefix + line for line in text.splitlines())
506
+
507
+
508
+ if __name__ == "__main__":
509
+ raise SystemExit(main())
@@ -0,0 +1,90 @@
1
+ ---
2
+ name: prompt-leverage
3
+ description: >-
4
+ Strengthen raw user prompts into execution-ready instruction sets. Use when processing
5
+ user input to upgrade it with clear objective, context, work style, tool rules, output
6
+ contract, verification, and done criteria before planning or execution.
7
+ metadata:
8
+ dependencies: []
9
+ ---
10
+
11
+ # Prompt Leverage
12
+
13
+ Strengthen the user's current prompt into a stronger working instruction set without changing the underlying intent. Preserve the task, fill in missing execution structure, and add only enough scaffolding to improve reliability.
14
+
15
+ This skill acts as a **pre-processing layer** — it runs on user input BEFORE planning/execution to ensure every prompt is execution-ready.
16
+
17
+ ## Workflow
18
+
19
+ 1. Read the raw prompt and identify the real job to be done.
20
+ 2. Infer the task type: coding, research, writing, analysis, planning, or review.
21
+ 3. Rebuild the prompt with the framework blocks in `references/framework.md`.
22
+ 4. Keep the result proportional: do not over-specify a simple task.
23
+ 5. Return both the improved prompt and a short explanation of what changed when useful.
24
+
25
+ ## Transformation Rules
26
+
27
+ - Preserve the user's objective, constraints, and tone unless they conflict.
28
+ - Prefer adding missing structure over rewriting everything stylistically.
29
+ - Add context requirements only when they improve correctness.
30
+ - Add tool rules only when tool use materially affects correctness.
31
+ - Add verification and completion criteria for non-trivial tasks.
32
+ - Keep prompts compact enough to be practical in repeated use.
33
+
34
+ ## Framework Blocks
35
+
36
+ Use these blocks selectively.
37
+
38
+ - `Objective`: state the task and what success looks like.
39
+ - `Context`: list sources, files, constraints, and unknowns.
40
+ - `Work Style`: set depth, breadth, care, and first-principles expectations.
41
+ - `Tool Rules`: state when tools, browsing, or file inspection are required.
42
+ - `Output Contract`: define structure, formatting, and level of detail.
43
+ - `Verification`: require checks for correctness, edge cases, and better alternatives.
44
+ - `Done Criteria`: define when the agent should stop.
45
+
46
+ ## Output Modes
47
+
48
+ Choose one mode based on the user request.
49
+
50
+ - `Inline upgrade`: provide the upgraded prompt only.
51
+ - `Upgrade + rationale`: provide the prompt plus a brief list of improvements.
52
+ - `Template extraction`: convert the prompt into a reusable fill-in-the-blank template.
53
+ - `Hook spec`: explain how to apply the framework automatically before execution.
54
+
55
+ ## Quality Bar
56
+
57
+ Before finalizing, check the upgraded prompt:
58
+
59
+ - still matches the original intent
60
+ - does not add unnecessary ceremony
61
+ - includes the right verification level for the task
62
+ - gives the agent a clear definition of done
63
+
64
+ If the prompt is already strong, say so and make only minimal edits.
65
+
66
+ ## Intensity Levels
67
+
68
+ Use the minimum level that matches the task.
69
+
70
+ - `Light`: simple edits, formatting, quick rewrites.
71
+ - `Standard`: typical coding, research, and drafting tasks.
72
+ - `Deep`: debugging, architecture, complex research, or high-stakes outputs.
73
+
74
+ ## Task-Type Adjustments
75
+
76
+ ### Coding
77
+
78
+ - Emphasize repo context, file inspection, smallest correct change, validation, and edge cases.
79
+
80
+ ### Research
81
+
82
+ - Emphasize source quality, evidence gathering, synthesis, uncertainty, and citations.
83
+
84
+ ### Writing
85
+
86
+ - Emphasize audience, tone, structure, constraints, and revision criteria.
87
+
88
+ ### Review
89
+
90
+ - Emphasize fresh-eyes critique, failure modes, alternatives, and explicit severity.
@@ -0,0 +1,91 @@
1
+ # Prompt Leverage Framework
2
+
3
+ Reference for combining source ideas into a practical execution framework.
4
+
5
+ ## Source Synthesis
6
+
7
+ - **Agent Flywheel** contributes behavior controls: intensity, wider search, deeper analysis, fresh eyes, first-principles thinking, and future-self clarity.
8
+ - **OpenAI prompt guidance** contributes execution controls: clear objectives, explicit output contracts, tool persistence, dependency checks, verification loops, and completion criteria.
9
+
10
+ Treat the final framework as:
11
+
12
+ `Objective -> Context -> Work Style -> Tool Rules -> Output Contract -> Verification -> Done`
13
+
14
+ ## Block Definitions
15
+
16
+ ### Objective
17
+
18
+ State the task in one or two lines. Define success in observable terms.
19
+
20
+ ### Context
21
+
22
+ Specify relevant files, URLs, constraints, assumptions, and information boundaries. Say when the agent must retrieve facts instead of guessing.
23
+
24
+ ### Work Style
25
+
26
+ Control how the agent approaches the task.
27
+
28
+ - Go broad first when system understanding matters.
29
+ - Go deep where risk or complexity is highest.
30
+ - Use first-principles reasoning before changing things.
31
+ - Re-check with fresh eyes for non-trivial tasks.
32
+
33
+ ### Tool Rules
34
+
35
+ Define when browsing, file inspection, tests, or external tools are required. Prevent skipping prerequisite checks.
36
+
37
+ ### Output Contract
38
+
39
+ Define exact structure, tone, formatting, depth, and any required sections or schemas.
40
+
41
+ ### Verification
42
+
43
+ Require checks for correctness, grounding, completeness, side effects, and better alternatives.
44
+
45
+ ### Done Criteria
46
+
47
+ Define what must be true before the agent stops.
48
+
49
+ ## Intensity Levels
50
+
51
+ Use the minimum level that matches the task.
52
+
53
+ - `Light`: simple edits, formatting, quick rewrites.
54
+ - `Standard`: typical coding, research, and drafting tasks.
55
+ - `Deep`: debugging, architecture, complex research, or high-stakes outputs.
56
+
57
+ ## Task-Type Adjustments
58
+
59
+ ### Coding
60
+
61
+ - Emphasize repo context, file inspection, smallest correct change, validation, and edge cases.
62
+
63
+ ### Research
64
+
65
+ - Emphasize source quality, evidence gathering, synthesis, uncertainty, and citations.
66
+
67
+ ### Writing
68
+
69
+ - Emphasize audience, tone, structure, constraints, and revision criteria.
70
+
71
+ ### Review
72
+
73
+ - Emphasize fresh-eyes critique, failure modes, alternatives, and explicit severity.
74
+
75
+ ## Prompt Upgrade Heuristics
76
+
77
+ - Add missing blocks only when they materially improve execution.
78
+ - Do not turn a one-line request into a giant spec unless the task is genuinely complex.
79
+ - Preserve user language where possible so the upgraded prompt still feels native.
80
+ - Prefer concrete completion criteria over vague quality adjectives.
81
+
82
+ ## Upgrade Rubric
83
+
84
+ An upgraded prompt is good when it:
85
+
86
+ 1. preserves original intent
87
+ 2. reduces ambiguity
88
+ 3. sets the right depth and care level
89
+ 4. defines the expected output clearly
90
+ 5. includes an appropriate verification step
91
+ 6. tells the agent when to stop