opencode-multiagent 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +209 -0
  3. package/agents/advisor.md +57 -0
  4. package/agents/auditor.md +45 -0
  5. package/agents/critic.md +127 -0
  6. package/agents/deep-worker.md +65 -0
  7. package/agents/devil.md +36 -0
  8. package/agents/executor.md +141 -0
  9. package/agents/heavy-worker.md +68 -0
  10. package/agents/lead.md +155 -0
  11. package/agents/librarian.md +62 -0
  12. package/agents/planner.md +121 -0
  13. package/agents/qa.md +50 -0
  14. package/agents/quick.md +65 -0
  15. package/agents/reviewer.md +55 -0
  16. package/agents/scout.md +58 -0
  17. package/agents/scribe.md +78 -0
  18. package/agents/strategist.md +63 -0
  19. package/agents/ui-heavy-worker.md +62 -0
  20. package/agents/ui-worker.md +69 -0
  21. package/agents/validator.md +47 -0
  22. package/agents/worker.md +68 -0
  23. package/commands/execute.md +14 -0
  24. package/commands/init-deep.md +18 -0
  25. package/commands/init.md +18 -0
  26. package/commands/inspect.md +13 -0
  27. package/commands/plan.md +15 -0
  28. package/commands/quality.md +14 -0
  29. package/commands/review.md +14 -0
  30. package/commands/status.md +15 -0
  31. package/defaults/agent-settings.json +102 -0
  32. package/defaults/agent-settings.schema.json +25 -0
  33. package/defaults/flags.json +35 -0
  34. package/defaults/flags.schema.json +119 -0
  35. package/defaults/mcp-defaults.json +47 -0
  36. package/defaults/mcp-defaults.schema.json +38 -0
  37. package/defaults/profiles.json +53 -0
  38. package/defaults/profiles.schema.json +60 -0
  39. package/defaults/team-profiles.json +83 -0
  40. package/examples/opencode.json +4 -0
  41. package/examples/opencode.with-overrides.json +23 -0
  42. package/package.json +62 -0
  43. package/skills/advanced-evaluation/SKILL.md +454 -0
  44. package/skills/advanced-evaluation/manifest.json +20 -0
  45. package/skills/cek-context-engineering/SKILL.md +1261 -0
  46. package/skills/cek-context-engineering/manifest.json +17 -0
  47. package/skills/cek-prompt-engineering/SKILL.md +559 -0
  48. package/skills/cek-prompt-engineering/manifest.json +17 -0
  49. package/skills/cek-test-prompt/SKILL.md +714 -0
  50. package/skills/cek-test-prompt/manifest.json +17 -0
  51. package/skills/cek-thought-based-reasoning/SKILL.md +658 -0
  52. package/skills/cek-thought-based-reasoning/manifest.json +17 -0
  53. package/skills/context-degradation/SKILL.md +231 -0
  54. package/skills/context-degradation/manifest.json +17 -0
  55. package/skills/debate/SKILL.md +316 -0
  56. package/skills/debate/manifest.json +19 -0
  57. package/skills/design-first/SKILL.md +5 -0
  58. package/skills/design-first/manifest.json +20 -0
  59. package/skills/dispatching-parallel-agents/SKILL.md +180 -0
  60. package/skills/dispatching-parallel-agents/manifest.json +18 -0
  61. package/skills/drift-analysis/SKILL.md +324 -0
  62. package/skills/drift-analysis/manifest.json +19 -0
  63. package/skills/evaluation/SKILL.md +5 -0
  64. package/skills/evaluation/manifest.json +19 -0
  65. package/skills/executing-plans/SKILL.md +70 -0
  66. package/skills/executing-plans/manifest.json +17 -0
  67. package/skills/handoff-protocols/SKILL.md +5 -0
  68. package/skills/handoff-protocols/manifest.json +19 -0
  69. package/skills/parallel-investigation/SKILL.md +206 -0
  70. package/skills/parallel-investigation/manifest.json +18 -0
  71. package/skills/reflexion-critique/SKILL.md +477 -0
  72. package/skills/reflexion-critique/manifest.json +17 -0
  73. package/skills/reflexion-reflect/SKILL.md +650 -0
  74. package/skills/reflexion-reflect/manifest.json +17 -0
  75. package/skills/root-cause-analysis/SKILL.md +5 -0
  76. package/skills/root-cause-analysis/manifest.json +20 -0
  77. package/skills/sadd-judge-with-debate/SKILL.md +426 -0
  78. package/skills/sadd-judge-with-debate/manifest.json +17 -0
  79. package/skills/structured-code-review/SKILL.md +5 -0
  80. package/skills/structured-code-review/manifest.json +18 -0
  81. package/skills/task-decomposition/SKILL.md +5 -0
  82. package/skills/task-decomposition/manifest.json +20 -0
  83. package/skills/verification-before-completion/SKILL.md +5 -0
  84. package/skills/verification-before-completion/manifest.json +22 -0
  85. package/skills/verification-gates/SKILL.md +281 -0
  86. package/skills/verification-gates/manifest.json +19 -0
  87. package/src/control-plane.ts +21 -0
  88. package/src/index.ts +8 -0
  89. package/src/opencode-multiagent/compiler.ts +168 -0
  90. package/src/opencode-multiagent/constants.ts +178 -0
  91. package/src/opencode-multiagent/file-lock.ts +90 -0
  92. package/src/opencode-multiagent/hooks.ts +599 -0
  93. package/src/opencode-multiagent/log.ts +12 -0
  94. package/src/opencode-multiagent/mailbox.ts +287 -0
  95. package/src/opencode-multiagent/markdown.ts +99 -0
  96. package/src/opencode-multiagent/mcp.ts +35 -0
  97. package/src/opencode-multiagent/policy.ts +67 -0
  98. package/src/opencode-multiagent/quality.ts +140 -0
  99. package/src/opencode-multiagent/runtime.ts +55 -0
  100. package/src/opencode-multiagent/skills.ts +144 -0
  101. package/src/opencode-multiagent/supervision.ts +156 -0
  102. package/src/opencode-multiagent/task-manager.ts +148 -0
  103. package/src/opencode-multiagent/team-manager.ts +219 -0
  104. package/src/opencode-multiagent/team-tools.ts +359 -0
  105. package/src/opencode-multiagent/telemetry.ts +124 -0
  106. package/src/opencode-multiagent/utils.ts +54 -0
@@ -0,0 +1,17 @@
1
+ {
2
+ "name": "cek-context-engineering",
3
+ "version": "1.0.0",
4
+ "description": "Context engineering guidance for prompts, hooks, commands, and agents",
5
+ "triggers": [
6
+ "context engineering",
7
+ "context window",
8
+ "prompt context",
9
+ "lost in middle",
10
+ "context design"
11
+ ],
12
+ "applicable_agents": [
13
+ "critic"
14
+ ],
15
+ "max_context_tokens": 2600,
16
+ "entry_file": "SKILL.md"
17
+ }
@@ -0,0 +1,559 @@
1
+ ---
2
+ name: cek-prompt-engineering
3
+ description: Use this skill when you are writing commands, hooks, skills for Agent, or prompts for sub agents or any other LLM interaction, including optimizing prompts, improving LLM outputs, or designing production prompt templates.
4
+ ---
5
+
6
+ # Prompt Engineering Patterns
7
+
8
+ Advanced prompt engineering techniques to maximize LLM performance, reliability, and controllability.
9
+
10
+ ## Core Capabilities
11
+
12
+ ### 1. Few-Shot Learning
13
+
14
+ Teach the model by showing examples instead of explaining rules. Include 2-5 input-output pairs that demonstrate the desired behavior. Use when you need consistent formatting, specific reasoning patterns, or handling of edge cases. More examples improve accuracy but consume tokens—balance based on task complexity.
15
+
16
+ **Example:**
17
+
18
+ ```markdown
19
+ Extract key information from support tickets:
20
+
21
+ Input: "My login doesn't work and I keep getting error 403"
22
+ Output: {"issue": "authentication", "error_code": "403", "priority": "high"}
23
+
24
+ Input: "Feature request: add dark mode to settings"
25
+ Output: {"issue": "feature_request", "error_code": null, "priority": "low"}
26
+
27
+ Now process: "Can't upload files larger than 10MB, getting timeout"
28
+ ```
29
+
30
+ ### 2. Chain-of-Thought Prompting
31
+
32
+ Request step-by-step reasoning before the final answer. Add "Let's think step by step" (zero-shot) or include example reasoning traces (few-shot). Use for complex problems requiring multi-step logic, mathematical reasoning, or when you need to verify the model's thought process. Improves accuracy on analytical tasks by 30-50%.
33
+
34
+ **Example:**
35
+
36
+ ```markdown
37
+ Analyze this bug report and determine root cause.
38
+
39
+ Think step by step:
40
+ 1. What is the expected behavior?
41
+ 2. What is the actual behavior?
42
+ 3. What changed recently that could cause this?
43
+ 4. What components are involved?
44
+ 5. What is the most likely root cause?
45
+
46
+ Bug: "Users can't save drafts after the cache update deployed yesterday"
47
+ ```
48
+
49
+ ### 3. Prompt Optimization
50
+
51
+ Systematically improve prompts through testing and refinement. Start simple, measure performance (accuracy, consistency, token usage), then iterate. Test on diverse inputs including edge cases. Use A/B testing to compare variations. Critical for production prompts where consistency and cost matter.
52
+
53
+ **Example:**
54
+
55
+ ```markdown
56
+ Version 1 (Simple): "Summarize this article"
57
+ → Result: Inconsistent length, misses key points
58
+
59
+ Version 2 (Add constraints): "Summarize in 3 bullet points"
60
+ → Result: Better structure, but still misses nuance
61
+
62
+ Version 3 (Add reasoning): "Identify the 3 main findings, then summarize each"
63
+ → Result: Consistent, accurate, captures key information
64
+ ```
65
+
66
+ ### 4. Template Systems
67
+
68
+ Build reusable prompt structures with variables, conditional sections, and modular components. Use for multi-turn conversations, role-based interactions, or when the same pattern applies to different inputs. Reduces duplication and ensures consistency across similar tasks.
69
+
70
+ **Example:**
71
+
72
+ ```python
73
+ # Reusable code review template
74
+ template = """
75
+ Review this {language} code for {focus_area}.
76
+
77
+ Code:
78
+ {code_block}
79
+
80
+ Provide feedback on:
81
+ {checklist}
82
+ """
83
+
84
+ # Usage
85
+ prompt = template.format(
86
+ language="Python",
87
+ focus_area="security vulnerabilities",
88
+ code_block=user_code,
89
+ checklist="1. SQL injection\n2. XSS risks\n3. Authentication"
90
+ )
91
+ ```
92
+
93
+ ### 5. System Prompt Design
94
+
95
+ Set global behavior and constraints that persist across the conversation. Define the model's role, expertise level, output format, and safety guidelines. Use system prompts for stable instructions that shouldn't change turn-to-turn, freeing up user message tokens for variable content.
96
+
97
+ **Example:**
98
+
99
+ ```markdown
100
+ System: You are a senior backend engineer specializing in API design.
101
+
102
+ Rules:
103
+ - Always consider scalability and performance
104
+ - Suggest RESTful patterns by default
105
+ - Flag security concerns immediately
106
+ - Provide code examples in Python
107
+ - Use early return pattern
108
+
109
+ Format responses as:
110
+ 1. Analysis
111
+ 2. Recommendation
112
+ 3. Code example
113
+ 4. Trade-offs
114
+ ```
115
+
116
+ ## Key Patterns
117
+
118
+ ### Progressive Disclosure
119
+
120
+ Start with simple prompts, add complexity only when needed:
121
+
122
+ 1. **Level 1**: Direct instruction
123
+ - "Summarize this article"
124
+
125
+ 2. **Level 2**: Add constraints
126
+ - "Summarize this article in 3 bullet points, focusing on key findings"
127
+
128
+ 3. **Level 3**: Add reasoning
129
+ - "Read this article, identify the main findings, then summarize in 3 bullet points"
130
+
131
+ 4. **Level 4**: Add examples
132
+ - Include 2-3 example summaries with input-output pairs
133
+
134
+ ### Instruction Hierarchy
135
+
136
+ ```
137
+ [System Context] → [Task Instruction] → [Examples] → [Input Data] → [Output Format]
138
+ ```
139
+
140
+ ### Error Recovery
141
+
142
+ Build prompts that gracefully handle failures:
143
+
144
+ - Include fallback instructions
145
+ - Request confidence scores
146
+ - Ask for alternative interpretations when uncertain
147
+ - Specify how to indicate missing information
148
+
149
+ ## Best Practices
150
+
151
+ 1. **Be Specific**: Vague prompts produce inconsistent results
152
+ 2. **Show, Don't Tell**: Examples are more effective than descriptions
153
+ 3. **Test Extensively**: Evaluate on diverse, representative inputs
154
+ 4. **Iterate Rapidly**: Small changes can have large impacts
155
+ 5. **Monitor Performance**: Track metrics in production
156
+ 6. **Version Control**: Treat prompts as code with proper versioning
157
+ 7. **Document Intent**: Explain why prompts are structured as they are
158
+
159
+ ## Common Pitfalls
160
+
161
+ - **Over-engineering**: Starting with complex prompts before trying simple ones
162
+ - **Example pollution**: Using examples that don't match the target task
163
+ - **Context overflow**: Exceeding token limits with excessive examples
164
+ - **Ambiguous instructions**: Leaving room for multiple interpretations
165
+ - **Ignoring edge cases**: Not testing on unusual or boundary inputs
166
+
167
+ ## Integration Patterns
168
+
169
+ ### With RAG Systems
170
+
171
+ ```python
172
+ # Combine retrieved context with prompt engineering
173
+ prompt = f"""Given the following context:
174
+ {retrieved_context}
175
+
176
+ {few_shot_examples}
177
+
178
+ Question: {user_question}
179
+
180
+ Provide a detailed answer based solely on the context above. If the context doesn't contain enough information, explicitly state what's missing."""
181
+ ```
182
+
183
+ ### With Validation
184
+
185
+ ```python
186
+ # Add self-verification step
187
+ prompt = f"""{main_task_prompt}
188
+
189
+ After generating your response, verify it meets these criteria:
190
+ 1. Answers the question directly
191
+ 2. Uses only information from provided context
192
+ 3. Cites specific sources
193
+ 4. Acknowledges any uncertainty
194
+
195
+ If verification fails, revise your response."""
196
+ ```
197
+
198
+ ## Performance Optimization
199
+
200
+ ### Token Efficiency
201
+
202
+ - Remove redundant words and phrases
203
+ - Use abbreviations consistently after first definition
204
+ - Consolidate similar instructions
205
+ - Move stable content to system prompts
206
+
207
+ ### Latency Reduction
208
+
209
+ - Minimize prompt length without sacrificing quality
210
+ - Use streaming for long-form outputs
211
+ - Cache common prompt prefixes
212
+ - Batch similar requests when possible
213
+
214
+ ---
215
+
216
+ # Agent Prompting Best Practices
217
+
218
+ Based on Anthropic's official best practices for agent prompting.
219
+
220
+ ## Core principles
221
+
222
+ ### Context Window
223
+
224
+ The “context window” refers to the entirety of the amount of text a language model can look back on and reference when generating new text plus the new text it generates. This is different from the large corpus of data the language model was trained on, and instead represents a “working memory” for the model. A larger context window allows the model to understand and respond to more complex and lengthy prompts, while a smaller context window may limit the model’s ability to handle longer prompts or maintain coherence over extended conversations.
225
+
226
+ - Progressive token accumulation: As the conversation advances through turns, each user message and assistant response accumulates within the context window. Previous turns are preserved completely.
227
+ - Linear growth pattern: The context usage grows linearly with each turn, with previous turns preserved completely.
228
+ - 200K token capacity: The total available context window (200,000 tokens) represents the maximum capacity for storing conversation history and generating new output from Claude.
229
+ - Input-output flow: Each turn consists of:
230
+ - Input phase: Contains all previous conversation history plus the current user message
231
+ - Output phase: Generates a text response that becomes part of a future input
232
+
233
+ ### Concise is key
234
+
235
+ The context window is a public good. Your prompt, command, skill shares the context window with everything else Claude needs to know, including:
236
+
237
+ - The system prompt
238
+ - Conversation history
239
+ - Other commands, skills, hooks, metadata
240
+ - Your actual request
241
+
242
+ **Default assumption**: Claude is already very smart
243
+
244
+ Only add context Claude doesn't already have. Challenge each piece of information:
245
+
246
+ - "Does Claude really need this explanation?"
247
+ - "Can I assume Claude knows this?"
248
+ - "Does this paragraph justify its token cost?"
249
+
250
+ **Good example: Concise** (approximately 50 tokens):
251
+
252
+ ````markdown theme={null}
253
+ ## Extract PDF text
254
+
255
+ Use pdfplumber for text extraction:
256
+
257
+ ```python
258
+ import pdfplumber
259
+
260
+ with pdfplumber.open("file.pdf") as pdf:
261
+ text = pdf.pages[0].extract_text()
262
+ ```
263
+ ````
264
+
265
+ **Bad example: Too verbose** (approximately 150 tokens):
266
+
267
+ ```markdown theme={null}
268
+ ## Extract PDF text
269
+
270
+ PDF (Portable Document Format) files are a common file format that contains
271
+ text, images, and other content. To extract text from a PDF, you'll need to
272
+ use a library. There are many libraries available for PDF processing, but we
273
+ recommend pdfplumber because it's easy to use and handles most cases well.
274
+ First, you'll need to install it using pip. Then you can use the code below...
275
+ ```
276
+
277
+ The concise version assumes Claude knows what PDFs are and how libraries work.
278
+
279
+ ### Set appropriate degrees of freedom
280
+
281
+ Match the level of specificity to the task's fragility and variability.
282
+
283
+ **High freedom** (text-based instructions):
284
+
285
+ Use when:
286
+
287
+ - Multiple approaches are valid
288
+ - Decisions depend on context
289
+ - Heuristics guide the approach
290
+
291
+ Example:
292
+
293
+ ```markdown theme={null}
294
+ ## Code review process
295
+
296
+ 1. Analyze the code structure and organization
297
+ 2. Check for potential bugs or edge cases
298
+ 3. Suggest improvements for readability and maintainability
299
+ 4. Verify adherence to project conventions
300
+ ```
301
+
302
+ **Medium freedom** (pseudocode or scripts with parameters):
303
+
304
+ Use when:
305
+
306
+ - A preferred pattern exists
307
+ - Some variation is acceptable
308
+ - Configuration affects behavior
309
+
310
+ Example:
311
+
312
+ ````markdown theme={null}
313
+ ## Generate report
314
+
315
+ Use this template and customize as needed:
316
+
317
+ ```python
318
+ def generate_report(data, format="markdown", include_charts=True):
319
+ # Process data
320
+ # Generate output in specified format
321
+ # Optionally include visualizations
322
+ ```
323
+ ````
324
+
325
+ **Low freedom** (specific scripts, few or no parameters):
326
+
327
+ Use when:
328
+
329
+ - Operations are fragile and error-prone
330
+ - Consistency is critical
331
+ - A specific sequence must be followed
332
+
333
+ Example:
334
+
335
+ ````markdown theme={null}
336
+ ## Database migration
337
+
338
+ Run exactly this script:
339
+
340
+ ```bash
341
+ python scripts/migrate.py --verify --backup
342
+ ```
343
+
344
+ Do not modify the command or add additional flags.
345
+ ````
346
+
347
+ **Analogy**: Think of Claude as a robot exploring a path:
348
+
349
+ - **Narrow bridge with cliffs on both sides**: There's only one safe way forward. Provide specific guardrails and exact instructions (low freedom). Example: database migrations that must run in exact sequence.
350
+ - **Open field with no hazards**: Many paths lead to success. Give general direction and trust Claude to find the best route (high freedom). Example: code reviews where context determines the best approach.
351
+
352
+ # Persuasion Principles for Agent Communication
353
+
354
+ Useful for writing prompts, including but not limited to: commands, hooks, skills for Claude Code, or prompts for sub agents or any other LLM interaction.
355
+
356
+ ## Overview
357
+
358
+ LLMs respond to the same persuasion principles as humans. Understanding this psychology helps you design more effective skills - not to manipulate, but to ensure critical practices are followed even under pressure.
359
+
360
+ **Research foundation:** Meincke et al. (2025) tested 7 persuasion principles with N=28,000 AI conversations. Persuasion techniques more than doubled compliance rates (33% → 72%, p < .001).
361
+
362
+ ## The Seven Principles
363
+
364
+ ### 1. Authority
365
+
366
+ **What it is:** Deference to expertise, credentials, or official sources.
367
+
368
+ **How it works in prompts:**
369
+
370
+ - Imperative language: "YOU MUST", "Never", "Always"
371
+ - Non-negotiable framing: "No exceptions"
372
+ - Eliminates decision fatigue and rationalization
373
+
374
+ **When to use:**
375
+
376
+ - Discipline-enforcing skills (TDD, verification requirements)
377
+ - Safety-critical practices
378
+ - Established best practices
379
+
380
+ **Example:**
381
+
382
+ ```markdown
383
+ ✅ Write code before test? Delete it. Start over. No exceptions.
384
+ ❌ Consider writing tests first when feasible.
385
+ ```
386
+
387
+ ### 2. Commitment
388
+
389
+ **What it is:** Consistency with prior actions, statements, or public declarations.
390
+
391
+ **How it works in prompts:**
392
+
393
+ - Require announcements: "Announce skill usage"
394
+ - Force explicit choices: "Choose A, B, or C"
395
+ - Use tracking: TodoWrite for checklists
396
+
397
+ **When to use:**
398
+
399
+ - Ensuring skills are actually followed
400
+ - Multi-step processes
401
+ - Accountability mechanisms
402
+
403
+ **Example:**
404
+
405
+ ```markdown
406
+ ✅ When you find a skill, you MUST announce: "I'm using [Skill Name]"
407
+ ❌ Consider letting your partner know which skill you're using.
408
+ ```
409
+
410
+ ### 3. Scarcity
411
+
412
+ **What it is:** Urgency from time limits or limited availability.
413
+
414
+ **How it works in prompts:**
415
+
416
+ - Time-bound requirements: "Before proceeding"
417
+ - Sequential dependencies: "Immediately after X"
418
+ - Prevents procrastination
419
+
420
+ **When to use:**
421
+
422
+ - Immediate verification requirements
423
+ - Time-sensitive workflows
424
+ - Preventing "I'll do it later"
425
+
426
+ **Example:**
427
+
428
+ ```markdown
429
+ ✅ After completing a task, IMMEDIATELY request code review before proceeding.
430
+ ❌ You can review code when convenient.
431
+ ```
432
+
433
+ ### 4. Social Proof
434
+
435
+ **What it is:** Conformity to what others do or what's considered normal.
436
+
437
+ **How it works in prompts:**
438
+
439
+ - Universal patterns: "Every time", "Always"
440
+ - Failure modes: "X without Y = failure"
441
+ - Establishes norms
442
+
443
+ **When to use:**
444
+
445
+ - Documenting universal practices
446
+ - Warning about common failures
447
+ - Reinforcing standards
448
+
449
+ **Example:**
450
+
451
+ ```markdown
452
+ ✅ Checklists without TodoWrite tracking = steps get skipped. Every time.
453
+ ❌ Some people find TodoWrite helpful for checklists.
454
+ ```
455
+
456
+ ### 5. Unity
457
+
458
+ **What it is:** Shared identity, "we-ness", in-group belonging.
459
+
460
+ **How it works in prompts:**
461
+
462
+ - Collaborative language: "our codebase", "we're colleagues"
463
+ - Shared goals: "we both want quality"
464
+
465
+ **When to use:**
466
+
467
+ - Collaborative workflows
468
+ - Establishing team culture
469
+ - Non-hierarchical practices
470
+
471
+ **Example:**
472
+
473
+ ```markdown
474
+ ✅ We're colleagues working together. I need your honest technical judgment.
475
+ ❌ You should probably tell me if I'm wrong.
476
+ ```
477
+
478
+ ### 6. Reciprocity
479
+
480
+ **What it is:** Obligation to return benefits received.
481
+
482
+ **How it works:**
483
+
484
+ - Use sparingly - can feel manipulative
485
+ - Rarely needed in prompts
486
+
487
+ **When to avoid:**
488
+
489
+ - Almost always (other principles more effective)
490
+
491
+ ### 7. Liking
492
+
493
+ **What it is:** Preference for cooperating with those we like.
494
+
495
+ **How it works:**
496
+
497
+ - **DON'T USE for compliance**
498
+ - Conflicts with honest feedback culture
499
+ - Creates sycophancy
500
+
501
+ **When to avoid:**
502
+
503
+ - Always for discipline enforcement
504
+
505
+ ## Principle Combinations by Prompt Type
506
+
507
+ | Prompt Type | Use | Avoid |
508
+ |------------|-----|-------|
509
+ | Discipline-enforcing | Authority + Commitment + Social Proof | Liking, Reciprocity |
510
+ | Guidance/technique | Moderate Authority + Unity | Heavy authority |
511
+ | Collaborative | Unity + Commitment | Authority, Liking |
512
+ | Reference | Clarity only | All persuasion |
513
+
514
+ ## Why This Works: The Psychology
515
+
516
+ **Bright-line rules reduce rationalization:**
517
+
518
+ - "YOU MUST" removes decision fatigue
519
+ - Absolute language eliminates "is this an exception?" questions
520
+ - Explicit anti-rationalization counters close specific loopholes
521
+
522
+ **Implementation intentions create automatic behavior:**
523
+
524
+ - Clear triggers + required actions = automatic execution
525
+ - "When X, do Y" more effective than "generally do Y"
526
+ - Reduces cognitive load on compliance
527
+
528
+ **LLMs are parahuman:**
529
+
530
+ - Trained on human text containing these patterns
531
+ - Authority language precedes compliance in training data
532
+ - Commitment sequences (statement → action) frequently modeled
533
+ - Social proof patterns (everyone does X) establish norms
534
+
535
+ ## Ethical Use
536
+
537
+ **Legitimate:**
538
+
539
+ - Ensuring critical practices are followed
540
+ - Creating effective documentation
541
+ - Preventing predictable failures
542
+
543
+ **Illegitimate:**
544
+
545
+ - Manipulating for personal gain
546
+ - Creating false urgency
547
+ - Guilt-based compliance
548
+
549
+ **The test:** Would this technique serve the user's genuine interests if they fully understood it?
550
+
551
+ ## Quick Reference
552
+
553
+ When designing a prompt, ask:
554
+
555
+ 1. **What type is it?** (Discipline vs. guidance vs. reference)
556
+ 2. **What behavior am I trying to change?**
557
+ 3. **Which principle(s) apply?** (Usually authority + commitment for discipline)
558
+ 4. **Am I combining too many?** (Don't use all seven)
559
+ 5. **Is this ethical?** (Serves user's genuine interests?)
@@ -0,0 +1,17 @@
1
+ {
2
+ "name": "cek-prompt-engineering",
3
+ "version": "1.0.0",
4
+ "description": "Prompt engineering patterns for commands, hooks, skills, and subagents",
5
+ "triggers": [
6
+ "prompt",
7
+ "prompt engineering",
8
+ "optimize prompt",
9
+ "rewrite prompt",
10
+ "instruction design"
11
+ ],
12
+ "applicable_agents": [
13
+ "critic"
14
+ ],
15
+ "max_context_tokens": 2200,
16
+ "entry_file": "SKILL.md"
17
+ }