gobby-0.2.6-py3-none-any.whl → gobby-0.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. gobby/__init__.py +1 -1
  2. gobby/adapters/__init__.py +2 -1
  3. gobby/adapters/codex_impl/__init__.py +28 -0
  4. gobby/adapters/codex_impl/adapter.py +722 -0
  5. gobby/adapters/codex_impl/client.py +679 -0
  6. gobby/adapters/codex_impl/protocol.py +20 -0
  7. gobby/adapters/codex_impl/types.py +68 -0
  8. gobby/agents/definitions.py +11 -1
  9. gobby/agents/isolation.py +395 -0
  10. gobby/agents/sandbox.py +261 -0
  11. gobby/agents/spawn.py +42 -287
  12. gobby/agents/spawn_executor.py +385 -0
  13. gobby/agents/spawners/__init__.py +24 -0
  14. gobby/agents/spawners/command_builder.py +189 -0
  15. gobby/agents/spawners/embedded.py +21 -2
  16. gobby/agents/spawners/headless.py +21 -2
  17. gobby/agents/spawners/prompt_manager.py +125 -0
  18. gobby/cli/install.py +4 -4
  19. gobby/cli/installers/claude.py +6 -0
  20. gobby/cli/installers/gemini.py +6 -0
  21. gobby/cli/installers/shared.py +103 -4
  22. gobby/cli/sessions.py +1 -1
  23. gobby/cli/utils.py +9 -2
  24. gobby/config/__init__.py +12 -97
  25. gobby/config/app.py +10 -94
  26. gobby/config/extensions.py +2 -2
  27. gobby/config/features.py +7 -130
  28. gobby/config/tasks.py +4 -28
  29. gobby/hooks/__init__.py +0 -13
  30. gobby/hooks/event_handlers.py +45 -2
  31. gobby/hooks/hook_manager.py +2 -2
  32. gobby/hooks/plugins.py +1 -1
  33. gobby/hooks/webhooks.py +1 -1
  34. gobby/llm/resolver.py +3 -2
  35. gobby/mcp_proxy/importer.py +62 -4
  36. gobby/mcp_proxy/instructions.py +2 -0
  37. gobby/mcp_proxy/registries.py +1 -4
  38. gobby/mcp_proxy/services/recommendation.py +43 -11
  39. gobby/mcp_proxy/tools/agents.py +31 -731
  40. gobby/mcp_proxy/tools/clones.py +0 -385
  41. gobby/mcp_proxy/tools/memory.py +2 -2
  42. gobby/mcp_proxy/tools/sessions/__init__.py +14 -0
  43. gobby/mcp_proxy/tools/sessions/_commits.py +232 -0
  44. gobby/mcp_proxy/tools/sessions/_crud.py +253 -0
  45. gobby/mcp_proxy/tools/sessions/_factory.py +63 -0
  46. gobby/mcp_proxy/tools/sessions/_handoff.py +499 -0
  47. gobby/mcp_proxy/tools/sessions/_messages.py +138 -0
  48. gobby/mcp_proxy/tools/skills/__init__.py +14 -29
  49. gobby/mcp_proxy/tools/spawn_agent.py +417 -0
  50. gobby/mcp_proxy/tools/tasks/_lifecycle.py +52 -18
  51. gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +1 -1
  52. gobby/mcp_proxy/tools/worktrees.py +0 -343
  53. gobby/memory/ingestion/__init__.py +5 -0
  54. gobby/memory/ingestion/multimodal.py +221 -0
  55. gobby/memory/manager.py +62 -283
  56. gobby/memory/search/__init__.py +10 -0
  57. gobby/memory/search/coordinator.py +248 -0
  58. gobby/memory/services/__init__.py +5 -0
  59. gobby/memory/services/crossref.py +142 -0
  60. gobby/prompts/loader.py +5 -2
  61. gobby/servers/http.py +1 -4
  62. gobby/servers/routes/admin.py +14 -0
  63. gobby/servers/routes/mcp/endpoints/__init__.py +61 -0
  64. gobby/servers/routes/mcp/endpoints/discovery.py +405 -0
  65. gobby/servers/routes/mcp/endpoints/execution.py +568 -0
  66. gobby/servers/routes/mcp/endpoints/registry.py +378 -0
  67. gobby/servers/routes/mcp/endpoints/server.py +304 -0
  68. gobby/servers/routes/mcp/hooks.py +1 -1
  69. gobby/servers/routes/mcp/tools.py +48 -1506
  70. gobby/sessions/lifecycle.py +1 -1
  71. gobby/sessions/processor.py +10 -0
  72. gobby/sessions/transcripts/base.py +1 -0
  73. gobby/sessions/transcripts/claude.py +15 -5
  74. gobby/skills/parser.py +30 -2
  75. gobby/storage/migrations.py +159 -372
  76. gobby/storage/sessions.py +43 -7
  77. gobby/storage/skills.py +37 -4
  78. gobby/storage/tasks/_lifecycle.py +18 -3
  79. gobby/sync/memories.py +1 -1
  80. gobby/tasks/external_validator.py +1 -1
  81. gobby/tasks/validation.py +22 -20
  82. gobby/tools/summarizer.py +91 -10
  83. gobby/utils/project_context.py +2 -3
  84. gobby/utils/status.py +13 -0
  85. gobby/workflows/actions.py +221 -1217
  86. gobby/workflows/artifact_actions.py +31 -0
  87. gobby/workflows/autonomous_actions.py +11 -0
  88. gobby/workflows/context_actions.py +50 -1
  89. gobby/workflows/enforcement/__init__.py +47 -0
  90. gobby/workflows/enforcement/blocking.py +269 -0
  91. gobby/workflows/enforcement/commit_policy.py +283 -0
  92. gobby/workflows/enforcement/handlers.py +269 -0
  93. gobby/workflows/enforcement/task_policy.py +542 -0
  94. gobby/workflows/git_utils.py +106 -0
  95. gobby/workflows/llm_actions.py +30 -0
  96. gobby/workflows/mcp_actions.py +20 -1
  97. gobby/workflows/memory_actions.py +80 -0
  98. gobby/workflows/safe_evaluator.py +183 -0
  99. gobby/workflows/session_actions.py +44 -0
  100. gobby/workflows/state_actions.py +60 -1
  101. gobby/workflows/stop_signal_actions.py +55 -0
  102. gobby/workflows/summary_actions.py +94 -1
  103. gobby/workflows/task_sync_actions.py +347 -0
  104. gobby/workflows/todo_actions.py +34 -1
  105. gobby/workflows/webhook_actions.py +185 -0
  106. {gobby-0.2.6.dist-info → gobby-0.2.7.dist-info}/METADATA +6 -1
  107. {gobby-0.2.6.dist-info → gobby-0.2.7.dist-info}/RECORD +111 -111
  108. {gobby-0.2.6.dist-info → gobby-0.2.7.dist-info}/WHEEL +1 -1
  109. gobby/adapters/codex.py +0 -1332
  110. gobby/install/claude/commands/gobby/bug.md +0 -51
  111. gobby/install/claude/commands/gobby/chore.md +0 -51
  112. gobby/install/claude/commands/gobby/epic.md +0 -52
  113. gobby/install/claude/commands/gobby/eval.md +0 -235
  114. gobby/install/claude/commands/gobby/feat.md +0 -49
  115. gobby/install/claude/commands/gobby/nit.md +0 -52
  116. gobby/install/claude/commands/gobby/ref.md +0 -52
  117. gobby/mcp_proxy/tools/session_messages.py +0 -1055
  118. gobby/prompts/defaults/expansion/system.md +0 -119
  119. gobby/prompts/defaults/expansion/user.md +0 -48
  120. gobby/prompts/defaults/external_validation/agent.md +0 -72
  121. gobby/prompts/defaults/external_validation/external.md +0 -63
  122. gobby/prompts/defaults/external_validation/spawn.md +0 -83
  123. gobby/prompts/defaults/external_validation/system.md +0 -6
  124. gobby/prompts/defaults/features/import_mcp.md +0 -22
  125. gobby/prompts/defaults/features/import_mcp_github.md +0 -17
  126. gobby/prompts/defaults/features/import_mcp_search.md +0 -16
  127. gobby/prompts/defaults/features/recommend_tools.md +0 -32
  128. gobby/prompts/defaults/features/recommend_tools_hybrid.md +0 -35
  129. gobby/prompts/defaults/features/recommend_tools_llm.md +0 -30
  130. gobby/prompts/defaults/features/server_description.md +0 -20
  131. gobby/prompts/defaults/features/server_description_system.md +0 -6
  132. gobby/prompts/defaults/features/task_description.md +0 -31
  133. gobby/prompts/defaults/features/task_description_system.md +0 -6
  134. gobby/prompts/defaults/features/tool_summary.md +0 -17
  135. gobby/prompts/defaults/features/tool_summary_system.md +0 -6
  136. gobby/prompts/defaults/handoff/compact.md +0 -63
  137. gobby/prompts/defaults/handoff/session_end.md +0 -57
  138. gobby/prompts/defaults/memory/extract.md +0 -61
  139. gobby/prompts/defaults/research/step.md +0 -58
  140. gobby/prompts/defaults/validation/criteria.md +0 -47
  141. gobby/prompts/defaults/validation/validate.md +0 -38
  142. gobby/storage/migrations_legacy.py +0 -1359
  143. gobby/workflows/task_enforcement_actions.py +0 -1343
  144. {gobby-0.2.6.dist-info → gobby-0.2.7.dist-info}/entry_points.txt +0 -0
  145. {gobby-0.2.6.dist-info → gobby-0.2.7.dist-info}/licenses/LICENSE.md +0 -0
  146. {gobby-0.2.6.dist-info → gobby-0.2.7.dist-info}/top_level.txt +0 -0
@@ -1,51 +0,0 @@
- ---
- name: bug
- description: Quickly create a bug task. Usage: /bug <title> [description]
- ---
-
- # /bug - Create Bug Task
-
- Create a bug/defect task with the provided title and optional description.
-
- ## Usage
-
- ```text
- /bug <title>
- /bug <title> - <description>
- ```
-
- ## Examples
-
- ```text
- /bug Fix login timeout
- /bug Database connection drops - Users report intermittent connection failures after 5 minutes of inactivity
- ```
-
- ## Action
-
- Call `gobby-tasks.create_task` with:
-
- - `title`: The bug title from user input
- - `task_type`: "bug"
- - `priority`: 1 (high - bugs are important)
-
- Parse the user input:
-
- - If input contains " - ", split into title and description
- - Otherwise, use entire input as title
-
- ```python
- call_tool(
-     server_name="gobby-tasks",
-     tool_name="create_task",
-     arguments={
-         "title": "<parsed title>",
-         "description": "<parsed description if any>",
-         "task_type": "bug",
-         "priority": 1,
-         "session_id": "<session_id>" # Required - from session context
-     }
- )
- ```
-
- After creating, confirm with the task reference (e.g., "Created bug #123: Fix login timeout").
@@ -1,51 +0,0 @@
- ---
- name: chore
- description: Quickly create a chore/maintenance task. Usage: /chore <title> [description]
- ---
-
- # /chore - Create Chore Task
-
- Create a maintenance or housekeeping task with the provided title and optional description. For tasks that keep the codebase healthy but aren't features or bugs.
-
- ## Usage
-
- ```
- /chore <title>
- /chore <title> - <description>
- ```
-
- ## Examples
-
- ```
- /chore Update dependencies
- /chore Clean up CI pipeline - Remove deprecated jobs and consolidate test stages
- /chore Add missing type hints to utils module
- /chore Rotate API keys
- ```
-
- ## Action
-
- Call `gobby-tasks.create_task` with:
- - `title`: The chore title from user input
- - `task_type`: "chore"
- - `priority`: 3 (low - maintenance tasks are important but rarely urgent)
-
- Parse the user input:
- - If input contains " - ", split into title and description
- - Otherwise, use entire input as title
-
- ```python
- call_tool(
-     server_name="gobby-tasks",
-     tool_name="create_task",
-     arguments={
-         "title": "<parsed title>",
-         "description": "<parsed description if any>",
-         "task_type": "chore",
-         "priority": 3,
-         "session_id": "<session_id>" # Required - from session context
-     }
- )
- ```
-
- After creating, confirm with the task reference (e.g., "Created chore #128: Update dependencies").
@@ -1,52 +0,0 @@
- ---
- name: epic
- description: Quickly create an epic (parent task for large features). Usage: /epic <title> [description]
- ---
-
- # /epic - Create Epic Task
-
- Create an epic task - a parent container for a large feature or initiative that will be broken down into subtasks.
-
- ## Usage
-
- ```
- /epic <title>
- /epic <title> - <description>
- ```
-
- ## Examples
-
- ```
- /epic User authentication system
- /epic API v2 migration - Migrate all endpoints from REST to GraphQL with backwards compatibility
- /epic Performance optimization sprint
- ```
-
- ## Action
-
- Call `gobby-tasks.create_task` with:
- - `title`: The epic title from user input
- - `task_type`: "epic"
- - `priority`: 2 (medium - epics are tracked but individual subtasks drive priority)
-
- Parse the user input:
- - If input contains " - ", split into title and description
- - Otherwise, use entire input as title
-
- ```python
- call_tool(
-     server_name="gobby-tasks",
-     tool_name="create_task",
-     arguments={
-         "title": "<parsed title>",
-         "description": "<parsed description if any>",
-         "task_type": "epic",
-         "priority": 2,
-         "session_id": "<session_id>" # Required - from session context
-     }
- )
- ```
-
- After creating, confirm with the task reference and suggest next steps:
- - "Created epic #127: User authentication system"
- - "Use `expand_task` to break this down into subtasks."
@@ -1,235 +0,0 @@
- ---
- name: eval
- description: This skill should be used when the user asks to "evaluate agent performance", "build test framework", "measure agent quality", "create evaluation rubrics", or mentions LLM-as-judge, multi-dimensional evaluation, agent testing, or quality gates for agent pipelines.
- ---
-
- # Evaluation Methods for Agent Systems
-
- Evaluation of agent systems requires different approaches than traditional software or even standard language model applications. Agents make dynamic decisions, are non-deterministic between runs, and often lack single correct answers. Effective evaluation must account for these characteristics while providing actionable feedback. A robust evaluation framework enables continuous improvement, catches regressions, and validates that context engineering choices achieve intended effects.
-
- ## When to Activate
-
- Activate this skill when:
-
- - Testing agent performance systematically
- - Validating context engineering choices
- - Measuring improvements over time
- - Catching regressions before deployment
- - Building quality gates for agent pipelines
- - Comparing different agent configurations
- - Evaluating production systems continuously
-
- ## Core Concepts
-
- Agent evaluation requires outcome-focused approaches that account for non-determinism and multiple valid paths. Multi-dimensional rubrics capture various quality aspects: factual accuracy, completeness, citation accuracy, source quality, and tool efficiency. LLM-as-judge provides scalable evaluation while human evaluation catches edge cases.
-
- The key insight is that agents may find alternative paths to goals—the evaluation should judge whether they achieve right outcomes while following reasonable processes.
-
- **Performance Drivers: The 95% Finding**
- Research on the BrowseComp evaluation (which tests browsing agents' ability to locate hard-to-find information) found that three factors explain 95% of performance variance:
-
- | Factor | Variance Explained | Implication |
- | :--- | :--- | :--- |
- | Token usage | 80% | More tokens = better performance |
- | Number of tool calls | ~10% | More exploration helps |
- | Model choice | ~5% | Better models multiply efficiency |
-
- This finding has significant implications for evaluation design:
-
- - **Token budgets matter**: Evaluate agents with realistic token budgets, not unlimited resources
- - **Model upgrades beat token increases**: Upgrading to Claude Sonnet 4.5 or GPT-5.2 provides larger gains than doubling token budgets on previous versions
- - **Multi-agent validation**: The finding validates architectures that distribute work across agents with separate context windows
-
- ## Detailed Topics
-
- ### Evaluation Challenges
-
- **Non-Determinism and Multiple Valid Paths**
- Agents may take completely different valid paths to reach goals. One agent might search three sources while another searches ten. They might use different tools to find the same answer. Traditional evaluations that check for specific steps fail in this context.
-
- The solution is outcome-focused evaluation that judges whether agents achieve right outcomes while following reasonable processes.
-
- **Context-Dependent Failures**
- Agent failures often depend on context in subtle ways. An agent might succeed on simple queries but fail on complex ones. It might work well with one tool set but fail with another. Failures may emerge only after extended interaction when context accumulates.
-
- Evaluation must cover a range of complexity levels and test extended interactions, not just isolated queries.
-
- **Composite Quality Dimensions**
- Agent quality is not a single dimension. It includes factual accuracy, completeness, coherence, tool efficiency, and process quality. An agent might score high on accuracy but low in efficiency, or vice versa.
-
- Evaluation rubrics must capture multiple dimensions with appropriate weighting for the use case.
-
- ### Evaluation Rubric Design
-
- **Multi-Dimensional Rubric**
- Effective rubrics cover key dimensions with descriptive levels:
-
- Factual accuracy: Claims match ground truth (excellent to failed)
-
- Completeness: Output covers requested aspects (excellent to failed)
-
- Citation accuracy: Citations match claimed sources (excellent to failed)
-
- Source quality: Uses appropriate primary sources (excellent to failed)
-
- Tool efficiency: Uses right tools reasonable number of times (excellent to failed)
-
- **Rubric Scoring**
- Convert dimension assessments to numeric scores (0.0 to 1.0) with appropriate weighting. Calculate weighted overall scores. Determine passing threshold based on use case requirements.
-
- ### Evaluation Methodologies
-
- **LLM-as-Judge**
- LLM-based evaluation scales to large test sets and provides consistent judgments. The key is designing effective evaluation prompts that capture the dimensions of interest.
-
- Provide clear task description, agent output, ground truth (if available), evaluation scale with level descriptions, and request structured judgment.
-
- **Human Evaluation**
- Human evaluation catches what automation misses. Humans notice hallucinated answers on unusual queries, system failures, and subtle biases that automated evaluation misses.
-
- Effective human evaluation covers edge cases, samples systematically, tracks patterns, and provides contextual understanding.
-
- **End-State Evaluation**
- For agents that mutate persistent state, end-state evaluation focuses on whether the final state matches expectations rather than how the agent got there.
-
- ### Test Set Design
-
- **Sample Selection**
- Start with small samples during development. Early in agent development, changes have dramatic impacts because there is abundant low-hanging fruit. Small test sets reveal large effects.
-
- Sample from real usage patterns. Add known edge cases. Ensure coverage across complexity levels.
-
- **Complexity Stratification**
- Test sets should span complexity levels: simple (single tool call), medium (multiple tool calls), complex (many tool calls, significant ambiguity), and very complex (extended interaction, deep reasoning).
-
- ### Context Engineering Evaluation
-
- **Testing Context Strategies**
- Context engineering choices should be validated through systematic evaluation. Run agents with different context strategies on the same test set. Compare quality scores, token usage, and efficiency metrics.
-
- **Degradation Testing**
- Test how context degradation affects performance by running agents at different context sizes. Identify performance cliffs where context becomes problematic. Establish safe operating limits.
-
- ### Continuous Evaluation
-
- **Evaluation Pipeline**
- Build evaluation pipelines that run automatically on agent changes. Track results over time. Compare versions to identify improvements or regressions.
-
- **Monitoring Production**
- Track evaluation metrics in production by sampling interactions and evaluating randomly. Set alerts for quality drops. Maintain dashboards for trend analysis.
-
- ## Practical Guidance
-
- ### Building Evaluation Frameworks
-
- 1. Define quality dimensions relevant to your use case
- 2. Create rubrics with clear, actionable level descriptions
- 3. Build test sets from real usage patterns and edge cases
- 4. Implement automated evaluation pipelines
- 5. Establish baseline metrics before making changes
- 6. Run evaluations on all significant changes
- 7. Track metrics over time for trend analysis
- 8. Supplement automated evaluation with human review
-
- ### Avoiding Evaluation Pitfalls
-
- Overfitting to specific paths: Evaluate outcomes, not specific steps.
- Ignoring edge cases: Include diverse test scenarios.
- Single-metric obsession: Use multi-dimensional rubrics.
- Neglecting context effects: Test with realistic context sizes.
- Skipping human evaluation: Automated evaluation misses subtle issues.
-
- ## Examples
-
- ### Example 1: Simple Evaluation
-
- ```python
- def evaluate_agent_response(response, expected):
-     rubric = load_rubric()
-     scores = {}
-     for dimension, config in rubric.items():
-         scores[dimension] = assess_dimension(response, expected, dimension)
-     overall = weighted_average(scores, config["weights"])
-     return {"passed": overall >= 0.7, "scores": scores}
- ```
-
- ### Example 2: Test Set Structure
-
- Test sets should span multiple complexity levels to ensure comprehensive evaluation:
-
- ```python
- test_set = [
-     {
-         "name": "simple_lookup",
-         "input": "What is the capital of France?",
-         "expected": {"type": "fact", "answer": "Paris"},
-         "complexity": "simple",
-         "description": "Single tool call, factual lookup"
-     },
-     {
-         "name": "medium_query",
-         "input": "Compare the revenue of Apple and Microsoft last quarter",
-         "complexity": "medium",
-         "description": "Multiple tool calls, comparison logic"
-     },
-     {
-         "name": "multi_step_reasoning",
-         "input": "Analyze sales data from Q1-Q4 and create a summary report with trends",
-         "complexity": "complex",
-         "description": "Many tool calls, aggregation, analysis"
-     },
-     {
-         "name": "research_synthesis",
-         "input": "Research emerging AI technologies, evaluate their potential impact, and recommend adoption strategy",
-         "complexity": "very_complex",
-         "description": "Extended interaction, deep reasoning, synthesis"
-     }
- ]
- ```
-
- ## Guidelines
-
- 1. Use multi-dimensional rubrics, not single metrics
- 2. Evaluate outcomes, not specific execution paths
- 3. Cover complexity levels from simple to complex
- 4. Test with realistic context sizes and histories
- 5. Run evaluations continuously, not just before release
- 6. Supplement LLM evaluation with human review
- 7. Track metrics over time for trend detection
- 8. Set clear pass/fail thresholds based on use case
-
- ## Integration
-
- This skill connects to all other skills as a cross-cutting concern:
-
- - context-fundamentals - Evaluating context usage
- - context-degradation - Detecting degradation
- - context-optimization - Measuring optimization effectiveness
- - multi-agent-patterns - Evaluating coordination
- - tool-design - Evaluating tool effectiveness
- - memory-systems - Evaluating memory quality
-
- ## References
-
- Internal reference:
-
- - [Metrics Reference](./references/metrics.md) - Detailed evaluation metrics and implementation
-
- Internal skills:
-
- - All other skills connect to evaluation for quality measurement
-
- External resources:
-
- - LLM evaluation benchmarks
- - Agent evaluation research papers
- - Production monitoring practices
-
- ---
-
- ## Skill Metadata
-
- **Created**: 2025-12-20
- **Last Updated**: 2025-12-20
- **Author**: Agent Skills for Context Engineering Contributors
- **Version**: 1.0.0
@@ -1,49 +0,0 @@
- ---
- name: feat
- description: Quickly create a feature task. Usage: /feat <title> [description]
- ---
-
- # /feat - Create Feature Task
-
- Create a new feature task with the provided title and optional description.
-
- ## Usage
-
- ```
- /feat <title>
- /feat <title> - <description>
- ```
-
- ## Examples
-
- ```
- /feat Add dark mode toggle
- /feat User profile avatars - Allow users to upload custom profile pictures with cropping support
- ```
-
- ## Action
-
- Call `gobby-tasks.create_task` with:
- - `title`: The feature title from user input
- - `task_type`: "feature"
- - `priority`: 2 (medium - standard priority for new features)
-
- Parse the user input:
- - If input contains " - ", split into title and description
- - Otherwise, use entire input as title
-
- ```python
- call_tool(
-     server_name="gobby-tasks",
-     tool_name="create_task",
-     arguments={
-         "title": "<parsed title>",
-         "description": "<parsed description if any>",
-         "task_type": "feature",
-         "priority": 2,
-         "session_id": "<session_id>" # Required - from session context
-     }
- )
- ```
-
- After creating, confirm with the task reference (e.g., "Created feature #124: Add dark mode toggle").
@@ -1,52 +0,0 @@
- ---
- name: nit
- description: Quickly create a nitpick/minor cleanup task. Usage: /nit <title> [description]
- ---
-
- # /nit - Create Nitpick Task
-
- Create a minor cleanup or nitpick task with the provided title and optional description. These are small improvements that don't warrant a full feature or bug.
-
- ## Usage
-
- ```
- /nit <title>
- /nit <title> - <description>
- ```
-
- ## Examples
-
- ```
- /nit Rename confusing variable
- /nit Fix typo in error message - "authentification" should be "authentication"
- /nit Remove unused import in utils.py
- ```
-
- ## Action
-
- Call `gobby-tasks.create_task` with:
- - `title`: The nitpick title from user input
- - `task_type`: "chore"
- - `labels`: ["nitpick"]
- - `priority`: 4 (backlog - low priority, do when convenient)
-
- Parse the user input:
- - If input contains " - ", split into title and description
- - Otherwise, use entire input as title
-
- ```python
- call_tool(
-     server_name="gobby-tasks",
-     tool_name="create_task",
-     arguments={
-         "title": "<parsed title>",
-         "description": "<parsed description if any>",
-         "task_type": "chore",
-         "labels": ["nitpick"],
-         "priority": 4,
-         "session_id": "<session_id>" # Required - from session context
-     }
- )
- ```
-
- After creating, confirm with the task reference (e.g., "Created nitpick #125: Rename confusing variable").
@@ -1,52 +0,0 @@
- ---
- name: ref
- description: Quickly create a refactoring task. Usage: /ref <title> [description]
- ---
-
- # /ref - Create Refactoring Task
-
- Create a refactoring task with the provided title and optional description. For code improvements that don't change behavior.
-
- ## Usage
-
- ```
- /ref <title>
- /ref <title> - <description>
- ```
-
- ## Examples
-
- ```
- /ref Extract database logic into repository class
- /ref Simplify authentication middleware - Current implementation has too many nested conditionals
- /ref Convert callbacks to async/await in file handlers
- ```
-
- ## Action
-
- Call `gobby-tasks.create_task` with:
- - `title`: The refactoring title from user input
- - `task_type`: "chore"
- - `labels`: ["refactor"]
- - `priority`: 3 (low - important but not urgent)
-
- Parse the user input:
- - If input contains " - ", split into title and description
- - Otherwise, use entire input as title
-
- ```python
- call_tool(
-     server_name="gobby-tasks",
-     tool_name="create_task",
-     arguments={
-         "title": "<parsed title>",
-         "description": "<parsed description if any>",
-         "task_type": "chore",
-         "labels": ["refactor"],
-         "priority": 3,
-         "session_id": "<session_id>" # Required - from session context
-     }
- )
- ```
-
- After creating, confirm with the task reference (e.g., "Created refactor #126: Extract database logic into repository class").