@yeyuan98/opencode-bioresearcher-plugin 1.3.1-alpha.1 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +14 -0
- package/dist/index.js +4 -1
- package/dist/misc-tools/index.d.ts +3 -0
- package/dist/misc-tools/index.js +3 -0
- package/dist/misc-tools/json-extract.d.ts +13 -0
- package/dist/misc-tools/json-extract.js +394 -0
- package/dist/misc-tools/json-infer.d.ts +13 -0
- package/dist/misc-tools/json-infer.js +199 -0
- package/dist/misc-tools/json-tools.d.ts +33 -0
- package/dist/misc-tools/json-tools.js +187 -0
- package/dist/misc-tools/json-validate.d.ts +13 -0
- package/dist/misc-tools/json-validate.js +228 -0
- package/dist/skills/bioresearcher-core/README.md +210 -0
- package/dist/skills/bioresearcher-core/SKILL.md +128 -0
- package/dist/skills/bioresearcher-core/examples/contexts.json +29 -0
- package/dist/skills/bioresearcher-core/examples/data-exchange-example.md +303 -0
- package/dist/skills/bioresearcher-core/examples/template.md +49 -0
- package/dist/skills/bioresearcher-core/patterns/calculator.md +215 -0
- package/dist/skills/bioresearcher-core/patterns/data-exchange.md +406 -0
- package/dist/skills/bioresearcher-core/patterns/json-tools.md +263 -0
- package/dist/skills/bioresearcher-core/patterns/progress.md +127 -0
- package/dist/skills/bioresearcher-core/patterns/retry.md +110 -0
- package/dist/skills/bioresearcher-core/patterns/shell-commands.md +79 -0
- package/dist/skills/bioresearcher-core/patterns/subagent-waves.md +186 -0
- package/dist/skills/bioresearcher-core/patterns/table-tools.md +260 -0
- package/dist/skills/bioresearcher-core/patterns/user-confirmation.md +187 -0
- package/dist/skills/bioresearcher-core/python/template.md +273 -0
- package/dist/skills/bioresearcher-core/python/template.py +323 -0
- package/dist/skills/long-table-summary/SKILL.md +437 -0
- package/dist/skills/long-table-summary/combine_outputs.py +336 -0
- package/dist/skills/long-table-summary/generate_prompts.py +211 -0
- package/dist/skills/long-table-summary/pyproject.toml +8 -0
- package/dist/skills/pubmed-weekly/SKILL.md +329 -329
- package/dist/skills/pubmed-weekly/pubmed_weekly.py +411 -411
- package/dist/skills/pubmed-weekly/pyproject.toml +8 -8
- package/package.json +7 -2
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
# Calculator Pattern
|
|
2
|
+
|
|
3
|
+
In-workflow calculations using the calculator tool.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
Use the calculator tool for arithmetic operations in workflows. It's more reliable than manual calculations and provides consistent precision.
|
|
8
|
+
|
|
9
|
+
## Tool: calculator
|
|
10
|
+
|
|
11
|
+
```
|
|
12
|
+
calculator(formula: string, precision: number = 3)
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
### Parameters
|
|
16
|
+
- `formula`: Mathematical expression (string)
|
|
17
|
+
- `precision`: Decimal places for result (0-15, default 3)
|
|
18
|
+
|
|
19
|
+
### Return Format
|
|
20
|
+
```json
|
|
21
|
+
{
|
|
22
|
+
"formula": "(45 / 100) * 100",
|
|
23
|
+
"result": 45
|
|
24
|
+
}
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
### Supported Operations
|
|
28
|
+
|
|
29
|
+
| Operation | Symbol | Example |
|
|
30
|
+
|-----------|--------|---------|
|
|
31
|
+
| Addition | + | `2 + 3` |
|
|
32
|
+
| Subtraction | - | `5 - 2` |
|
|
33
|
+
| Multiplication | * | `3 * 4` |
|
|
34
|
+
| Division | / | `10 / 2` |
|
|
35
|
+
| Power | ^ | `2 ^ 3` |
|
|
36
|
+
| Brackets | () | `(2 + 3) * 4` |
|
|
37
|
+
| Scientific | e/E | `1e5`, `1.5e-3` |
|
|
38
|
+
|
|
39
|
+
### Important Rules
|
|
40
|
+
|
|
41
|
+
1. **MUST use explicit * for multiplication**: `2*(3)` NOT `2(3)`
|
|
42
|
+
2. **Maximum precision**: 15 decimal places
|
|
43
|
+
3. **Default precision**: 3 decimal places
|
|
44
|
+
4. **No functions**: ceil, floor, sqrt not supported
|
|
45
|
+
|
|
46
|
+
## Common Use Cases
|
|
47
|
+
|
|
48
|
+
### Batch Calculations
|
|
49
|
+
|
|
50
|
+
```
|
|
51
|
+
# Calculate number of batches needed
|
|
52
|
+
calculator(formula="ceil(100 / 30)", precision=0)
|
|
53
|
+
# Note: ceil not supported, use workaround:
|
|
54
|
+
calculator(formula="(100 + 30 - 1) / 30", precision=0)
|
|
55
|
+
# Result: 4.3 -> truncate to 4 batches (= ceil(100 / 30))
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Progress Percentages
|
|
59
|
+
|
|
60
|
+
```
|
|
61
|
+
# Calculate completion percentage
|
|
62
|
+
calculator(formula="(45 / 100) * 100", precision=1)
|
|
63
|
+
# Result: 45
|
|
64
|
+
|
|
65
|
+
# With variables in workflow
|
|
66
|
+
completed = 67
|
|
67
|
+
total = 120
|
|
68
|
+
calculator(formula="({completed} / {total}) * 100", precision=1)
|
|
69
|
+
# Result: 55.8
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
### Time Estimates
|
|
73
|
+
|
|
74
|
+
```
|
|
75
|
+
# Estimate remaining time
|
|
76
|
+
remaining_items = 50
|
|
77
|
+
items_per_minute = 10
|
|
78
|
+
calculator(formula="50 / 10", precision=0)
|
|
79
|
+
# Result: 5 minutes
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
### Data Size Calculations
|
|
83
|
+
|
|
84
|
+
```
|
|
85
|
+
# Calculate total rows across batches
|
|
86
|
+
batch_size = 30
|
|
87
|
+
num_batches = 4
|
|
88
|
+
calculator(formula="30 * 4", precision=0)
|
|
89
|
+
# Result: 120
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
## Ceiling/Floor Workarounds
|
|
93
|
+
|
|
94
|
+
Since ceil/floor are not supported:
|
|
95
|
+
|
|
96
|
+
### Ceiling
|
|
97
|
+
```
|
|
98
|
+
# ceil(a / b) = (a + b - 1) / b (for positive integers)
|
|
99
|
+
# ceil(100 / 30)
|
|
100
|
+
calculator(formula="(100 + 30 - 1) / 30", precision=0)
|
|
101
|
+
# Result: 4.3 -> Agent truncates to 4 (= ceil(100 / 30))
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
### Floor
|
|
105
|
+
```
|
|
106
|
+
# floor(a / b) = a / b (truncate decimal)
|
|
107
|
+
# floor(100 / 30)
|
|
108
|
+
calculator(formula="100 / 30", precision=0)
|
|
109
|
+
# Result: 3.333 -> Agent interprets as 3
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
## Example Workflow
|
|
113
|
+
|
|
114
|
+
### Calculate Batch Configuration
|
|
115
|
+
|
|
116
|
+
```
|
|
117
|
+
# Given
|
|
118
|
+
total_rows = 250
|
|
119
|
+
batch_size = 30
|
|
120
|
+
|
|
121
|
+
# Calculate batches needed
|
|
122
|
+
# ceil(250 / 30) = 9 batches
|
|
123
|
+
batches_needed = calculator(
|
|
124
|
+
formula="(250 + 30 - 1) / 30",
|
|
125
|
+
precision=0
|
|
126
|
+
)
|
|
127
|
+
# Result: 9.3 -> Agent truncates to 9 (= ceil(250 / 30))
|
|
128
|
+
|
|
129
|
+
# Calculate rows in last batch
|
|
130
|
+
# 250 - (9 * 30) = 250 - 270 = -20 -> use 30
|
|
131
|
+
# Actually: 250 - (8 * 30) = 250 - 240 = 10
|
|
132
|
+
last_batch_rows = calculator(
|
|
133
|
+
formula="250 - (8 * 30)",
|
|
134
|
+
precision=0
|
|
135
|
+
)
|
|
136
|
+
# Result: 10
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
### Progress Tracking
|
|
140
|
+
|
|
141
|
+
```
|
|
142
|
+
# During batch processing
|
|
143
|
+
completed = 0
|
|
144
|
+
total = 10
|
|
145
|
+
|
|
146
|
+
for batch in batches:
|
|
147
|
+
process(batch)
|
|
148
|
+
completed += 1
|
|
149
|
+
|
|
150
|
+
# Calculate and report progress
|
|
151
|
+
percent = calculator(
|
|
152
|
+
formula="({completed} / {total}) * 100",
|
|
153
|
+
precision=0
|
|
154
|
+
)
|
|
155
|
+
report(f"Progress: {completed}/{total} ({percent}%)")
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
### Wave Timing
|
|
159
|
+
|
|
160
|
+
```
|
|
161
|
+
# Calculate expected completion time
|
|
162
|
+
waves_remaining = 3
|
|
163
|
+
seconds_per_wave = 45
|
|
164
|
+
|
|
165
|
+
estimated_seconds = calculator(
|
|
166
|
+
formula="3 * 45",
|
|
167
|
+
precision=0
|
|
168
|
+
)
|
|
169
|
+
# Result: 135 seconds
|
|
170
|
+
|
|
171
|
+
# Convert to minutes
|
|
172
|
+
estimated_minutes = calculator(
|
|
173
|
+
formula="135 / 60",
|
|
174
|
+
precision=1
|
|
175
|
+
)
|
|
176
|
+
# Result: 2.3 minutes
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
## Error Handling
|
|
180
|
+
|
|
181
|
+
### Invalid Formula
|
|
182
|
+
```
|
|
183
|
+
# Missing explicit multiplication
|
|
184
|
+
calculator(formula="2(3)")
|
|
185
|
+
# Error: "CALCULATOR ERROR: Invalid syntax: parentheses-less multiplication not allowed"
|
|
186
|
+
```
|
|
187
|
+
|
|
188
|
+
### Division by Zero
|
|
189
|
+
```
|
|
190
|
+
calculator(formula="10 / 0")
|
|
191
|
+
# Error: "CALCULATOR ERROR: Division by zero"
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
### Invalid Characters
|
|
195
|
+
```
|
|
196
|
+
calculator(formula="2 + abc")
|
|
197
|
+
# Error: "CALCULATOR ERROR: Formula contains invalid characters: a b c"
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
## Integration with Other Patterns
|
|
201
|
+
|
|
202
|
+
| Pattern | Calculator Usage |
|
|
203
|
+
|---------|-----------------|
|
|
204
|
+
| `progress.md` | Calculate percentages |
|
|
205
|
+
| `retry.md` | Calculate backoff delays |
|
|
206
|
+
| `subagent-waves.md` | Calculate wave counts |
|
|
207
|
+
| `table-tools.md` | Calculate row counts |
|
|
208
|
+
|
|
209
|
+
## Best Practices
|
|
210
|
+
|
|
211
|
+
1. **Use precision=0 for counts**: Integer results
|
|
212
|
+
2. **Use precision=1 for percentages**: One decimal place
|
|
213
|
+
3. **Always use explicit ***: Never implicit multiplication
|
|
214
|
+
4. **Check for division by zero**: Validate divisors
|
|
215
|
+
5. **Document formulas**: Explain what calculation does
|
|
@@ -0,0 +1,406 @@
|
|
|
1
|
+
# Data Exchange Pattern
|
|
2
|
+
|
|
3
|
+
Standardized protocol for data exchange between main agent and subagents.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
This pattern ensures reliable communication between main agent and subagents using:
|
|
8
|
+
- File-based prompts with embedded schemas
|
|
9
|
+
- JSON output files with validation
|
|
10
|
+
- Schema-first design for type safety
|
|
11
|
+
|
|
12
|
+
## Data Exchange Protocol
|
|
13
|
+
|
|
14
|
+
```
|
|
15
|
+
Main Agent Subagent
|
|
16
|
+
| |
|
|
17
|
+
|--- Write prompt file ----------->|
|
|
18
|
+
| (with embedded schema) |
|
|
19
|
+
| |
|
|
20
|
+
| |--- Read prompt
|
|
21
|
+
| |--- Process data
|
|
22
|
+
| |--- Write output JSON
|
|
23
|
+
| |
|
|
24
|
+
|<-- Write output file -------------|
|
|
25
|
+
| (JSON matching schema) |
|
|
26
|
+
| |
|
|
27
|
+
|--- jsonExtract ------------------>|
|
|
28
|
+
|--- jsonValidate ----------------->|
|
|
29
|
+
|--- Process validated data |
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Main Agent Responsibilities
|
|
33
|
+
|
|
34
|
+
### 1. Create Prompt File with Embedded Schema
|
|
35
|
+
|
|
36
|
+
Include the output schema directly in the prompt:
|
|
37
|
+
|
|
38
|
+
```markdown
|
|
39
|
+
# Task Description
|
|
40
|
+
|
|
41
|
+
Process the data and output results.
|
|
42
|
+
|
|
43
|
+
## Output Format
|
|
44
|
+
|
|
45
|
+
Your output must be valid JSON matching this schema:
|
|
46
|
+
|
|
47
|
+
```json
|
|
48
|
+
{
|
|
49
|
+
"batch_number": <integer>,
|
|
50
|
+
"row_count": <integer>,
|
|
51
|
+
"summaries": [
|
|
52
|
+
{
|
|
53
|
+
"row_number": <integer>,
|
|
54
|
+
"field1": "<string>",
|
|
55
|
+
"field2": "<string>"
|
|
56
|
+
}
|
|
57
|
+
]
|
|
58
|
+
}
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
Write your output to: {output_file}
|
|
62
|
+
|
|
63
|
+
**CRITICAL:** Write ONLY the JSON object, no additional text.
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
### 2. Launch Subagent with File Reference
|
|
67
|
+
|
|
68
|
+
```
|
|
69
|
+
task(
|
|
70
|
+
subagent_type="general",
|
|
71
|
+
description="Process batch 001",
|
|
72
|
+
prompt="Read your prompt from ./prompts/batch001.md and perform the task exactly as written."
|
|
73
|
+
)
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
### 3. Validate Subagent Output
|
|
77
|
+
|
|
78
|
+
```python
|
|
79
|
+
# Extract JSON from output file
|
|
80
|
+
result = jsonExtract(file_path="./outputs/batch001.md")
|
|
81
|
+
|
|
82
|
+
if not result.success:
|
|
83
|
+
log_error(f"Failed to extract JSON")
|
|
84
|
+
handle_failure()
|
|
85
|
+
|
|
86
|
+
# Validate against expected schema
|
|
87
|
+
validation = jsonValidate(
|
|
88
|
+
data=json.dumps(result.data),
|
|
89
|
+
schema=expected_schema
|
|
90
|
+
)
|
|
91
|
+
|
|
92
|
+
if not validation.valid:
|
|
93
|
+
log_error(f"Validation failed: {validation.errors}")
|
|
94
|
+
handle_failure()
|
|
95
|
+
|
|
96
|
+
# Process validated data
|
|
97
|
+
process(result.data)
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
## Subagent Responsibilities
|
|
101
|
+
|
|
102
|
+
### 1. Read Prompt File
|
|
103
|
+
|
|
104
|
+
The subagent reads the prompt file to understand:
|
|
105
|
+
- Task description
|
|
106
|
+
- Input data location
|
|
107
|
+
- Output format (schema)
|
|
108
|
+
- Output file path
|
|
109
|
+
|
|
110
|
+
### 2. Process and Generate Output
|
|
111
|
+
|
|
112
|
+
The subagent:
|
|
113
|
+
- Reads input data using available tools
|
|
114
|
+
- Processes according to instructions
|
|
115
|
+
- Generates output matching the schema exactly
|
|
116
|
+
|
|
117
|
+
### 3. Write Output File
|
|
118
|
+
|
|
119
|
+
Write ONLY valid JSON to the specified output file:
|
|
120
|
+
|
|
121
|
+
```json
|
|
122
|
+
{
|
|
123
|
+
"batch_number": 1,
|
|
124
|
+
"row_count": 30,
|
|
125
|
+
"summaries": [
|
|
126
|
+
{"row_number": 2, "field1": "value1", "field2": "value2"},
|
|
127
|
+
{"row_number": 3, "field1": "value3", "field2": "value4"}
|
|
128
|
+
]
|
|
129
|
+
}
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
## Schema Definition Guidelines
|
|
133
|
+
|
|
134
|
+
### Basic Schema Example
|
|
135
|
+
|
|
136
|
+
```json
|
|
137
|
+
{
|
|
138
|
+
"batch_number": <integer>,
|
|
139
|
+
"row_count": <integer>,
|
|
140
|
+
"summaries": [
|
|
141
|
+
{
|
|
142
|
+
"row_number": <integer>,
|
|
143
|
+
"field_name": "<type_description>"
|
|
144
|
+
}
|
|
145
|
+
]
|
|
146
|
+
}
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
### Type Annotations in Schema
|
|
150
|
+
|
|
151
|
+
Use clear type annotations in markdown:
|
|
152
|
+
|
|
153
|
+
| Annotation | Type |
|
|
154
|
+
|------------|------|
|
|
155
|
+
| `<integer>` | Integer number |
|
|
156
|
+
| `<number>` | Any number |
|
|
157
|
+
| `<string>` | Text string |
|
|
158
|
+
| `<boolean>` | true or false |
|
|
159
|
+
| `<array>` | JSON array |
|
|
160
|
+
| `<object>` | JSON object |
|
|
161
|
+
|
|
162
|
+
### Enum Values
|
|
163
|
+
|
|
164
|
+
Specify allowed values:
|
|
165
|
+
|
|
166
|
+
```json
|
|
167
|
+
{
|
|
168
|
+
"status": "<one of: active/inactive/pending>"
|
|
169
|
+
}
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
### Optional Fields
|
|
173
|
+
|
|
174
|
+
Mark optional fields clearly:
|
|
175
|
+
|
|
176
|
+
```json
|
|
177
|
+
{
|
|
178
|
+
"required_field": "<string>",
|
|
179
|
+
"optional_field?": "<string or null>"
|
|
180
|
+
}
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
## Validation Flow
|
|
184
|
+
|
|
185
|
+
### Step 1: Infer Schema from First Output
|
|
186
|
+
|
|
187
|
+
```python
|
|
188
|
+
# Get first output
|
|
189
|
+
first_result = jsonExtract(file_path="./outputs/batch001.md")
|
|
190
|
+
|
|
191
|
+
# Infer schema
|
|
192
|
+
schema_result = jsonInfer(
|
|
193
|
+
data=json.dumps(first_result.data),
|
|
194
|
+
    strict=True
|
|
195
|
+
)
|
|
196
|
+
|
|
197
|
+
# Store schema for validation
|
|
198
|
+
expected_schema = json.dumps(schema_result.data)
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
### Step 2: Validate All Outputs
|
|
202
|
+
|
|
203
|
+
```python
|
|
204
|
+
for file_path in output_files:
|
|
205
|
+
# Extract
|
|
206
|
+
result = jsonExtract(file_path=file_path)
|
|
207
|
+
if not result.success:
|
|
208
|
+
log_error(f"Extraction failed: {file_path}")
|
|
209
|
+
continue
|
|
210
|
+
|
|
211
|
+
# Validate
|
|
212
|
+
validation = jsonValidate(
|
|
213
|
+
data=json.dumps(result.data),
|
|
214
|
+
schema=expected_schema
|
|
215
|
+
)
|
|
216
|
+
|
|
217
|
+
if not validation.valid:
|
|
218
|
+
log_error(f"Validation failed: {file_path}")
|
|
219
|
+
log_error(validation.errors)
|
|
220
|
+
continue
|
|
221
|
+
|
|
222
|
+
# Collect valid data
|
|
223
|
+
valid_outputs.append(result.data)
|
|
224
|
+
```
|
|
225
|
+
|
|
226
|
+
## Error Handling
|
|
227
|
+
|
|
228
|
+
### Error Structures
|
|
229
|
+
|
|
230
|
+
#### jsonExtract Error Response
|
|
231
|
+
```json
|
|
232
|
+
{
|
|
233
|
+
"success": false,
|
|
234
|
+
"data": null,
|
|
235
|
+
"metadata": {
|
|
236
|
+
"error": {
|
|
237
|
+
"code": "NO_JSON_FOUND",
|
|
238
|
+
"message": "No valid JSON found in file"
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
#### Error Codes
|
|
245
|
+
| Code | Description |
|
|
246
|
+
|------|-------------|
|
|
247
|
+
| `FILE_NOT_FOUND` | File does not exist |
|
|
248
|
+
| `FILE_TOO_LARGE` | File exceeds 200MB limit |
|
|
249
|
+
| `BINARY_FILE` | File is binary format |
|
|
250
|
+
| `EMPTY_FILE` | File has no content |
|
|
251
|
+
| `NO_JSON_FOUND` | No valid JSON found |
|
|
252
|
+
|
|
253
|
+
#### jsonValidate Error Response
|
|
254
|
+
```json
|
|
255
|
+
{
|
|
256
|
+
"success": true,
|
|
257
|
+
"valid": false,
|
|
258
|
+
"errors": [
|
|
259
|
+
{
|
|
260
|
+
"path": "summaries.0.row_number",
|
|
261
|
+
"message": "Expected number, received string",
|
|
262
|
+
"code": "invalid_type",
|
|
263
|
+
"expected": "number",
|
|
264
|
+
"received": "string"
|
|
265
|
+
}
|
|
266
|
+
]
|
|
267
|
+
}
|
|
268
|
+
```
|
|
269
|
+
|
|
270
|
+
### Extraction Failures
|
|
271
|
+
|
|
272
|
+
```python
|
|
273
|
+
if not result.success:
|
|
274
|
+
error_code = result.metadata.get("error", {}).get("code", "UNKNOWN")
|
|
275
|
+
if error_code == "NO_JSON_FOUND":
|
|
276
|
+
log_error("Subagent did not output valid JSON")
|
|
277
|
+
log_error("Check subagent output for errors")
|
|
278
|
+
retry_or_skip()
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
### Validation Failures
|
|
282
|
+
|
|
283
|
+
```python
|
|
284
|
+
if not validation.valid:
|
|
285
|
+
for error in validation.errors:
|
|
286
|
+
log_error(f"Field {error['path']}: {error['message']}")
|
|
287
|
+
if error['code'] == "invalid_type":
|
|
288
|
+
log_error(f" Expected: {error['expected']}")
|
|
289
|
+
log_error(f" Received: {error['received']}")
|
|
290
|
+
```
|
|
291
|
+
|
|
292
|
+
### Subagent Execution Failures
|
|
293
|
+
|
|
294
|
+
Beyond output validation, subagents may fail during execution:
|
|
295
|
+
|
|
296
|
+
| Failure Type | Detection | Recovery |
|
|
297
|
+
|--------------|-----------|----------|
|
|
298
|
+
| Timeout | Task exceeds time limit | Retry with smaller batch |
|
|
299
|
+
| Crash | No output file created | Retry or skip |
|
|
300
|
+
| Partial output | Incomplete JSON | Retry or use partial data |
|
|
301
|
+
| Wrong format | JSON doesn't match schema | Re-prompt with clearer instructions |
|
|
302
|
+
|
|
303
|
+
### Failure Handling Pattern
|
|
304
|
+
|
|
305
|
+
```python
|
|
306
|
+
# After launching subagent wave
|
|
307
|
+
failed_batches = []
|
|
308
|
+
|
|
309
|
+
for batch_file in expected_outputs:
|
|
310
|
+
if not file_exists(batch_file):
|
|
311
|
+
log_error(f"Subagent failed to create output: {batch_file}")
|
|
312
|
+
failed_batches.append(batch_file)
|
|
313
|
+
continue
|
|
314
|
+
|
|
315
|
+
result = jsonExtract(file_path=batch_file)
|
|
316
|
+
if not result.success:
|
|
317
|
+
log_error(f"Failed to extract JSON: {batch_file}")
|
|
318
|
+
failed_batches.append(batch_file)
|
|
319
|
+
continue
|
|
320
|
+
|
|
321
|
+
validation = jsonValidate(data=json.dumps(result.data), schema=expected_schema)
|
|
322
|
+
if not validation.valid:
|
|
323
|
+
log_error(f"Validation failed: {batch_file}")
|
|
324
|
+
failed_batches.append(batch_file)
|
|
325
|
+
continue
|
|
326
|
+
|
|
327
|
+
valid_outputs.append(result.data)
|
|
328
|
+
|
|
329
|
+
# Retry failed batches using retry.md pattern
|
|
330
|
+
if failed_batches:
|
|
331
|
+
for batch in failed_batches:
|
|
332
|
+
retry_subagent(batch, max_attempts=3, delay=5)
|
|
333
|
+
```
|
|
334
|
+
|
|
335
|
+
> **Note:** Reference `patterns/retry.md` for implementing retry logic with exponential backoff.
|
|
336
|
+
|
|
337
|
+
## Complete Example
|
|
338
|
+
|
|
339
|
+
### Main Agent: Create Prompt
|
|
340
|
+
|
|
341
|
+
```markdown
|
|
342
|
+
# Gene Classification Task
|
|
343
|
+
|
|
344
|
+
## Input
|
|
345
|
+
- File: ./data/genes.xlsx
|
|
346
|
+
- Sheet: Sheet1
|
|
347
|
+
- Rows: 2-31
|
|
348
|
+
|
|
349
|
+
## Instructions
|
|
350
|
+
For each row, classify the gene by species and function.
|
|
351
|
+
|
|
352
|
+
## Output Format
|
|
353
|
+
Write JSON to: ./outputs/batch001.md
|
|
354
|
+
|
|
355
|
+
```json
|
|
356
|
+
{
|
|
357
|
+
"batch_number": 1,
|
|
358
|
+
"row_count": 30,
|
|
359
|
+
"summaries": [
|
|
360
|
+
{
|
|
361
|
+
"row_number": <integer>,
|
|
362
|
+
"gene_name": "<string>",
|
|
363
|
+
"species": "<one of: human/mouse/other>",
|
|
364
|
+
"function": "<string>"
|
|
365
|
+
}
|
|
366
|
+
]
|
|
367
|
+
}
|
|
368
|
+
```
|
|
369
|
+
```
|
|
370
|
+
|
|
371
|
+
### Subagent: Write Output
|
|
372
|
+
|
|
373
|
+
```json
|
|
374
|
+
{
|
|
375
|
+
"batch_number": 1,
|
|
376
|
+
"row_count": 30,
|
|
377
|
+
"summaries": [
|
|
378
|
+
{"row_number": 2, "gene_name": "BRAF", "species": "human", "function": "Kinase"},
|
|
379
|
+
{"row_number": 3, "gene_name": "TP53", "species": "human", "function": "Tumor suppressor"}
|
|
380
|
+
]
|
|
381
|
+
}
|
|
382
|
+
```
|
|
383
|
+
|
|
384
|
+
### Main Agent: Validate
|
|
385
|
+
|
|
386
|
+
```python
|
|
387
|
+
# Extract
|
|
388
|
+
result = jsonExtract(file_path="./outputs/batch001.md")
|
|
389
|
+
|
|
390
|
+
# Validate
|
|
391
|
+
validation = jsonValidate(
|
|
392
|
+
data=json.dumps(result.data),
|
|
393
|
+
schema='{"type":"object","properties":{"batch_number":{"type":"integer"},"summaries":{"type":"array"}}}'
|
|
394
|
+
)
|
|
395
|
+
|
|
396
|
+
if validation.valid:
|
|
397
|
+
process(result.data)
|
|
398
|
+
```
|
|
399
|
+
|
|
400
|
+
## Best Practices
|
|
401
|
+
|
|
402
|
+
1. **Embed schema in prompt**: Don't rely on external schema files
|
|
403
|
+
2. **Use strict typing**: Specify exact types and allowed values
|
|
404
|
+
3. **Validate every output**: Never skip validation
|
|
405
|
+
4. **Handle errors gracefully**: Log and continue with other outputs
|
|
406
|
+
5. **Keep schemas simple**: Avoid complex nested structures
|