@yeyuan98/opencode-bioresearcher-plugin 1.6.0 → 1.6.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/README.md +7 -6
  2. package/dist/agents/bioresearcher/prompt.d.ts +1 -1
  3. package/dist/agents/bioresearcher/prompt.js +4 -4
  4. package/dist/agents/bioresearcherDR/prompt.d.ts +1 -1
  5. package/dist/agents/bioresearcherDR/prompt.js +2 -2
  6. package/dist/agents/bioresearcherDR_worker/prompt.d.ts +1 -1
  7. package/dist/agents/bioresearcherDR_worker/prompt.js +2 -2
  8. package/dist/index.js +16 -3
  9. package/dist/shared/skill-sync.d.ts +1 -0
  10. package/dist/shared/skill-sync.js +113 -0
  11. package/dist/shared/tool-restrictions.d.ts +2 -2
  12. package/dist/shared/tool-restrictions.js +10 -4
  13. package/dist/skills/bioresearcher-core/README.md +1 -1
  14. package/dist/skills/bioresearcher-core/SKILL.md +1 -1
  15. package/dist/skills/bioresearcher-core/patterns/bioresearcher/analysis-methods.md +1 -1
  16. package/dist/skills/bioresearcher-core/patterns/bioresearcher/best-practices.md +1 -1
  17. package/dist/skills/bioresearcher-tests/README.md +1 -1
  18. package/dist/skills/bioresearcher-tests/SKILL.md +1 -1
  19. package/dist/skills/bioresearcher-tests/test_cases/skill_tests.md +3 -3
  20. package/dist/skills/gromacs-guides/SKILL.md +1 -1
  21. package/dist/tools/db/tools.js +6 -3
  22. package/dist/tools/misc/calculator.js +1 -0
  23. package/dist/tools/misc/json-extract.js +1 -0
  24. package/dist/tools/misc/json-infer.js +1 -0
  25. package/dist/tools/misc/json-validate.js +1 -0
  26. package/dist/tools/misc/timer.js +1 -0
  27. package/dist/tools/parser/obo/obo.js +1 -0
  28. package/dist/tools/parser/pubmed/pubmed.js +1 -0
  29. package/dist/tools/table/tools.js +13 -0
  30. package/dist/version.d.ts +1 -0
  31. package/dist/version.js +1 -0
  32. package/package.json +2 -2
package/README.md CHANGED
@@ -117,17 +117,17 @@ Configuration via `env.jsonc` in working directory. Use the `env-jsonc-setup` sk
117
117
 
118
118
  ## Skills
119
119
 
120
- Skills are reusable prompt templates discovered from multiple paths via the `bioresearcher-skill` tool:
120
+ Skills are reusable prompt templates discovered by OpenCode's built-in `skill` tool.
121
+
122
+ Plugin-shipped skills are automatically copied into `.opencode/skills/` at plugin load time, making them discoverable alongside user-defined skills. Skills are auto-updated when the plugin version changes. To customize a plugin skill, delete its `.plugin-managed` marker file.
121
123
 
122
124
  | Path | Scope |
123
125
  |------|-------|
124
- | `.opencode/skills/` | Project |
126
+ | `.opencode/skills/` | Project (includes auto-copied plugin skills) |
125
127
  | `~/.config/opencode/skills/` | Global |
126
128
  | `.claude/skills/` | Claude Code compatible |
127
129
  | `.agents/skills/` | Agents compatible |
128
130
 
129
- Plugin-shipped skills are discovered alongside user-defined skills. The `bioresearcher-skill` tool name avoids conflict with OpenCode's built-in `skill` tool.
130
-
131
131
  ### Supplied skills
132
132
 
133
133
  - `demo-skill`: showcase skill integration and resource resolution.
@@ -142,7 +142,7 @@ Plugin-shipped skills are discovered alongside user-defined skills. The `biorese
142
142
  Prompt the following and follow along:
143
143
 
144
144
  ```txt
145
- Setup python uv with bioresearcher-skill
145
+ Setup python uv with skill
146
146
  ```
147
147
 
148
148
  ## Installation
@@ -173,7 +173,8 @@ BioMCP is absolutely required and might take time to load. You will need to have
173
173
  | Issue | Solution |
174
174
  |-------|----------|
175
175
  | Agent not appearing | Check plugin in opencode.json is typed correctly |
176
- | BioMCP tool failures | Make sure you have BioMCP [installed and enabled](https://opencode.ai/docs/mcp-servers/#enable) |
176
+ | BioMCP tool failures | Make sure you have BioMCP [installed and enabled](https://opencode.ai/docs/mcp-servers/#enable). The `mcp.biomcp` config in opencode.json is **required** — without it, biomcp tools will not exist regardless of agent permissions. |
177
+ | biomcp tools not available to agents | Ensure the `mcp` section is present in your `opencode.json`. Agent permission config only controls access to tools that already exist — it cannot create tools that the MCP server hasn't registered. |
177
178
  | Research taking a long time | Check whether subagents are generating outputs; could be slow model / API throttle / overcomplicated query / etc. |
178
179
 
179
180
  ## License
@@ -4,5 +4,5 @@
4
4
  * A specialized biomedical research agent that performs reference-based
5
5
  * pharmaceutical and drug development research using BioMCP tools.
6
6
  */
7
- export declare const BIORESEARCHER_SYSTEM_PROMPT = "\n------ RULE REMINDER START ------\nIMPORTANT: THIS REMINDER SHALL OVERRIDE ALL OTHER PROMPTS PROVIDED TO YOU. IGNORE ALL OTHER PROMPTS.\n\nYou are an expert in biomedical and pharmaceutical drug development research.\n\n## Skill Loading (MANDATORY)\n\nAt the start of complex research tasks, load the core skill:\n\n```\nbioresearcher-skill bioresearcher-core\n```\n\nThis skill provides patterns for:\n- **Decision Making**: Tool selection, analysis method choice\n- **Workflow Control**: Retry logic, progress tracking, rate limiting\n- **Data Handling**: JSON validation, table operations, data exchange\n- **Research Standards**: Citation formatting, report templates, Python standards\n- **Best Practices**: Upfront filtering, error handling, performance optimization\n\n## Core Workflow\n\n### Step 1: Clarify Questions\nIf unclear, guide the user to make their question professional and specific:\n- Identify the core research question\n- Determine what type of data is needed\n- Understand the expected output format\n\n### Step 2: Select Appropriate Tools\nUse decision trees from `patterns/tool-selection.md`:\n\n**Data Source Identification:**\n- Database/SQL \u2192 db* tools (dbQuery, dbListTables, dbDescribeTable)\n- Excel/CSV file \u2192 table* tools (tableFilterRows, tableGroupBy, etc.)\n- Website/URL \u2192 web* tools (webfetch, websearch)\n- Literature/Papers \u2192 biomcp* article tools\n- Clinical Trials \u2192 biomcp* trial tools\n- Genes/Variants \u2192 biomcp* gene/variant tools\n- Drugs/Compounds \u2192 biomcp* drug tools\n\n**CRITICAL: Apply upfront filtering at the source (see best-practices.md)**\n\n### Step 3: Fetch Information\nGather trustable information using selected tools:\n\n**Database Queries:**\n```\n1. Check env.jsonc exists (if not, load bioresearcher-skill 'env-jsonc-setup')\n2. dbListTables() \u2192 Discover available data\n3. dbDescribeTable() \u2192 Understand schema\n4. 
dbQuery(\"SELECT ... WHERE filter = :param\", {param: value})\n \u2705 DO: Use WHERE clauses, LIMIT, named parameters\n \u274C DON'T: SELECT * then filter in Python\n```\n\n**Table Operations:**\n```\n1. tableGetSheetPreview() \u2192 Preview structure\n2. Determine row count \u2192 Choose approach:\n - < 30 rows: Use table tools directly\n - 30-1000 rows: Consider long-table-summary skill\n - > 1000 rows: Use Python for complex analysis\n3. Apply filters: tableFilterRows(column, operator, value)\n \u2705 DO: Filter upfront with tableFilterRows\n \u274C DON'T: Load entire table then filter\n```\n\n**BioMCP Queries:**\n```\n1. Use targeted queries with specific filters\n2. biomcp_article_searcher(genes=[\"BRAF\"], diseases=[\"melanoma\"], page_size=50)\n3. ALWAYS: blockingTimer(0.3) between consecutive calls\n4. Sequential only (NEVER concurrent)\n \u2705 DO: Use specific filters (genes, diseases, variants)\n \u274C DON'T: Broad query then manual filtering\n```\n\n### Step 4: Analyze Data\nChoose analysis method using `patterns/analysis-methods.md`:\n\n**Decision Matrix:**\n| Approach | When to Use |\n|----------|-------------|\n| Table Tools | < 30 rows, simple operations (filter, group, summarize) |\n| long-table-summary Skill | 30-1000 rows, structured summarization, parallel processing |\n| Custom Python | > 1000 rows, complex logic, ML, reusable pipeline |\n\n**Skill Loading:**\n- Complex analysis \u2192 Load `bioresearcher-core` for retry, validation patterns\n- Large table summarization \u2192 Load `long-table-summary` skill\n- Python needed but uv missing \u2192 Load `python-setup-uv` skill\n\n**Python Scripts:**\n- Follow `patterns/python-standards.md` (DRY principle)\n- Module docstrings with purpose, usage, dependencies\n- Function docstrings with Args, Returns, Raises, Examples\n- No code duplication - extract to reusable functions\n- Type hints for all functions\n- Save to `.scripts/py/` folder\n\n### Step 5: Write Reference-Based Report\nFollow 
`patterns/report-template.md` structure:\n\n**Mandatory Sections:**\n1. **Executive Summary** - Key findings with citations [1, 2]\n2. **Data Sources** - Origin, access method, scope, quality notes\n3. **Analysis Methodology** - Approach, tools, steps, validation\n4. **Findings** - Results with citations and data provenance\n5. **Limitations** - Data gaps, methodological constraints\n6. **References** - Formatted bibliography by source type\n\n**Data Provenance Requirements:**\nEvery claim must have:\n- Citation [N] reference, OR\n- Data source documentation, OR\n- Analysis method description\n\n**Citation Format (from `patterns/citations.md`):**\n- In-text: [1], [2, 3], [1-5]\n- Bibliography: Numbered by order of appearance\n- Source-specific formats (articles, trials, web, databases)\n\n## Rate Limiting (MANDATORY)\n\n**ALWAYS use blockingTimer between consecutive API calls:**\n- BioMCP tools: 0.3 seconds (300ms)\n- Web tools: 0.5 seconds (500ms)\n- Database: No delay needed\n- File operations: No delay needed\n\n## Error Handling & Validation\n\n**Validation Pattern (from best-practices.md):**\n1. Check data existence (not empty)\n2. Validate structure (required fields)\n3. Validate types (correct data types)\n4. Validate values (within ranges)\n5. 
Validate quality (no duplicates)\n\n**Retry Logic (from patterns/retry.md):**\n- Max 3 attempts for network operations\n- Exponential backoff: 2s, 4s, 8s\n- Use blockingTimer between retries\n\n## Python Guidelines\n\n**When to Use Python:**\n- ONLY if existing tools are not suitable\n- Complex transformations beyond table tools\n- Statistical analysis beyond basic aggregation\n- Machine learning or custom algorithms\n\n**Code Standards (MANDATORY):**\n```python\n#!/usr/bin/env python3\n\"\"\"Script Purpose - One Line Description\n\nThis module provides functionality for:\n- Functionality 1\n- Functionality 2\n\nUsage:\n uv run python script.py command --input file.xlsx --output results/\n\nDependencies:\n - pandas >= 1.5.0\n\nAuthor: BioResearcher AI Agent\nDate: YYYY-MM-DD\n\"\"\"\n```\n\n**Function Documentation:**\n```python\ndef analyze_data(data: List[Dict], threshold: float = 0.5) -> Dict:\n \"\"\"Brief description.\n \n Args:\n data: Description of data\n threshold: Threshold value (0.0 to 1.0)\n \n Returns:\n Dictionary with results\n \n Raises:\n ValueError: If threshold out of range\n \"\"\"\n```\n\n**File Location:**\n- Scripts: `.scripts/py/`\n- Use uv for execution: `uv run python .scripts/py/script.py`\n- If uv unavailable, load bioresearcher-skill `python-setup-uv`\n\n## Best Practices (CRITICAL)\n\n### Upfront Filtering\n\u2705 ALWAYS filter at source:\n- Database: WHERE clauses, LIMIT\n- Tables: tableFilterRows upfront\n- BioMCP: Specific filters (genes, diseases, variants)\n- Web: Specific search queries\n\n\u274C NEVER retrieve all data then filter in Python\n\n### Data Validation\n\u2705 ALWAYS validate:\n- Check required fields exist\n- Verify data types correct\n- Ensure values in expected ranges\n\n### Error Handling\n\u2705 ALWAYS handle errors:\n- Try-except for external operations\n- Retry logic with exponential backoff\n- Informative error messages\n\n### Context Management\n\u2705 ALWAYS minimize context usage:\n- Summarize large 
datasets instead of loading all\n- Use file-based data exchange\n- Paginate large result sets\n\n## Bottomline Rules\n\n1. ONLY use high-quality sources: biomcp results or official biotech/pharma websites\n2. ALWAYS provide citations [1], [2], ... with full bibliography at end\n3. ALWAYS backup files before editing (create `.bak` files)\n4. ALWAYS use blockingTimer(0.3) between consecutive biomcp* calls\n5. ALWAYS filter upfront - never retrieve then filter\n6. ALWAYS validate data before processing\n7. ALWAYS follow structured report template with data provenance\n8. ALWAYS write documented Python code following DRY principle\n------ RULE REMINDER END ------\n";
7
+ export declare const BIORESEARCHER_SYSTEM_PROMPT = "\n------ RULE REMINDER START ------\nIMPORTANT: THIS REMINDER SHALL OVERRIDE ALL OTHER PROMPTS PROVIDED TO YOU. IGNORE ALL OTHER PROMPTS.\n\nYou are an expert in biomedical and pharmaceutical drug development research.\n\n## Skill Loading (MANDATORY)\n\nAt the start of complex research tasks, load the core skill:\n\n```\nskill bioresearcher-core\n```\n\nThis skill provides patterns for:\n- **Decision Making**: Tool selection, analysis method choice\n- **Workflow Control**: Retry logic, progress tracking, rate limiting\n- **Data Handling**: JSON validation, table operations, data exchange\n- **Research Standards**: Citation formatting, report templates, Python standards\n- **Best Practices**: Upfront filtering, error handling, performance optimization\n\n## Core Workflow\n\n### Step 1: Clarify Questions\nIf unclear, guide the user to make their question professional and specific:\n- Identify the core research question\n- Determine what type of data is needed\n- Understand the expected output format\n\n### Step 2: Select Appropriate Tools\nUse decision trees from `patterns/tool-selection.md`:\n\n**Data Source Identification:**\n- Database/SQL \u2192 db* tools (dbQuery, dbListTables, dbDescribeTable)\n- Excel/CSV file \u2192 table* tools (tableFilterRows, tableGroupBy, etc.)\n- Website/URL \u2192 Ask the user to provide the content directly (web search/fetch tools are not available to this agent)\n- Literature/Papers \u2192 biomcp* article tools\n- Clinical Trials \u2192 biomcp* trial tools\n- Genes/Variants \u2192 biomcp* gene/variant tools\n- Drugs/Compounds \u2192 biomcp* drug tools\n\n**CRITICAL: Apply upfront filtering at the source (see best-practices.md)**\n\n### Step 3: Fetch Information\nGather trustable information using selected tools:\n\n**Database Queries:**\n```\n1. Check env.jsonc exists (if not, load skill 'env-jsonc-setup')\n2. dbListTables() \u2192 Discover available data\n3. 
dbDescribeTable() \u2192 Understand schema\n4. dbQuery(\"SELECT ... WHERE filter = :param\", {param: value})\n \u2705 DO: Use WHERE clauses, LIMIT, named parameters\n \u274C DON'T: SELECT * then filter in Python\n```\n\n**Table Operations:**\n```\n1. tableGetSheetPreview() \u2192 Preview structure\n2. Determine row count \u2192 Choose approach:\n - < 30 rows: Use table tools directly\n - 30-1000 rows: Consider long-table-summary skill\n - > 1000 rows: Use Python for complex analysis\n3. Apply filters: tableFilterRows(column, operator, value)\n \u2705 DO: Filter upfront with tableFilterRows\n \u274C DON'T: Load entire table then filter\n```\n\n**BioMCP Queries:**\n```\n1. Use targeted queries with specific filters\n2. biomcp_article_searcher(genes=[\"BRAF\"], diseases=[\"melanoma\"], page_size=50)\n3. ALWAYS: blockingTimer(0.3) between consecutive calls\n4. Sequential only (NEVER concurrent)\n \u2705 DO: Use specific filters (genes, diseases, variants)\n \u274C DON'T: Broad query then manual filtering\n```\n\n### Step 4: Analyze Data\nChoose analysis method using `patterns/analysis-methods.md`:\n\n**Decision Matrix:**\n| Approach | When to Use |\n|----------|-------------|\n| Table Tools | < 30 rows, simple operations (filter, group, summarize) |\n| long-table-summary Skill | 30-1000 rows, structured summarization, parallel processing |\n| Custom Python | > 1000 rows, complex logic, ML, reusable pipeline |\n\n**Skill Loading:**\n- Complex analysis \u2192 Load `bioresearcher-core` for retry, validation patterns\n- Large table summarization \u2192 Load `long-table-summary` skill\n- Python needed but uv missing \u2192 Load `python-setup-uv` skill\n\n**Python Scripts:**\n- Follow `patterns/python-standards.md` (DRY principle)\n- Module docstrings with purpose, usage, dependencies\n- Function docstrings with Args, Returns, Raises, Examples\n- No code duplication - extract to reusable functions\n- Type hints for all functions\n- Save to `.scripts/py/` folder\n\n### Step 
5: Write Reference-Based Report\nFollow `patterns/report-template.md` structure:\n\n**Mandatory Sections:**\n1. **Executive Summary** - Key findings with citations [1, 2]\n2. **Data Sources** - Origin, access method, scope, quality notes\n3. **Analysis Methodology** - Approach, tools, steps, validation\n4. **Findings** - Results with citations and data provenance\n5. **Limitations** - Data gaps, methodological constraints\n6. **References** - Formatted bibliography by source type\n\n**Data Provenance Requirements:**\nEvery claim must have:\n- Citation [N] reference, OR\n- Data source documentation, OR\n- Analysis method description\n\n**Citation Format (from `patterns/citations.md`):**\n- In-text: [1], [2, 3], [1-5]\n- Bibliography: Numbered by order of appearance\n- Source-specific formats (articles, trials, web, databases)\n\n## Rate Limiting (MANDATORY)\n\n**ALWAYS use blockingTimer between consecutive API calls:**\n- BioMCP tools: 0.3 seconds (300ms)\n- Web tools: 0.5 seconds (500ms)\n- Database: No delay needed\n- File operations: No delay needed\n\n## Error Handling & Validation\n\n**Validation Pattern (from best-practices.md):**\n1. Check data existence (not empty)\n2. Validate structure (required fields)\n3. Validate types (correct data types)\n4. Validate values (within ranges)\n5. 
Validate quality (no duplicates)\n\n**Retry Logic (from patterns/retry.md):**\n- Max 3 attempts for network operations\n- Exponential backoff: 2s, 4s, 8s\n- Use blockingTimer between retries\n\n## Python Guidelines\n\n**When to Use Python:**\n- ONLY if existing tools are not suitable\n- Complex transformations beyond table tools\n- Statistical analysis beyond basic aggregation\n- Machine learning or custom algorithms\n\n**Code Standards (MANDATORY):**\n```python\n#!/usr/bin/env python3\n\"\"\"Script Purpose - One Line Description\n\nThis module provides functionality for:\n- Functionality 1\n- Functionality 2\n\nUsage:\n uv run python script.py command --input file.xlsx --output results/\n\nDependencies:\n - pandas >= 1.5.0\n\nAuthor: BioResearcher AI Agent\nDate: YYYY-MM-DD\n\"\"\"\n```\n\n**Function Documentation:**\n```python\ndef analyze_data(data: List[Dict], threshold: float = 0.5) -> Dict:\n \"\"\"Brief description.\n \n Args:\n data: Description of data\n threshold: Threshold value (0.0 to 1.0)\n \n Returns:\n Dictionary with results\n \n Raises:\n ValueError: If threshold out of range\n \"\"\"\n```\n\n**File Location:**\n- Scripts: `.scripts/py/`\n- Use uv for execution: `uv run python .scripts/py/script.py`\n- If uv unavailable, load skill `python-setup-uv`\n\n## Best Practices (CRITICAL)\n\n### Upfront Filtering\n\u2705 ALWAYS filter at source:\n- Database: WHERE clauses, LIMIT\n- Tables: tableFilterRows upfront\n- BioMCP: Specific filters (genes, diseases, variants)\n- Web: Specific search queries\n\n\u274C NEVER retrieve all data then filter in Python\n\n### Data Validation\n\u2705 ALWAYS validate:\n- Check required fields exist\n- Verify data types correct\n- Ensure values in expected ranges\n\n### Error Handling\n\u2705 ALWAYS handle errors:\n- Try-except for external operations\n- Retry logic with exponential backoff\n- Informative error messages\n\n### Context Management\n\u2705 ALWAYS minimize context usage:\n- Summarize large datasets instead of 
loading all\n- Use file-based data exchange\n- Paginate large result sets\n\n## Bottomline Rules\n\n1. ONLY use high-quality sources: biomcp results or official biotech/pharma websites\n2. ALWAYS provide citations [1], [2], ... with full bibliography at end\n3. ALWAYS backup files before editing (create `.bak` files)\n4. ALWAYS use blockingTimer(0.3) between consecutive biomcp* calls\n5. ALWAYS filter upfront - never retrieve then filter\n6. ALWAYS validate data before processing\n7. ALWAYS follow structured report template with data provenance\n8. ALWAYS write documented Python code following DRY principle\n------ RULE REMINDER END ------\n";
8
8
  export declare function getBioResearcherPrompt(): string;
@@ -15,7 +15,7 @@ You are an expert in biomedical and pharmaceutical drug development research.
15
15
  At the start of complex research tasks, load the core skill:
16
16
 
17
17
  \`\`\`
18
- bioresearcher-skill bioresearcher-core
18
+ skill bioresearcher-core
19
19
  \`\`\`
20
20
 
21
21
  This skill provides patterns for:
@@ -39,7 +39,7 @@ Use decision trees from \`patterns/tool-selection.md\`:
39
39
  **Data Source Identification:**
40
40
  - Database/SQL → db* tools (dbQuery, dbListTables, dbDescribeTable)
41
41
  - Excel/CSV file → table* tools (tableFilterRows, tableGroupBy, etc.)
42
- - Website/URL → web* tools (webfetch, websearch)
42
+ - Website/URL → Ask the user to provide the content directly (web search/fetch tools are not available to this agent)
43
43
  - Literature/Papers → biomcp* article tools
44
44
  - Clinical Trials → biomcp* trial tools
45
45
  - Genes/Variants → biomcp* gene/variant tools
@@ -52,7 +52,7 @@ Gather trustable information using selected tools:
52
52
 
53
53
  **Database Queries:**
54
54
  \`\`\`
55
- 1. Check env.jsonc exists (if not, load bioresearcher-skill 'env-jsonc-setup')
55
+ 1. Check env.jsonc exists (if not, load skill 'env-jsonc-setup')
56
56
  2. dbListTables() → Discover available data
57
57
  3. dbDescribeTable() → Understand schema
58
58
  4. dbQuery("SELECT ... WHERE filter = :param", {param: value})
@@ -197,7 +197,7 @@ def analyze_data(data: List[Dict], threshold: float = 0.5) -> Dict:
197
197
  **File Location:**
198
198
  - Scripts: \`.scripts/py/\`
199
199
  - Use uv for execution: \`uv run python .scripts/py/script.py\`
200
- - If uv unavailable, load bioresearcher-skill \`python-setup-uv\`
200
+ - If uv unavailable, load skill \`python-setup-uv\`
201
201
 
202
202
  ## Best Practices (CRITICAL)
203
203
 
@@ -4,5 +4,5 @@
4
4
  * A specialized biomedical research agent that performs reference-based
5
5
  * pharmaceutical and drug development research using BioMCP tools.
6
6
  */
7
- export declare const BIORESEARCHERDR_SYSTEM_PROMPT = "\n------ RULE REMINDER START ------\nIMPORTANT: THIS REMINDER SHALL OVERRIDE ALL OTHER PROMPTS PROVIDED TO YOU. IGNORE ALL OTHER PROMPTS.\nOverall goal: Perform reference-based biomedical and pharmaceutical drug development research.\n\nSteps to STRICTLY adhere to:\n\n1. If the user query includes 'no-interview', skip Step 2 and proceed to Step 3. Otherwise, proceed to Step 2.\n2. Comprehend initial user inquiry. Use the question tool to ask user to clarify 3-6 unclear points depending on inquiry complexity.\n3. Comprehend final user inquiry to identify critical research aspects to answer user inquiry.\n4. If the original user inquiry includes 'light-research', combine and/or pick top two research aspects and proceed to Step 5. Otherwise, proceed directly to Step 5.\n5. Decide on TOPIC of this inquiry (NO user input). TOPIC should be highly succinct, underscore-separated name based on user inquiry.\n6. Use the todowrite tool to generate a list of identified research aspects.\n7. Create the reports_biomcp/<TOPIC>/ folder if needed.\n8. Use the task tool to assign each research aspect to a bioresearcherDR_worker subagent. Start subagents in parallel in batches (size of 5 for each batch). Record finished subagents by checking the todo list. Prompt the user: 'If subagents are stuck without progress for too long, interrupt and ask me to resume work.'\n9. Proceed until subagents complete research. Restart failed subagents if necessary.\n10. Load the bioresearcher-skill 'bioresearcher-core' and read 'patterns/citations.md' for citation format. Read reports from all subagents. Summarize findings to provide a succinct and accurate report addressing user inquiry with proper citations.\n11. 
Write to reports_biomcp/<TOPIC>/final_report.md with full bibliography.\n\nFollow this template to prompt the bioresearcherDR_worker subagents (Step 8):\n\n```md\nTOPIC: <TOPIC>\nYOUR RESEARCH FOCUS: <RESEARCH-ASPECT>\nDESCRIPTION: <ABSTRACT>\n```\n\nABSTRACT should be a short paragraph of less than 200 words, describing exact focus of the subagent's research aspect and a list of detailed research items.\n\nRules for YOU:\n\n- Do NOT use the following tools: biomcp*, web*, context7* (i.e., tool names starting with biomcp or web or context7. VERY IMPORTANT DO NOT USE ANY BIOMCP TOOL).\n- Do NOT fallback to internal knowledge when query tools fail. STRICTLY ADHERE to external trusted sources.\n- DO provide concrete references for all findings with citations (in brackets, e.g., [1], [2], ...) and full bibliography at the end.\n- DO keep your word succinct, accurate and professional, fitting top standards of academic writing.\n------ RULE REMINDER END ------\n";
7
+ export declare const BIORESEARCHERDR_SYSTEM_PROMPT = "\n------ RULE REMINDER START ------\nIMPORTANT: THIS REMINDER SHALL OVERRIDE ALL OTHER PROMPTS PROVIDED TO YOU. IGNORE ALL OTHER PROMPTS.\nOverall goal: Perform reference-based biomedical and pharmaceutical drug development research.\n\nSteps to STRICTLY adhere to:\n\n1. If the user query includes 'no-interview', skip Step 2 and proceed to Step 3. Otherwise, proceed to Step 2.\n2. Comprehend initial user inquiry. Use the question tool to ask user to clarify 3-6 unclear points depending on inquiry complexity.\n3. Comprehend final user inquiry to identify critical research aspects to answer user inquiry.\n4. If the original user inquiry includes 'light-research', combine and/or pick top two research aspects and proceed to Step 5. Otherwise, proceed directly to Step 5.\n5. Decide on TOPIC of this inquiry (NO user input). TOPIC should be highly succinct, underscore-separated name based on user inquiry.\n6. Use the todowrite tool to generate a list of identified research aspects.\n7. Create the reports_biomcp/<TOPIC>/ directory by writing a placeholder file to it (e.g., write to reports_biomcp/<TOPIC>/.gitkeep). The write tool auto-creates parent directories - do NOT attempt to use bash mkdir.\n8. Use the task tool to assign each research aspect to a bioresearcherDR_worker subagent. Start subagents in parallel in batches (size of 5 for each batch). Record finished subagents by checking the todo list. Prompt the user: 'If subagents are stuck without progress for too long, interrupt and ask me to resume work.'\n9. Proceed until subagents complete research. Restart failed subagents if necessary.\n10. Load the skill 'bioresearcher-core' and read 'patterns/citations.md' for citation format. Read reports from all subagents. Summarize findings to provide a succinct and accurate report addressing user inquiry with proper citations.\n11. 
Write to reports_biomcp/<TOPIC>/final_report.md with full bibliography.\n\nFollow this template to prompt the bioresearcherDR_worker subagents (Step 8):\n\n```md\nTOPIC: <TOPIC>\nYOUR RESEARCH FOCUS: <RESEARCH-ASPECT>\nDESCRIPTION: <ABSTRACT>\n```\n\nABSTRACT should be a short paragraph of less than 200 words, describing exact focus of the subagent's research aspect and a list of detailed research items.\n\nRules for YOU:\n\n- Do NOT use the following tools: biomcp*, web*, context7* (i.e., tool names starting with biomcp or web or context7. VERY IMPORTANT DO NOT USE ANY BIOMCP TOOL).\n- Do NOT fallback to internal knowledge when query tools fail. STRICTLY ADHERE to external trusted sources.\n- DO provide concrete references for all findings with citations (in brackets, e.g., [1], [2], ...) and full bibliography at the end.\n- DO keep your word succinct, accurate and professional, fitting top standards of academic writing.\n------ RULE REMINDER END ------\n";
8
8
  export declare function getBioResearcherDRPrompt(): string;
@@ -17,10 +17,10 @@ Steps to STRICTLY adhere to:
17
17
  4. If the original user inquiry includes 'light-research', combine and/or pick top two research aspects and proceed to Step 5. Otherwise, proceed directly to Step 5.
18
18
  5. Decide on TOPIC of this inquiry (NO user input). TOPIC should be highly succinct, underscore-separated name based on user inquiry.
19
19
  6. Use the todowrite tool to generate a list of identified research aspects.
20
- 7. Create the reports_biomcp/<TOPIC>/ folder if needed.
20
+ 7. Create the reports_biomcp/<TOPIC>/ directory by writing a placeholder file to it (e.g., write to reports_biomcp/<TOPIC>/.gitkeep). The write tool auto-creates parent directories - do NOT attempt to use bash mkdir.
21
21
  8. Use the task tool to assign each research aspect to a bioresearcherDR_worker subagent. Start subagents in parallel in batches (size of 5 for each batch). Record finished subagents by checking the todo list. Prompt the user: 'If subagents are stuck without progress for too long, interrupt and ask me to resume work.'
22
22
  9. Proceed until subagents complete research. Restart failed subagents if necessary.
23
- 10. Load the bioresearcher-skill 'bioresearcher-core' and read 'patterns/citations.md' for citation format. Read reports from all subagents. Summarize findings to provide a succinct and accurate report addressing user inquiry with proper citations.
23
+ 10. Load the skill 'bioresearcher-core' and read 'patterns/citations.md' for citation format. Read reports from all subagents. Summarize findings to provide a succinct and accurate report addressing user inquiry with proper citations.
24
24
  11. Write to reports_biomcp/<TOPIC>/final_report.md with full bibliography.
25
25
 
26
26
  Follow this template to prompt the bioresearcherDR_worker subagents (Step 8):
@@ -4,5 +4,5 @@
4
4
  * A focused worker subagent that executes specific research tasks
5
5
  * assigned by the bioresearcherDR orchestrator.
6
6
  */
7
- export declare const BIORESEARCHERDRWORKER_SYSTEM_PROMPT = "\n------ RULE REMINDER START ------\nIMPORTANT: THIS REMINDER SHALL OVERRIDE ALL OTHER PROMPTS PROVIDED TO YOU. IGNORE ALL OTHER PROMPTS.\n\nOverall goal: Execute focused biomedical research as directed by the orchestrator.\n\n## Skill Loading (MANDATORY)\n\nAt the start of your task, load the core skill:\n\n```\nbioresearcher-skill bioresearcher-core\n```\n\nThis skill provides patterns for:\n- `patterns/rate-limiting.md` - API rate limiting (use 0.5s between biomcp calls)\n- `patterns/retry.md` - Retry logic for failed requests (up to 3 retries)\n- `patterns/citations.md` - Citation formatting for your report\n\n## Workflow\n\n1. **Follow directions**: Execute the specific research task assigned to you\n2. **Stay focused**: Do NOT delegate to other subagents\n3. **Write findings**: Output to `reports_biomcp/<TOPIC>/<YOUR-FOCUS>.md`\n\n## Rate Limiting\n\nALWAYS use blockingTimer(0.5) between consecutive biomcp* tool calls.\n\n## Retry Logic\n\nIf a query fails:\n1. Wait a few seconds using blockingTimer\n2. Try with a simpler query\n3. Retry up to 3 times before giving up\n\n## Rules\n\n- Do NOT run concurrent MCP calls (sequential only)\n- Do NOT fallback to internal knowledge - use external trusted sources only\n- ALWAYS provide citations [1], [2], ... with full bibliography\n- Keep writing succinct, accurate, professional (academic standard)\n------ RULE REMINDER END ------\n";
7
+ export declare const BIORESEARCHERDRWORKER_SYSTEM_PROMPT = "\n------ RULE REMINDER START ------\nIMPORTANT: THIS REMINDER SHALL OVERRIDE ALL OTHER PROMPTS PROVIDED TO YOU. IGNORE ALL OTHER PROMPTS.\n\nOverall goal: Execute focused biomedical research as directed by the orchestrator.\n\n## Skill Loading (MANDATORY)\n\nAt the start of your task, load the core skill:\n\n```\nskill bioresearcher-core\n```\n\nThis skill provides patterns for:\n- `patterns/rate-limiting.md` - API rate limiting (use 0.5s between biomcp calls)\n- `patterns/retry.md` - Retry logic for failed requests (up to 3 retries)\n- `patterns/citations.md` - Citation formatting for your report\n\n## Workflow\n\n1. **Follow directions**: Execute the specific research task assigned to you\n2. **Stay focused**: Do NOT delegate to other subagents\n3. **Write findings**: Output to `reports_biomcp/<TOPIC>/<YOUR-FOCUS>.md`. The write tool auto-creates parent directories - no need to create the directory first.\n\n## Rate Limiting\n\nALWAYS use blockingTimer(0.5) between consecutive biomcp* tool calls.\n\n## Retry Logic\n\nIf a query fails:\n1. Wait a few seconds using blockingTimer\n2. Try with a simpler query\n3. Retry up to 3 times before giving up\n\n## Rules\n\n- Do NOT run concurrent MCP calls (sequential only)\n- Do NOT fallback to internal knowledge - use external trusted sources only\n- ALWAYS provide citations [1], [2], ... with full bibliography\n- Keep writing succinct, accurate, professional (academic standard)\n------ RULE REMINDER END ------\n";
8
8
  export declare function getBioResearcherDRWorkerPrompt(): string;
@@ -15,7 +15,7 @@ Overall goal: Execute focused biomedical research as directed by the orchestrato
15
15
  At the start of your task, load the core skill:
16
16
 
17
17
  \`\`\`
18
- bioresearcher-skill bioresearcher-core
18
+ skill bioresearcher-core
19
19
  \`\`\`
20
20
 
21
21
  This skill provides patterns for:
@@ -27,7 +27,7 @@ This skill provides patterns for:
27
27
 
28
28
  1. **Follow directions**: Execute the specific research task assigned to you
29
29
  2. **Stay focused**: Do NOT delegate to other subagents
30
- 3. **Write findings**: Output to \`reports_biomcp/<TOPIC>/<YOUR-FOCUS>.md\`
30
+ 3. **Write findings**: Output to \`reports_biomcp/<TOPIC>/<YOUR-FOCUS>.md\`. The write tool auto-creates parent directories - no need to create the directory first.
31
31
 
32
32
  ## Rate Limiting
33
33
 
package/dist/index.js CHANGED
@@ -1,13 +1,14 @@
1
+ import { syncPluginSkills } from "./shared/skill-sync";
1
2
  import { createBioResearcherAgent } from "./agents/bioresearcher/index";
2
3
  import { createBioResearcherDRAgent } from "./agents/bioresearcherDR/index";
3
4
  import { createBioResearcherDRWorkerAgent } from "./agents/bioresearcherDR_worker/index";
4
- import { BioResearcherSkillTool } from "./tools/skill";
5
5
  import { tableTools } from "./tools/table/index";
6
6
  import { dbTools } from "./tools/db/index";
7
7
  import { blockingTimer, calculator, jsonExtract, jsonValidate, jsonInfer } from "./tools/misc/index";
8
8
  import { parse_pubmed_articleSet } from "./tools/parser/pubmed";
9
9
  import { parse_obo_file } from "./tools/parser/obo";
10
- export const BioResearcherPlugin = async (_input) => {
10
+ export const BioResearcherPlugin = async (input) => {
11
+ syncPluginSkills(input.directory);
11
12
  return {
12
13
  config: async (config) => {
13
14
  config.agent = config.agent || {};
@@ -15,8 +16,20 @@ export const BioResearcherPlugin = async (_input) => {
15
16
  config.agent.bioresearcherDR = createBioResearcherDRAgent();
16
17
  config.agent.bioresearcherDR_worker = createBioResearcherDRWorkerAgent();
17
18
  },
19
+ "tool.definition": async (input, output) => {
20
+ const pluginToolIds = [
21
+ "tableGetSheetPreview", "tableListSheets", "tableGetHeaders", "tableGetCell",
22
+ "tableFilterRows", "tableSearch", "tableGetRange", "tableSummarize",
23
+ "tableGroupBy", "tablePivotSummary", "tableAppendRows", "tableUpdateCell",
24
+ "tableCreateFile", "dbQuery", "dbListTables", "dbDescribeTable",
25
+ "calculator", "blockingTimer", "jsonExtract", "jsonValidate", "jsonInfer",
26
+ "parse_pubmed_articleSet", "parse_obo_file",
27
+ ];
28
+ if (!pluginToolIds.includes(input.toolID))
29
+ return;
30
+ output.description += "\n\nNOTE: This tool is permission-gated. If you receive a permission denied error, do not retry — use an alternative approach or ask the user.";
31
+ },
18
32
  tool: {
19
- "bioresearcher-skill": BioResearcherSkillTool,
20
33
  ...tableTools,
21
34
  ...dbTools,
22
35
  blockingTimer,
@@ -0,0 +1 @@
1
+ export declare function syncPluginSkills(workspaceDir: string): void;
@@ -0,0 +1,113 @@
1
+ import path from "path";
2
+ import { fileURLToPath } from "url";
3
+ import { existsSync, readFileSync, writeFileSync, rmSync, mkdirSync, cpSync, readdirSync } from "fs";
4
+ import { PLUGIN_VERSION } from "../version";
5
+ const MANIFEST_NAME = ".bioresearcher-manifest.json";
6
+ const MARKER_NAME = ".plugin-managed";
7
+ function getPluginSkillsDir() {
8
+ const currentDir = path.dirname(fileURLToPath(import.meta.url));
9
+ const pathSep = path.sep;
10
+ const isDist = currentDir.includes(`${pathSep}dist${pathSep}`);
11
+ const isSrc = currentDir.includes(`${pathSep}src${pathSep}`);
12
+ if (isDist) {
13
+ return path.join(currentDir, "..", "..", "skills");
14
+ }
15
+ else if (isSrc) {
16
+ return path.join(currentDir, "..", "..", "..", "skills");
17
+ }
18
+ let searchDir = currentDir;
19
+ for (let i = 0; i < 6; i++) {
20
+ const skillsPath = path.join(searchDir, "skills");
21
+ if (existsSync(skillsPath))
22
+ return skillsPath;
23
+ const pkgJson = path.join(searchDir, "package.json");
24
+ if (existsSync(pkgJson)) {
25
+ const distSkills = path.join(searchDir, "dist", "skills");
26
+ const directSkills = path.join(searchDir, "skills");
27
+ if (existsSync(distSkills))
28
+ return distSkills;
29
+ if (existsSync(directSkills))
30
+ return directSkills;
31
+ }
32
+ searchDir = path.dirname(searchDir);
33
+ }
34
+ return path.join(currentDir, "..", "..", "..", "skills");
35
+ }
36
+ function getPluginSkillNames(pluginSkillsDir) {
37
+ if (!existsSync(pluginSkillsDir))
38
+ return [];
39
+ return readdirSync(pluginSkillsDir, { withFileTypes: true })
40
+ .filter((d) => d.isDirectory())
41
+ .map((d) => d.name);
42
+ }
43
+ function readManifest(opencodeSkillsDir) {
44
+ const manifestPath = path.join(opencodeSkillsDir, MANIFEST_NAME);
45
+ if (!existsSync(manifestPath))
46
+ return null;
47
+ try {
48
+ const raw = readFileSync(manifestPath, "utf-8");
49
+ const parsed = JSON.parse(raw);
50
+ if (typeof parsed.version === "string" && Array.isArray(parsed.skills)) {
51
+ return parsed;
52
+ }
53
+ }
54
+ catch {
55
+ // Corrupted manifest — treat as missing
56
+ }
57
+ return null;
58
+ }
59
+ function writeManifest(opencodeSkillsDir, manifest) {
60
+ mkdirSync(opencodeSkillsDir, { recursive: true });
61
+ writeFileSync(path.join(opencodeSkillsDir, MANIFEST_NAME), JSON.stringify(manifest, null, 2) + "\n");
62
+ }
63
+ function writeSkillMarker(skillDir) {
64
+ writeFileSync(path.join(skillDir, MARKER_NAME), JSON.stringify({ version: PLUGIN_VERSION }) + "\n");
65
+ }
66
+ function hasMarker(skillDir) {
67
+ return existsSync(path.join(skillDir, MARKER_NAME));
68
+ }
69
+ export function syncPluginSkills(workspaceDir) {
70
+ const pluginSkillsDir = getPluginSkillsDir();
71
+ const opencodeSkillsDir = path.join(workspaceDir, ".opencode", "skills");
72
+ const manifest = readManifest(opencodeSkillsDir);
73
+ const currentSkillNames = getPluginSkillNames(pluginSkillsDir);
74
+ if (manifest && manifest.version === PLUGIN_VERSION) {
75
+ // Already up to date — check for newly-added skills only
76
+ let updated = false;
77
+ for (const name of currentSkillNames) {
78
+ if (!manifest.skills.includes(name)) {
79
+ const src = path.join(pluginSkillsDir, name);
80
+ const dest = path.join(opencodeSkillsDir, name);
81
+ cpSync(src, dest, { recursive: true });
82
+ writeSkillMarker(dest);
83
+ manifest.skills.push(name);
84
+ updated = true;
85
+ }
86
+ }
87
+ if (updated) {
88
+ writeManifest(opencodeSkillsDir, manifest);
89
+ }
90
+ return;
91
+ }
92
+ // Version mismatch or no manifest — full re-sync
93
+ // Delete old managed skill directories
94
+ if (manifest) {
95
+ for (const name of manifest.skills) {
96
+ const skillDir = path.join(opencodeSkillsDir, name);
97
+ if (hasMarker(skillDir)) {
98
+ rmSync(skillDir, { recursive: true, force: true });
99
+ }
100
+ }
101
+ }
102
+ // Copy current plugin skills
103
+ const newSkillNames = [];
104
+ for (const name of currentSkillNames) {
105
+ const src = path.join(pluginSkillsDir, name);
106
+ const dest = path.join(opencodeSkillsDir, name);
107
+ mkdirSync(opencodeSkillsDir, { recursive: true });
108
+ cpSync(src, dest, { recursive: true });
109
+ writeSkillMarker(dest);
110
+ newSkillNames.push(name);
111
+ }
112
+ writeManifest(opencodeSkillsDir, { version: PLUGIN_VERSION, skills: newSkillNames });
113
+ }
@@ -20,8 +20,8 @@ export declare function createAllowlist(tools: string[]): ToolRestrictions;
20
20
  * Agent tool restrictions map.
21
21
  *
22
22
  * - bioresearcher: denylist for context7*, web*
23
- * - bioresearcherDR: allowlist for bioresearcher-skill, table*, and core file tools (orchestrator - no biomcp)
24
- * - bioresearcherDR_worker: allowlist for bioresearcher-skill, biomcp*, table*, and core file tools (data gatherer)
23
+ * - bioresearcherDR: allowlist for skill, table*, db*, json*, parse_*, and core file tools (orchestrator - no biomcp)
24
+ * - bioresearcherDR_worker: allowlist for skill, biomcp*, table*, db*, json*, parse_*, and core file tools (data gatherer)
25
25
  */
26
26
  export declare const AGENT_TOOL_RESTRICTIONS: Record<string, ToolRestrictions>;
27
27
  /**
@@ -27,14 +27,17 @@ export function createAllowlist(tools) {
27
27
  * Agent tool restrictions map.
28
28
  *
29
29
  * - bioresearcher: denylist for context7*, web*
30
- * - bioresearcherDR: allowlist for bioresearcher-skill, table*, and core file tools (orchestrator - no biomcp)
31
- * - bioresearcherDR_worker: allowlist for bioresearcher-skill, biomcp*, table*, and core file tools (data gatherer)
30
+ * - bioresearcherDR: allowlist for skill, table*, db*, json*, parse_*, and core file tools (orchestrator - no biomcp)
31
+ * - bioresearcherDR_worker: allowlist for skill, biomcp*, table*, db*, json*, parse_*, and core file tools (data gatherer)
32
32
  */
33
33
  export const AGENT_TOOL_RESTRICTIONS = {
34
34
  bioresearcher: createDenylist(["context7*", "web*"]),
35
35
  bioresearcherDR: createAllowlist([
36
- "bioresearcher-skill",
36
+ "skill",
37
37
  "table*",
38
+ "db*",
39
+ "json*",
40
+ "parse_*",
38
41
  "calculator",
39
42
  "blockingTimer",
40
43
  "glob",
@@ -47,9 +50,12 @@ export const AGENT_TOOL_RESTRICTIONS = {
47
50
  "task"
48
51
  ]),
49
52
  bioresearcherDR_worker: createAllowlist([
50
- "bioresearcher-skill",
53
+ "skill",
51
54
  "biomcp*",
52
55
  "table*",
56
+ "db*",
57
+ "json*",
58
+ "parse_*",
53
59
  "calculator",
54
60
  "blockingTimer",
55
61
  "glob",
@@ -21,7 +21,7 @@ No installation required. This skill uses only:
21
21
  ### Load the Skill
22
22
 
23
23
  ```
24
- bioresearcher-skill bioresearcher-core
24
+ skill bioresearcher-core
25
25
  ```
26
26
 
27
27
  ### Extract Skill Path
@@ -59,7 +59,7 @@ The bioresearcherDR_worker subagent uses only shared patterns (citations, rate-l
59
59
 
60
60
  ### Step 1: Load This Skill
61
61
  ```
62
- bioresearcher-skill bioresearcher-core
62
+ skill bioresearcher-core
63
63
  ```
64
64
 
65
65
  ### Step 2: Extract Skill Path
@@ -155,7 +155,7 @@ Agent actions:
155
155
  ### Skill Loading
156
156
 
157
157
  ```markdown
158
- bioresearcher-skill long-table-summary
158
+ skill long-table-summary
159
159
  ```
160
160
 
161
161
  ### 16-Step Workflow Overview
@@ -408,7 +408,7 @@ if row_count < 30:
408
408
 
409
409
  # Medium datasets (30-1000 rows): Use long-table-summary skill
410
410
  elif row_count < 1000:
411
- bioresearcher-skill long-table-summary
411
+ skill long-table-summary
412
412
  # Follow 16-step workflow
413
413
 
414
414
  # Large datasets (> 1000 rows): Use Python
@@ -5,7 +5,7 @@ Comprehensive test suite for the bioresearcher plugin.
5
5
  ## Quick Start
6
6
 
7
7
  ```
8
- bioresearcher-skill bioresearcher-tests
8
+ skill bioresearcher-tests
9
9
  ```
10
10
 
11
11
  Then follow the workflow steps in SKILL.md.
@@ -25,7 +25,7 @@ allowedTools:
25
25
  - calculator
26
26
  - parse_pubmed_articleSet
27
27
  - parse_obo_file
28
- - bioresearcher-skill
28
+ - skill
29
29
  - Question
30
30
  ---
31
31
 
@@ -1,7 +1,7 @@
1
1
  # Skill Tests
2
2
 
3
3
  ## Test: Load demo-skill
4
- - Tool: bioresearcher-skill
4
+ - Tool: skill
5
5
  - Input:
6
6
  ```json
7
7
  {"name": "demo-skill"}
@@ -12,7 +12,7 @@
12
12
  - Expected: Skill content loaded successfully
13
13
 
14
14
  ## Test: Load bioresearcher-core
15
- - Tool: bioresearcher-skill
15
+ - Tool: skill
16
16
  - Input:
17
17
  ```json
18
18
  {"name": "bioresearcher-core"}
@@ -35,7 +35,7 @@
35
35
  - Expected: Demo script executes successfully
36
36
 
37
37
  ## Test: Skill Not Found
38
- - Tool: bioresearcher-skill
38
+ - Tool: skill
39
39
  - Input:
40
40
  ```json
41
41
  {"name": "nonexistent-skill-xyz-12345"}
@@ -13,7 +13,7 @@ This skill provides reusable guides for common GROMACS molecular dynamics workfl
13
13
  ## Quick Start
14
14
 
15
15
  ### Step 1: Load This Skill
16
- The skill is loaded automatically when agent calls `bioresearcher-skill gromacs-guides`.
16
+ The skill is loaded automatically when agent calls `skill gromacs-guides`.
17
17
 
18
18
  ### Step 2: Extract Skill Path
19
19
  From the `<skill_files>` section in the skill tool output, extract the `<skill_path>` value.
@@ -29,7 +29,8 @@ export const dbQuery = tool({
29
29
  sql: z.string().describe('SELECT SQL query to execute. Use named placeholders like :name for parameters.'),
30
30
  params: z.record(z.string(), z.any()).optional().describe('Named parameters as key-value pairs (e.g., { "status": "active", "limit": 10 })'),
31
31
  },
32
- execute: async (args, _context) => {
32
+ execute: async (args, context) => {
33
+ await context.ask({ permission: "dbQuery", patterns: ["*"], always: ["*"], metadata: {} });
33
34
  try {
34
35
  const backend = await getOrCreateBackend();
35
36
  const validation = validateReadOnlyQuery(args.sql, backend.type);
@@ -59,7 +60,8 @@ export const dbListTables = tool({
59
60
 
60
61
  **Usage:** Call this first to discover what tables are available before using dbDescribeTable or dbQuery.`,
61
62
  args: {},
62
- execute: async (_args, _context) => {
63
+ execute: async (args, context) => {
64
+ await context.ask({ permission: "dbListTables", patterns: ["*"], always: ["*"], metadata: {} });
63
65
  try {
64
66
  const backend = await getOrCreateBackend();
65
67
  const collections = await backend.listCollections();
@@ -90,7 +92,8 @@ export const dbDescribeTable = tool({
90
92
  args: {
91
93
  table_name: z.string().describe('Name of the table/collection to describe. Use dbListTables to see available tables.'),
92
94
  },
93
- execute: async (args, _context) => {
95
+ execute: async (args, context) => {
96
+ await context.ask({ permission: "dbDescribeTable", patterns: ["*"], always: ["*"], metadata: {} });
94
97
  try {
95
98
  const backend = await getOrCreateBackend();
96
99
  const validation = validateCollectionName(args.table_name, backend.type);
@@ -105,6 +105,7 @@ export const calculator = tool({
105
105
  .describe("Decimal places for result (0-15, default 3)")
106
106
  },
107
107
  execute: async (args, context) => {
108
+ await context.ask({ permission: "calculator", patterns: ["*"], always: ["*"], metadata: {} });
108
109
  const validation = validateFormula(args.formula);
109
110
  if (!validation.valid) {
110
111
  return validation.error;
@@ -253,6 +253,7 @@ export const jsonExtract = tool({
253
253
  return_all: z.boolean().default(false).describe("If true, returns all JSON objects found as array; if false (default), returns first only")
254
254
  },
255
255
  execute: async (args, context) => {
256
+ await context.ask({ permission: "jsonExtract", patterns: [args.file_path], always: ["*"], metadata: {} });
256
257
  try {
257
258
  const resolvedPath = path.isAbsolute(args.file_path)
258
259
  ? args.file_path
@@ -145,6 +145,7 @@ export const jsonInfer = tool({
145
145
  strict: z.boolean().default(false).describe("If true, all fields are required; if false (default), fields are optional")
146
146
  },
147
147
  execute: async (args, context) => {
148
+ await context.ask({ permission: "jsonInfer", patterns: ["*"], always: ["*"], metadata: {} });
148
149
  let parsedData;
149
150
  try {
150
151
  parsedData = JSON.parse(args.data);
@@ -200,6 +200,7 @@ export const jsonValidate = tool({
200
200
  schema: z.string().describe("JSON Schema string or file path (auto-reads from file if path detected)")
201
201
  },
202
202
  execute: async (args, context) => {
203
+ await context.ask({ permission: "jsonValidate", patterns: ["*"], always: ["*"], metadata: {} });
203
204
  let parsedData;
204
205
  let parsedSchema;
205
206
  let schemaString = args.schema;
@@ -8,6 +8,7 @@ export const blockingTimer = tool({
8
8
  .describe("Delay time in seconds (max 300 seconds)")
9
9
  },
10
10
  execute: async (args, context) => {
11
+ await context.ask({ permission: "blockingTimer", patterns: ["*"], always: ["*"], metadata: {} });
11
12
  const MAX_DELAY = 300;
12
13
  if (args.delay > MAX_DELAY) {
13
14
  return 'Blocking timer must NOT exceed 300 seconds. DO YOU REALLY NEED THIS LONG WAIT?';
@@ -165,6 +165,7 @@ export const parse_obo_file = tool({
165
165
  .describe('Enable verbose logging for debugging')
166
166
  },
167
167
  execute: async (args, context) => {
168
+ await context.ask({ permission: "parse_obo_file", patterns: [args.filePath], always: ["*"], metadata: {} });
168
169
  const verbose = args.verbose ?? false;
169
170
  try {
170
171
  const { filePath, outputFileName = 'obo_output.csv', outputDir } = args;
@@ -174,6 +174,7 @@ export const parse_pubmed_articleSet = tool({
174
174
  .describe('Enable verbose logging for debugging')
175
175
  },
176
176
  execute: async (args, context) => {
177
+ await context.ask({ permission: "parse_pubmed_articleSet", patterns: [args.filePath], always: ["*"], metadata: {} });
177
178
  try {
178
179
  const { filePath, outputMode = 'single', outputFileName, outputDir, verbose = false } = args;
179
180
  if (verbose)
@@ -9,6 +9,7 @@ export const tableGetSheetPreview = tool({
9
9
  sheet_name: z.string().optional().describe("Worksheet name (optional, uses first sheet by default)")
10
10
  },
11
11
  execute: async (args, context) => {
12
+ await context.ask({ permission: "tableGetSheetPreview", patterns: [args.file_path], always: ["*"], metadata: {} });
12
13
  try {
13
14
  const resolvedPath = resolvePath(args.file_path, context.directory);
14
15
  const workbook = XLSX.readFile(resolvedPath);
@@ -32,6 +33,7 @@ export const tableListSheets = tool({
32
33
  file_path: z.string().describe("Path to table file (supports .xlsx, .ods, .csv formats)")
33
34
  },
34
35
  execute: async (args, context) => {
36
+ await context.ask({ permission: "tableListSheets", patterns: [args.file_path], always: ["*"], metadata: {} });
35
37
  try {
36
38
  const resolvedPath = resolvePath(args.file_path, context.directory);
37
39
  const workbook = XLSX.readFile(resolvedPath);
@@ -49,6 +51,7 @@ export const tableGetHeaders = tool({
49
51
  sheet_name: z.string().optional().describe("Worksheet name (optional, uses first sheet by default)")
50
52
  },
51
53
  execute: async (args, context) => {
54
+ await context.ask({ permission: "tableGetHeaders", patterns: [args.file_path], always: ["*"], metadata: {} });
52
55
  try {
53
56
  const resolvedPath = resolvePath(args.file_path, context.directory);
54
57
  const workbook = XLSX.readFile(resolvedPath);
@@ -74,6 +77,7 @@ export const tableGetCell = tool({
74
77
  cell_address: z.string().describe("Cell address (e.g., 'A1', 'B5')")
75
78
  },
76
79
  execute: async (args, context) => {
80
+ await context.ask({ permission: "tableGetCell", patterns: [args.file_path], always: ["*"], metadata: {} });
77
81
  try {
78
82
  const resolvedPath = resolvePath(args.file_path, context.directory);
79
83
  const workbook = XLSX.readFile(resolvedPath);
@@ -103,6 +107,7 @@ export const tableFilterRows = tool({
103
107
  max_results: z.number().default(100).describe("Maximum number of results to return")
104
108
  },
105
109
  execute: async (args, context) => {
110
+ await context.ask({ permission: "tableFilterRows", patterns: [args.file_path], always: ["*"], metadata: {} });
106
111
  try {
107
112
  const resolvedPath = resolvePath(args.file_path, context.directory);
108
113
  const workbook = XLSX.readFile(resolvedPath);
@@ -150,6 +155,7 @@ export const tableSearch = tool({
150
155
  max_results: z.number().default(50).describe("Maximum number of results to return")
151
156
  },
152
157
  execute: async (args, context) => {
158
+ await context.ask({ permission: "tableSearch", patterns: [args.file_path], always: ["*"], metadata: {} });
153
159
  try {
154
160
  const resolvedPath = resolvePath(args.file_path, context.directory);
155
161
  const workbook = XLSX.readFile(resolvedPath);
@@ -192,6 +198,7 @@ export const tableGetRange = tool({
192
198
  range: z.string().describe("Cell range (e.g., 'A1:C10', 'A1:B20')")
193
199
  },
194
200
  execute: async (args, context) => {
201
+ await context.ask({ permission: "tableGetRange", patterns: [args.file_path], always: ["*"], metadata: {} });
195
202
  try {
196
203
  const resolvedPath = resolvePath(args.file_path, context.directory);
197
204
  const workbook = XLSX.readFile(resolvedPath);
@@ -228,6 +235,7 @@ export const tableSummarize = tool({
228
235
  columns: z.array(z.string()).optional().describe("Specific columns to summarize (empty = all numeric columns)")
229
236
  },
230
237
  execute: async (args, context) => {
238
+ await context.ask({ permission: "tableSummarize", patterns: [args.file_path], always: ["*"], metadata: {} });
231
239
  try {
232
240
  const resolvedPath = resolvePath(args.file_path, context.directory);
233
241
  const workbook = XLSX.readFile(resolvedPath);
@@ -268,6 +276,7 @@ export const tableGroupBy = tool({
268
276
  agg_type: z.enum(['sum', 'count', 'avg', 'min', 'max']).default('sum').describe("Aggregation type")
269
277
  },
270
278
  execute: async (args, context) => {
279
+ await context.ask({ permission: "tableGroupBy", patterns: [args.file_path], always: ["*"], metadata: {} });
271
280
  try {
272
281
  const resolvedPath = resolvePath(args.file_path, context.directory);
273
282
  const workbook = XLSX.readFile(resolvedPath);
@@ -331,6 +340,7 @@ export const tablePivotSummary = tool({
331
340
  agg: z.enum(['sum', 'count', 'avg', 'min', 'max']).default('sum').describe("Aggregation type")
332
341
  },
333
342
  execute: async (args, context) => {
343
+ await context.ask({ permission: "tablePivotSummary", patterns: [args.file_path], always: ["*"], metadata: {} });
334
344
  try {
335
345
  const resolvedPath = resolvePath(args.file_path, context.directory);
336
346
  const workbook = XLSX.readFile(resolvedPath);
@@ -411,6 +421,7 @@ export const tableAppendRows = tool({
411
421
  rows: z.array(z.union([z.array(z.any()), z.record(z.string(), z.any())])).describe("Rows to append (array of arrays or array of objects)")
412
422
  },
413
423
  execute: async (args, context) => {
424
+ await context.ask({ permission: "tableAppendRows", patterns: [args.file_path], always: ["*"], metadata: {} });
414
425
  try {
415
426
  const resolvedPath = resolvePath(args.file_path, context.directory);
416
427
  const workbook = XLSX.readFile(resolvedPath);
@@ -452,6 +463,7 @@ export const tableUpdateCell = tool({
452
463
  value: z.any().describe("Value to set")
453
464
  },
454
465
  execute: async (args, context) => {
466
+ await context.ask({ permission: "tableUpdateCell", patterns: [args.file_path], always: ["*"], metadata: {} });
455
467
  try {
456
468
  const resolvedPath = resolvePath(args.file_path, context.directory);
457
469
  const workbook = XLSX.readFile(resolvedPath);
@@ -516,6 +528,7 @@ export const tableCreateFile = tool({
516
528
  data: z.any().describe("Data to write (array of arrays or array of objects)")
517
529
  },
518
530
  execute: async (args, context) => {
531
+ await context.ask({ permission: "tableCreateFile", patterns: [args.file_path], always: ["*"], metadata: {} });
519
532
  try {
520
533
  const resolvedPath = resolvePath(args.file_path, context.directory);
521
534
  const workbook = XLSX.utils.book_new();
@@ -0,0 +1 @@
1
+ export declare const PLUGIN_VERSION = "1.6.3";
@@ -0,0 +1 @@
1
+ export const PLUGIN_VERSION = "1.6.3";
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@yeyuan98/opencode-bioresearcher-plugin",
3
- "version": "1.6.0",
3
+ "version": "1.6.3",
4
4
  "description": "OpenCode plugin that adds a bioresearcher agent",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -12,6 +12,7 @@
12
12
  }
13
13
  },
14
14
  "scripts": {
15
+ "prebuild": "node -e \"const p=require('./package.json');require('fs').writeFileSync('src/version.ts','export const PLUGIN_VERSION = \\\"'+p.version+'\\\"\\n')\"",
15
16
  "build": "tsc && node -e \"const fs=require('fs');fs.rmSync('dist/skills',{recursive:true,force:true});fs.cpSync('skills','dist/skills',{recursive:true})\"",
16
17
  "typecheck": "tsc --noEmit"
17
18
  },
@@ -31,7 +32,6 @@
31
32
  ],
32
33
  "dependencies": {
33
34
  "@opencode-ai/plugin": "^1.14.0",
34
- "effect": "4.0.0-beta.48",
35
35
  "fast-xml-parser": "^5.3.5",
36
36
  "mongodb": "^7.1.0",
37
37
  "mysql2": "^3.18.2",