oh-my-opencode 3.17.1 → 3.17.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -30,4 +30,5 @@ export declare function detectedToInitialValues(detected: DetectedConfig): {
  zaiCodingPlan: BooleanArg;
  kimiForCoding: BooleanArg;
  opencodeGo: BooleanArg;
+ vercelAiGateway: BooleanArg;
  };
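
A minimal sketch of what the widened return shape implies for callers. The BooleanArg alias and the passthrough below are illustrative assumptions, not the package's actual implementation:

// Sketch only: the real BooleanArg alias may differ.
type BooleanArg = boolean;

interface DetectedConfig {
  hasVercelAiGateway: boolean;
}

// The declaration suggests detectedToInitialValues now carries the detected
// Vercel AI Gateway flag through to the initial values.
function toInitialValues(detected: DetectedConfig): { vercelAiGateway: BooleanArg } {
  return { vercelAiGateway: detected.hasVercelAiGateway };
}
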
@@ -10,6 +10,7 @@ export interface ProviderAvailability {
  zai: boolean;
  kimiForCoding: boolean;
  opencodeGo: boolean;
+ vercelAiGateway: boolean;
  isMaxPlan: boolean;
  }
  export interface AgentConfig {
@@ -1 +1 @@
- export { transformModelForProvider } from "../shared/provider-model-id-transform";
+ export declare function transformModelForProvider(provider: string, model: string): string;
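
The re-export from ../shared/provider-model-id-transform is replaced by an inline declaration with an explicit signature. A hypothetical call, only to show the shape; both string arguments are made-up values:

declare function transformModelForProvider(provider: string, model: string): string;

// Returns whatever provider-specific model ID the implementation derives.
const resolvedModel: string = transformModelForProvider("vercel-ai-gateway", "anthropic/claude-sonnet-4");
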
@@ -10,6 +10,7 @@ export interface InstallArgs {
  zaiCodingPlan?: BooleanArg;
  kimiForCoding?: BooleanArg;
  opencodeGo?: BooleanArg;
+ vercelAiGateway?: BooleanArg;
  skipAuth?: boolean;
  }
  export interface InstallConfig {
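
A sketch of InstallArgs with the new flag set. Only fields visible in this hunk are modeled, and BooleanArg is assumed here to accept a plain boolean:

type BooleanArg = boolean; // assumption for illustration

interface InstallArgs {
  opencodeGo?: BooleanArg;
  vercelAiGateway?: BooleanArg; // new in 3.17.2
  skipAuth?: boolean;
}

const installArgs: InstallArgs = { vercelAiGateway: true, skipAuth: false };
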
@@ -22,6 +23,7 @@ export interface InstallConfig {
  hasZaiCodingPlan: boolean;
  hasKimiForCoding: boolean;
  hasOpencodeGo: boolean;
+ hasVercelAiGateway: boolean;
  }
  export interface ConfigMergeResult {
  success: boolean;
@@ -40,4 +42,5 @@ export interface DetectedConfig {
  hasZaiCodingPlan: boolean;
  hasKimiForCoding: boolean;
  hasOpencodeGo: boolean;
+ hasVercelAiGateway: boolean;
  }
@@ -1,3 +1,3 @@
  import type { OpencodeClient } from "./opencode-client";
  export declare const MIN_SESSION_GONE_POLLS = 3;
- export declare function verifySessionExists(client: OpencodeClient, sessionID: string): Promise<boolean>;
+ export declare function verifySessionExists(client: OpencodeClient, sessionID: string, directory?: string): Promise<boolean>;
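
Because the new directory parameter is optional, existing two-argument callers keep compiling; it presumably scopes the session lookup to one project directory. A sketch with placeholder values:

type OpencodeClient = unknown; // stand-in for the imported type
declare const client: OpencodeClient;
declare function verifySessionExists(client: OpencodeClient, sessionID: string, directory?: string): Promise<boolean>;

// Old call shape, still valid:
const aliveAnywhere = await verifySessionExists(client, "ses_123");
// New call shape, scoped to a directory (path is a placeholder):
const aliveHere = await verifySessionExists(client, "ses_123", "/path/to/project");
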
@@ -9,7 +9,7 @@ export interface SubagentSpawnContext {
  }
  export declare function getMaxSubagentDepth(config?: BackgroundTaskConfig): number;
  export declare function getMaxRootSessionSpawnBudget(config?: BackgroundTaskConfig): number;
- export declare function resolveSubagentSpawnContext(client: OpencodeClient, parentSessionID: string): Promise<SubagentSpawnContext>;
+ export declare function resolveSubagentSpawnContext(client: OpencodeClient, parentSessionID: string, directory?: string): Promise<SubagentSpawnContext>;
  export declare function createSubagentDepthLimitError(input: {
  childDepth: number;
  maxDepth: number;
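
A sketch combining the widened resolveSubagentSpawnContext with the depth helpers declared here, with the imported types assumed in scope. The depth field on SubagentSpawnContext, and any error-input fields beyond childDepth/maxDepth, are assumptions this hunk does not confirm:

declare const client: OpencodeClient;
declare const config: BackgroundTaskConfig | undefined;

const ctx = await resolveSubagentSpawnContext(client, "ses_parent", "/path/to/project");
const maxDepth = getMaxSubagentDepth(config);
const childDepth = ctx.depth + 1; // `depth` is an assumed field name
if (childDepth > maxDepth) {
  // The input object may require more fields than this hunk shows.
  throw createSubagentDepthLimitError({ childDepth, maxDepth });
}
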
@@ -14,6 +14,7 @@ export type SessionStatusMap = Record<string, {
  export declare function checkAndInterruptStaleTasks(args: {
  tasks: Iterable<BackgroundTask>;
  client: OpencodeClient;
+ directory?: string;
  config: BackgroundTaskConfig | undefined;
  concurrencyManager: ConcurrencyManager;
  notifyParentSession: (task: BackgroundTask) => Promise<void>;
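
A sketch of a call threading the new directory field through. Every value is a placeholder, the surrounding types are assumed in scope, and the args object may take more fields than the hunk shows:

declare const backgroundTasks: Iterable<BackgroundTask>;
declare const client: OpencodeClient;
declare const config: BackgroundTaskConfig | undefined;
declare const concurrencyManager: ConcurrencyManager;

await checkAndInterruptStaleTasks({
  tasks: backgroundTasks,
  client,
  directory: "/path/to/project", // new optional field in 3.17.2
  config,
  concurrencyManager,
  notifyParentSession: async (task) => {
    // placeholder: tell the parent session its task was interrupted
  },
});
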
@@ -1,6 +1,6 @@
  export declare const DIRECT_WORK_REMINDER: string;
  export declare const BOULDER_CONTINUATION_PROMPT: string;
- export declare const VERIFICATION_REMINDER = "**THE SUBAGENT JUST CLAIMED THIS TASK IS DONE. THEY ARE PROBABLY LYING.**\n\nSubagents say \"done\" when code has errors, tests pass trivially, logic is wrong,\nor they quietly added features nobody asked for. This happens EVERY TIME.\nAssume the work is broken until YOU prove otherwise.\n\n---\n\n**PHASE 1: READ THE CODE FIRST (before running anything)**\n\nDo NOT run tests yet. Read the code FIRST so you know what you're testing.\n\n1. `Bash(\"git diff --stat\")` - see exactly which files changed. Any file outside expected scope = scope creep.\n2. `Read` EVERY changed file - no exceptions, no skimming.\n3. For EACH file, critically ask:\n - Does this code ACTUALLY do what the task required? (Re-read the task, compare line by line)\n - Any stubs, TODOs, placeholders, hardcoded values? (`Grep` for TODO, FIXME, HACK, xxx)\n - Logic errors? Trace the happy path AND the error path in your head.\n - Anti-patterns? (`Grep` for `as any`, `@ts-ignore`, empty catch, console.log in changed files)\n - Scope creep? Did the subagent touch things or add features NOT in the task spec?\n4. Cross-check every claim:\n - Said \"Updated X\" - READ X. Actually updated, or just superficially touched?\n - Said \"Added tests\" - READ the tests. Do they test REAL behavior or just `expect(true).toBe(true)`?\n - Said \"Follows patterns\" - OPEN a reference file. Does it ACTUALLY match?\n\n**If you cannot explain what every changed line does, you have NOT reviewed it.**\n\n**PHASE 2: RUN AUTOMATED CHECKS (targeted, then broad)**\n\nNow that you understand the code, verify mechanically:\n1. `lsp_diagnostics` on EACH changed file - ZERO new errors\n2. Run tests for changed modules FIRST, then full suite\n3. Build/typecheck - exit 0\n\nIf Phase 1 found issues but Phase 2 passes: Phase 2 is WRONG. The code has bugs that tests don't cover. Fix the code.\n\n**PHASE 3: HANDS-ON QA - ACTUALLY RUN IT (MANDATORY for user-facing changes)**\n\nTests and linters CANNOT catch: visual bugs, wrong CLI output, broken user flows, API response shape issues.\n\n**If this task produced anything a user would SEE or INTERACT with, you MUST launch it and verify yourself.**\n\n- **Frontend/UI**: `/playwright` skill - load the page, click through the flow, check console. Verify: page loads, interactions work, console clean, responsive.\n- **TUI/CLI**: `interactive_bash` - run the command, try good input, try bad input, try --help. Verify: command runs, output correct, error messages helpful, edge inputs handled.\n- **API/Backend**: `Bash` with curl - hit the endpoint, check response body, send malformed input. Verify: returns 200, body correct, error cases return proper errors.\n- **Config/Build**: Actually start the service or import the config. Verify: loads without error, backward compatible.\n\nThis is NOT optional \"if applicable\". If the deliverable is user-facing and you did not run it, you are shipping untested work.\n\n**PHASE 4: GATE DECISION - Should you proceed to the next task?**\n\nAnswer honestly:\n1. Can I explain what EVERY changed line does? (If no - back to Phase 1)\n2. Did I SEE it work with my own eyes? (If user-facing and no - back to Phase 3)\n3. Am I confident nothing existing is broken? (If no - run broader tests)\n\nALL three must be YES. \"Probably\" = NO. \"I think so\" = NO. Investigate until CERTAIN.\n\n- **All 3 YES** - Proceed: mark task complete, move to next.\n- **Any NO** - Reject: resume session with `session_id`, fix the specific issue.\n- **Unsure** - Reject: \"unsure\" = \"no\". Investigate until you have a definitive answer.\n\n**DO NOT proceed to the next task until all 4 phases are complete and the gate passes.**";
- export declare const VERIFICATION_REMINDER_GEMINI = "**THE SUBAGENT HAS FINISHED. THEIR WORK IS EXTREMELY SUSPICIOUS.**\n\nThe subagent CLAIMS this task is done. Based on thousands of executions, subagent claims are FALSE more often than true.\nThey ROUTINELY:\n- Ship code with syntax errors they didn't bother to check\n- Create stub implementations with TODOs and call it \"done\"\n- Write tests that pass trivially (testing nothing meaningful)\n- Implement logic that does NOT match what was requested\n- Add features nobody asked for and call it \"improvement\"\n- Report \"all tests pass\" when they didn't run any tests\n\n**This is NOT a theoretical warning. This WILL happen on this task. Assume the work is BROKEN.**\n\n**YOU MUST VERIFY WITH ACTUAL TOOL CALLS. NOT REASONING. TOOL CALLS.**\nThinking \"it looks correct\" is NOT verification. Running `lsp_diagnostics` IS.\n\n---\n\n**PHASE 1: READ THE CODE FIRST (DO NOT SKIP - DO NOT RUN TESTS YET)**\n\nRead the code FIRST so you know what you're testing.\n\n1. `Bash(\"git diff --stat\")` - see exactly which files changed.\n2. `Read` EVERY changed file - no exceptions, no skimming.\n3. For EACH file:\n - Does this code ACTUALLY do what the task required? RE-READ the task spec.\n - Any stubs, TODOs, placeholders? `Grep` for TODO, FIXME, HACK, xxx\n - Anti-patterns? `Grep` for `as any`, `@ts-ignore`, empty catch\n - Scope creep? Did the subagent add things NOT in the task spec?\n4. Cross-check EVERY claim against actual code.\n\n**If you cannot explain what every changed line does, GO BACK AND READ AGAIN.**\n\n**PHASE 2: RUN AUTOMATED CHECKS**\n\n1. `lsp_diagnostics` on EACH changed file - ZERO new errors. ACTUALLY RUN THIS.\n2. Run tests for changed modules, then full suite. ACTUALLY RUN THESE.\n3. Build/typecheck - exit 0.\n\nIf Phase 1 found issues but Phase 2 passes: Phase 2 is WRONG. Fix the code.\n\n**PHASE 3: HANDS-ON QA (MANDATORY for user-facing changes)**\n\n- **Frontend/UI**: `/playwright`\n- **TUI/CLI**: `interactive_bash`\n- **API/Backend**: `Bash` with curl\n\n**If user-facing and you did not run it, you are shipping UNTESTED BROKEN work.**\n\n**PHASE 4: GATE DECISION**\n\n1. Can I explain what EVERY changed line does? (If no \u2192 Phase 1)\n2. Did I SEE it work via tool calls? (If user-facing and no \u2192 Phase 3)\n3. Am I confident nothing is broken? (If no \u2192 broader tests)\n\nALL three must be YES. \"Probably\" = NO. \"I think so\" = NO.\n\n**DO NOT proceed to the next task until all 4 phases are complete.**";
+ export declare const VERIFICATION_REMINDER = "**THE SUBAGENT JUST CLAIMED THIS TASK IS DONE. THEY ARE PROBABLY LYING.**\n\nSubagents say \"done\" when code has errors, tests pass trivially, logic is wrong,\nor they quietly added features nobody asked for. This happens EVERY TIME.\nAssume the work is broken until YOU prove otherwise.\n\n---\n\n**PHASE 1: READ THE CODE FIRST (before running anything)**\n\nDo NOT run tests yet. Read the code FIRST so you know what you're testing.\n\n1. `Bash(\"git diff --stat -- ':!node_modules'\")` - see exactly which files changed. Any file outside expected scope = scope creep.\n2. `Read` EVERY changed file - no exceptions, no skimming.\n3. For EACH file, critically ask:\n - Does this code ACTUALLY do what the task required? (Re-read the task, compare line by line)\n - Any stubs, TODOs, placeholders, hardcoded values? (`Grep` for TODO, FIXME, HACK, xxx)\n - Logic errors? Trace the happy path AND the error path in your head.\n - Anti-patterns? (`Grep` for `as any`, `@ts-ignore`, empty catch, console.log in changed files)\n - Scope creep? Did the subagent touch things or add features NOT in the task spec?\n4. Cross-check every claim:\n - Said \"Updated X\" - READ X. Actually updated, or just superficially touched?\n - Said \"Added tests\" - READ the tests. Do they test REAL behavior or just `expect(true).toBe(true)`?\n - Said \"Follows patterns\" - OPEN a reference file. Does it ACTUALLY match?\n\n**If you cannot explain what every changed line does, you have NOT reviewed it.**\n\n**PHASE 2: RUN AUTOMATED CHECKS (targeted, then broad)**\n\nNow that you understand the code, verify mechanically:\n1. `lsp_diagnostics` on EACH changed file - ZERO new errors\n2. Run tests for changed modules FIRST, then full suite\n3. Build/typecheck - exit 0\n\nIf Phase 1 found issues but Phase 2 passes: Phase 2 is WRONG. The code has bugs that tests don't cover. Fix the code.\n\n**PHASE 3: HANDS-ON QA - ACTUALLY RUN IT (MANDATORY for user-facing changes)**\n\nTests and linters CANNOT catch: visual bugs, wrong CLI output, broken user flows, API response shape issues.\n\n**If this task produced anything a user would SEE or INTERACT with, you MUST launch it and verify yourself.**\n\n- **Frontend/UI**: `/playwright` skill - load the page, click through the flow, check console. Verify: page loads, interactions work, console clean, responsive.\n- **TUI/CLI**: `interactive_bash` - run the command, try good input, try bad input, try --help. Verify: command runs, output correct, error messages helpful, edge inputs handled.\n- **API/Backend**: `Bash` with curl - hit the endpoint, check response body, send malformed input. Verify: returns 200, body correct, error cases return proper errors.\n- **Config/Build**: Actually start the service or import the config. Verify: loads without error, backward compatible.\n\nThis is NOT optional \"if applicable\". If the deliverable is user-facing and you did not run it, you are shipping untested work.\n\n**PHASE 4: GATE DECISION - Should you proceed to the next task?**\n\nAnswer honestly:\n1. Can I explain what EVERY changed line does? (If no - back to Phase 1)\n2. Did I SEE it work with my own eyes? (If user-facing and no - back to Phase 3)\n3. Am I confident nothing existing is broken? (If no - run broader tests)\n\nALL three must be YES. \"Probably\" = NO. \"I think so\" = NO. Investigate until CERTAIN.\n\n- **All 3 YES** - Proceed: mark task complete, move to next.\n- **Any NO** - Reject: resume session with `session_id`, fix the specific issue.\n- **Unsure** - Reject: \"unsure\" = \"no\". Investigate until you have a definitive answer.\n\n**DO NOT proceed to the next task until all 4 phases are complete and the gate passes.**";
+ export declare const VERIFICATION_REMINDER_GEMINI = "**THE SUBAGENT HAS FINISHED. THEIR WORK IS EXTREMELY SUSPICIOUS.**\n\nThe subagent CLAIMS this task is done. Based on thousands of executions, subagent claims are FALSE more often than true.\nThey ROUTINELY:\n- Ship code with syntax errors they didn't bother to check\n- Create stub implementations with TODOs and call it \"done\"\n- Write tests that pass trivially (testing nothing meaningful)\n- Implement logic that does NOT match what was requested\n- Add features nobody asked for and call it \"improvement\"\n- Report \"all tests pass\" when they didn't run any tests\n\n**This is NOT a theoretical warning. This WILL happen on this task. Assume the work is BROKEN.**\n\n**YOU MUST VERIFY WITH ACTUAL TOOL CALLS. NOT REASONING. TOOL CALLS.**\nThinking \"it looks correct\" is NOT verification. Running `lsp_diagnostics` IS.\n\n---\n\n**PHASE 1: READ THE CODE FIRST (DO NOT SKIP - DO NOT RUN TESTS YET)**\n\nRead the code FIRST so you know what you're testing.\n\n1. `Bash(\"git diff --stat -- ':!node_modules'\")` - see exactly which files changed.\n2. `Read` EVERY changed file - no exceptions, no skimming.\n3. For EACH file:\n - Does this code ACTUALLY do what the task required? RE-READ the task spec.\n - Any stubs, TODOs, placeholders? `Grep` for TODO, FIXME, HACK, xxx\n - Anti-patterns? `Grep` for `as any`, `@ts-ignore`, empty catch\n - Scope creep? Did the subagent add things NOT in the task spec?\n4. Cross-check EVERY claim against actual code.\n\n**If you cannot explain what every changed line does, GO BACK AND READ AGAIN.**\n\n**PHASE 2: RUN AUTOMATED CHECKS**\n\n1. `lsp_diagnostics` on EACH changed file - ZERO new errors. ACTUALLY RUN THIS.\n2. Run tests for changed modules, then full suite. ACTUALLY RUN THESE.\n3. Build/typecheck - exit 0.\n\nIf Phase 1 found issues but Phase 2 passes: Phase 2 is WRONG. Fix the code.\n\n**PHASE 3: HANDS-ON QA (MANDATORY for user-facing changes)**\n\n- **Frontend/UI**: `/playwright`\n- **TUI/CLI**: `interactive_bash`\n- **API/Backend**: `Bash` with curl\n\n**If user-facing and you did not run it, you are shipping UNTESTED BROKEN work.**\n\n**PHASE 4: GATE DECISION**\n\n1. Can I explain what EVERY changed line does? (If no \u2192 Phase 1)\n2. Did I SEE it work via tool calls? (If user-facing and no \u2192 Phase 3)\n3. Am I confident nothing is broken? (If no \u2192 broader tests)\n\nALL three must be YES. \"Probably\" = NO. \"I think so\" = NO.\n\n**DO NOT proceed to the next task until all 4 phases are complete.**";
  export declare const ORCHESTRATOR_DELEGATION_REQUIRED: string;
  export declare const SINGLE_TASK_DIRECTIVE: string;
@@ -9,8 +9,12 @@ interface ToolExecuteOutput {
  output: string;
  metadata: unknown;
  }
- interface ToolExecuteBeforeOutput {
- args: unknown;
+ interface DirectoryAgentsInjectorHook {
+ "tool.execute.before"?: (input: ToolExecuteInput, output: {
+ args: unknown;
+ }) => Promise<void>;
+ "tool.execute.after": (input: ToolExecuteInput, output: ToolExecuteOutput) => Promise<void>;
+ event: (input: EventInput) => Promise<void>;
  }
  interface EventInput {
  event: {
@@ -20,9 +24,5 @@ interface EventInput {
  }
  export declare function createDirectoryAgentsInjectorHook(ctx: PluginInput, modelCacheState?: {
  anthropicContext1MEnabled: boolean;
- }): {
- "tool.execute.before": (input: ToolExecuteInput, output: ToolExecuteBeforeOutput) => Promise<void>;
- "tool.execute.after": (input: ToolExecuteInput, output: ToolExecuteOutput) => Promise<void>;
- event: ({ event }: EventInput) => Promise<void>;
- };
+ }): DirectoryAgentsInjectorHook;
  export {};
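
Both injector factories (this one and the readme variant below) now return a named hook interface in which "tool.execute.before" is optional, so consumers should guard that call. A sketch with placeholder inputs; the input interfaces are module-internal, so this is illustrative only:

declare const ctx: PluginInput;
declare const toolInput: ToolExecuteInput;
declare const toolOutput: ToolExecuteOutput;
declare const eventInput: EventInput;

const hook = createDirectoryAgentsInjectorHook(ctx, { anthropicContext1MEnabled: false });

// "tool.execute.before" may be absent under the new type: use optional chaining.
await hook["tool.execute.before"]?.(toolInput, { args: {} });
await hook["tool.execute.after"](toolInput, toolOutput);
await hook.event(eventInput);
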
@@ -9,8 +9,12 @@ interface ToolExecuteOutput {
  output: string;
  metadata: unknown;
  }
- interface ToolExecuteBeforeOutput {
- args: unknown;
+ interface DirectoryReadmeInjectorHook {
+ "tool.execute.before"?: (input: ToolExecuteInput, output: {
+ args: unknown;
+ }) => Promise<void>;
+ "tool.execute.after": (input: ToolExecuteInput, output: ToolExecuteOutput) => Promise<void>;
+ event: (input: EventInput) => Promise<void>;
  }
  interface EventInput {
  event: {
@@ -20,9 +24,5 @@ interface EventInput {
  }
  export declare function createDirectoryReadmeInjectorHook(ctx: PluginInput, modelCacheState?: {
  anthropicContext1MEnabled: boolean;
- }): {
- "tool.execute.before": (input: ToolExecuteInput, output: ToolExecuteBeforeOutput) => Promise<void>;
- "tool.execute.after": (input: ToolExecuteInput, output: ToolExecuteOutput) => Promise<void>;
- event: ({ event }: EventInput) => Promise<void>;
- };
+ }): DirectoryReadmeInjectorHook;
  export {};
@@ -3,4 +3,12 @@ export declare function getNextReachableFallback(sessionID: string, state: Model
  providerID: string;
  modelID: string;
  variant?: string;
+ reasoningEffort?: string;
+ temperature?: number;
+ top_p?: number;
+ maxTokens?: number;
+ thinking?: {
+ type: "enabled" | "disabled";
+ budgetTokens?: number;
+ };
  } | null;
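
The fallback descriptor now carries sampling and reasoning parameters alongside the model identity. A sketch of consuming it; the state parameter's type name is cut off in the hunk header, so it is left as unknown here, and how a caller applies these fields to a retry request is an assumption:

declare function getNextReachableFallback(sessionID: string, state: unknown): {
  providerID: string;
  modelID: string;
  temperature?: number;
  maxTokens?: number;
  thinking?: { type: "enabled" | "disabled"; budgetTokens?: number };
} | null;

declare const sessionID: string;
declare const state: unknown;

const fallback = getNextReachableFallback(sessionID, state);
if (fallback) {
  // Hypothetical: forward sampling fields to the retry request; enable
  // extended thinking only when the descriptor says so.
  const thinkingEnabled = fallback.thinking?.type === "enabled";
  console.log(fallback.providerID, fallback.modelID, fallback.temperature, thinkingEnabled);
}
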