@moikapy/origen 0.3.1 → 0.4.0

This diff shows the contents of publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -0,0 +1,204 @@
+ import { ModelId } from './models.js';
+ import { z } from 'zod';
+ import { Model, Api, Message, Context } from '@mariozechner/pi-ai';
+ import { AgentTool, AgentEvent } from '@mariozechner/pi-agent-core';
+
+ /**
+  * Origen types — no runtime deps, safe for client + server.
+  */
+ /** D1-compatible database interface for tool execution */
+ interface D1Like {
+     prepare(sql: string): {
+         bind(...params: unknown[]): {
+             all(): Promise<{
+                 results?: Record<string, unknown>[];
+             }>;
+             first(): Promise<Record<string, unknown> | null>;
+             run(): Promise<{
+                 meta?: {
+                     changes: number;
+                     last_row_id: number;
+                 };
+             }>;
+         };
+     };
+ }
+ /** Function that provides a D1 instance to tool executors */
+ type D1Provider = () => Promise<D1Like>;
+ /** Chat context passed from the UI (what the user is reading) */
+ interface ReadingContext {
+     translation: string;
+     bookCode: string;
+     chapter: number;
+     selectedVerses?: number[];
+ }
+ interface Citation {
+     book: string;
+     chapter: number;
+     verse: number;
+ }
+ interface UsageInfo {
+     promptTokens?: number;
+     completionTokens?: number;
+     totalCost?: number;
+ }
+ /** Model configuration entry */
+ interface ModelConfig {
+     name: string;
+     description: string;
+     free: boolean;
+ }
+
+ /**
+  * Origen — Agent Engine (v0.3)
+  *
+  * Multi-provider agent harness built on pi-ai + pi-agent-core.
+  * Supports OpenRouter, Ollama, Anthropic, Google, and any OpenAI-compatible API.
+  * Soul.md personas, streaming, parallel tool execution, abort support.
+  */
+
+ /**
+  * A tool that the host app registers with Origen.
+  * Simple interface: name, description, JSON schema, and an execute function
+  * that receives (args, getD1). The adapter wraps this into pi-agent-core's AgentTool.
+  */
+ interface OrigenTool {
+     name: string;
+     description: string;
+     /** OpenAI function-calling parameter schema (JSON) */
+     parameters: Record<string, unknown>;
+     /** Zod schema for runtime validation (optional) */
+     inputSchema?: z.ZodType;
+     execute: (args: Record<string, unknown>, getD1: D1Provider) => Promise<string>;
+ }
+ interface AgentConfig {
+     appName?: string;
+     systemPrompt?: string;
+     tools: OrigenTool[];
+     getD1: D1Provider;
+     model?: ModelId;
+     maxSteps?: number;
+     /** Custom citation extractor */
+     extractCitations?: (text: string) => Citation[];
+     /** Dynamic API key resolution per provider (e.g., for expiring OAuth tokens) */
+     getApiKey?: (provider: string) => Promise<string | undefined>;
+     /** Ollama base URL override (default: http://localhost:11434/v1) */
+     ollamaBaseUrl?: string;
+     /** Tool execution mode: "parallel" (default) or "sequential" */
+     toolExecution?: "sequential" | "parallel";
+     /** Abort signal for cancellation */
+     signal?: AbortSignal;
+     /** Reasoning/thinking level for models that support it */
+     thinkingLevel?: "off" | "minimal" | "low" | "medium" | "high";
+ }
+ interface AuthCheckResult {
+     authenticated: boolean;
+     apiKey: string | null;
+     provider?: string;
+     error?: string;
+ }
+ /**
+  * Provider-aware auth check. Tests key availability for each provider.
+  * If no provider argument, checks OpenRouter + Ollama availability.
+  */
+ declare function checkAuth(getApiKey: ((provider: string) => Promise<string | undefined>) | (() => Promise<string | null>)): Promise<AuthCheckResult>;
+ /** Convenience: check OpenRouter auth only (backward compat). */
+ declare function checkOpenRouterAuth(getApiKey: () => Promise<string | null>): Promise<AuthCheckResult>;
+ type StreamEvent = {
+     type: "reasoning";
+     content: string;
+ } | {
+     type: "tool_call";
+     name: string;
+     args: Record<string, unknown>;
+ } | {
+     type: "tool_result";
+     name: string;
+     result: string;
+ } | {
+     type: "text";
+     content: string;
+ } | {
+     type: "done";
+     message: string;
+     citations: Citation[];
+     usage?: UsageInfo;
+ } | {
+     type: "error";
+     message: string;
+ };
+ declare function streamOrigen(messages: Array<{
+     role: "user" | "assistant";
+     content: string;
+ }>, context: Record<string, unknown> | undefined, config: AgentConfig, apiKey?: string): AsyncGenerator<StreamEvent>;
+ interface AgentResponse {
+     message: string;
+     citations: Citation[];
+     usage?: UsageInfo;
+ }
+ declare function callOrigen(messages: Array<{
+     role: "user" | "assistant";
+     content: string;
+ }>, context: Record<string, unknown> | undefined, config: AgentConfig, apiKey?: string): Promise<AgentResponse>;
+
+ /**
+  * Adapter: bridges Origen's simple types to pi-agent-core/pi-ai types.
+  *
+  * - OrigenTool → AgentTool (injects D1Provider)
+  * - pi-ai Model resolution (OpenRouter, Ollama, Anthropic, Google)
+  * - StreamEvent translation (AgentEvent → Origen's StreamEvent)
+  */
+
+ /**
+  * Convert an OrigenTool into a pi-agent-core AgentTool.
+  * The D1Provider is captured in closure so the tool's execute gets it.
+  */
+ declare function adaptTool(tool: OrigenTool, getD1: D1Provider): AgentTool;
+ /** Adapt all OrigenTools for an Agent instance. */
+ declare function adaptTools(tools: OrigenTool[], getD1: D1Provider): AgentTool[];
+ interface ModelResolutionOptions {
+     /** Ollama base URL, e.g. "http://localhost:11434/v1" */
+     ollamaBaseUrl?: string;
+ }
+ /**
+  * Resolve a model ID string to a pi-ai Model object.
+  * Tries pi-ai's registry first, then falls back to built-in Ollama definitions.
+  */
+ declare function resolveModel(modelId: string, options?: ModelResolutionOptions): Model<Api>;
+ /** Convert Origen's simple messages to pi-ai Message format. */
+ declare function convertMessages(messages: Array<{
+     role: "user" | "assistant";
+     content: string;
+ }>): Message[];
+ /** Build a pi-ai Context from Origen's config. */
+ declare function buildContext(systemPrompt: string, messages: Message[], adaptedTools: AgentTool[]): Context;
+ /** Translate a pi-agent-core AgentEvent into an Origen StreamEvent. */
+ declare function translateEvent(event: AgentEvent, extractCitations?: (text: string) => Citation[]): StreamEvent | null;
+ /**
+  * Eagerly subscribe to an Agent and return an async iterable of Origen StreamEvents.
+  *
+  * CRITICAL: The subscription is created synchronously when this function is called,
+  * BEFORE agent.prompt() starts. This avoids the race condition where events
+  * emitted during prompt() are missed if subscription happens after.
+  *
+  * Usage:
+  *   const { stream, unsubscribe } = createEventStream(agent, extractCitations);
+  *   agent.prompt(messages); // events flow into stream via active subscription
+  *   for await (const event of stream) { ... }
+  */
+ declare function createEventStream(agent: any, // Agent from pi-agent-core
+ extractCitations?: (text: string) => Citation[]): {
+     stream: AsyncGenerator<StreamEvent>;
+     unsubscribe: () => void;
+ };
+ /**
+  * Subscribe to an Agent and yield Origen StreamEvents.
+  * Handles the full lifecycle from agent_start to agent_end.
+  *
+  * @deprecated Use createEventStream() instead to avoid race conditions.
+  * This function subscribes lazily (on first iteration) which can miss events
+  * if the agent has already started emitting.
+  */
+ declare function agentToStreamEvents(agent: any, extractCitations?: (text: string) => Citation[]): AsyncGenerator<StreamEvent>;
+
+ export { type AgentConfig as A, type Citation as C, type D1Like as D, type ModelConfig as M, type ModelResolutionOptions, type OrigenTool as O, type ReadingContext as R, type StreamEvent as S, type UsageInfo as U, type AgentResponse as a, adaptTool, adaptTools, agentToStreamEvents, type AuthCheckResult as b, buildContext, type D1Provider as c, convertMessages, createEventStream, callOrigen as d, checkAuth as e, checkOpenRouterAuth as f, resolveModel, streamOrigen as s, translateEvent };
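 
For orientation, a minimal consumer sketch written against the declarations above. Illustrative only: the package-root import path, the get_verse tool, the env.DB binding, and the apiKey lookup are assumptions, not part of the published typings.

import { streamOrigen, type OrigenTool, type AgentConfig } from "@moikapy/origen";

declare const env: { DB: any }; // hypothetical Cloudflare D1 binding satisfying D1Like
declare const apiKey: string;   // hypothetical OpenRouter key resolved by the host app

// Hypothetical tool: look a verse up through the injected D1 provider.
const getVerse: OrigenTool = {
  name: "get_verse",
  description: "Fetch a verse by book code, chapter, and verse number",
  parameters: {
    type: "object",
    properties: {
      book: { type: "string" },
      chapter: { type: "number" },
      verse: { type: "number" }
    },
    required: ["book", "chapter", "verse"]
  },
  execute: async (args, getD1) => {
    const db = await getD1();
    const row = await db
      .prepare("SELECT text FROM verses WHERE book = ? AND chapter = ? AND verse = ?")
      .bind(args.book, args.chapter, args.verse)
      .first();
    return row ? String(row.text) : "Verse not found";
  }
};

const config: AgentConfig = {
  tools: [getVerse],
  getD1: async () => env.DB,
  model: "openrouter/free",
  toolExecution: "parallel"
};

// streamOrigen yields StreamEvent values until a "done" or "error" event arrives.
for await (const event of streamOrigen(
  [{ role: "user", content: "What does John 3:16 say?" }],
  undefined,
  config,
  apiKey
)) {
  if (event.type === "text") process.stdout.write(event.content);
  else if (event.type === "done") console.log("\ncitations:", event.citations);
}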
@@ -0,0 +1,21 @@
+ import {
+   adaptTool,
+   adaptTools,
+   agentToStreamEvents,
+   buildContext,
+   convertMessages,
+   createEventStream,
+   resolveModel,
+   translateEvent
+ } from "./chunk-TECUAB3E.js";
+ export {
+   adaptTool,
+   adaptTools,
+   agentToStreamEvents,
+   buildContext,
+   convertMessages,
+   createEventStream,
+   resolveModel,
+   translateEvent
+ };
+ //# sourceMappingURL=adapter.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
@@ -0,0 +1,109 @@
+ // src/models.ts
+ function getModelsForUI() {
+   const uiModels = {};
+   for (const [id, config] of Object.entries(MODELS)) {
+     uiModels[id] = { name: config.name, description: config.description, free: config.free };
+   }
+   return uiModels;
+ }
+ function buildModels() {
+   const models = {};
+   models["openrouter/free"] = {
+     name: "Free (Auto)",
+     description: "Free \u2014 auto-selects best free model for your request",
+     free: true
+   };
+   models["google/gemma-4-31b-it:free"] = {
+     name: "Gemma 4 31B",
+     description: "Free \u2014 great quality for Bible study",
+     free: true
+   };
+   models["nvidia/nemotron-3-super-120b-a12b:free"] = {
+     name: "Nemotron 3 Super",
+     description: "Free \u2014 large model, strong reasoning",
+     free: true
+   };
+   models["deepseek/deepseek-r1:free"] = {
+     name: "DeepSeek R1 (Free)",
+     description: "Free \u2014 reasoning with thinking support",
+     free: true
+   };
+   models["qwen/qwen3-coder:free"] = {
+     name: "Qwen3 Coder",
+     description: "Free \u2014 480B parameters, excellent tool use",
+     free: true
+   };
+   models["openrouter/auto"] = {
+     name: "Auto (All)",
+     description: "Auto-selects best model (requires credits)",
+     free: false
+   };
+   models["anthropic/claude-sonnet-4"] = {
+     name: "Claude Sonnet 4",
+     description: "Premium \u2014 excellent quality + reasoning (requires credits)",
+     free: false
+   };
+   models["google/gemini-2.5-flash-preview"] = {
+     name: "Gemini 2.5 Flash",
+     description: "Premium \u2014 fast with thinking (requires credits)",
+     free: false
+   };
+   models["ollama/llama3"] = {
+     name: "Llama 3 (Ollama)",
+     description: "Local \u2014 Meta's Llama 3, requires Ollama",
+     free: true
+   };
+   models["ollama/gemma3"] = {
+     name: "Gemma 3 (Ollama)",
+     description: "Local \u2014 Google's Gemma 3, requires Ollama",
+     free: true
+   };
+   models["ollama/mistral"] = {
+     name: "Mistral (Ollama)",
+     description: "Local \u2014 Mistral's 7B model, requires Ollama",
+     free: true
+   };
+   models["ollama/qwen3"] = {
+     name: "Qwen 3 (Ollama)",
+     description: "Local \u2014 Alibaba's Qwen 3, requires Ollama",
+     free: true
+   };
+   models["ollama/deepseek-r1"] = {
+     name: "DeepSeek R1 (Ollama)",
+     description: "Local \u2014 reasoning model, requires Ollama",
+     free: true
+   };
+   return models;
+ }
+ var MODELS = buildModels();
+ var DEFAULT_MODEL_ID = "openrouter/free";
+ var DEFAULT_MODEL = DEFAULT_MODEL_ID;
+ var THINKING_MODELS = /* @__PURE__ */ new Set([
+   "anthropic/claude-sonnet-4",
+   "deepseek/deepseek-r1:free",
+   "google/gemini-2.5-flash-preview",
+   "ollama/deepseek-r1"
+ ]);
+ function supportsThinking(model) {
+   return THINKING_MODELS.has(model);
+ }
+ function isOllamaModel(model) {
+   return model.startsWith("ollama/");
+ }
+ function getModelsByProvider(provider) {
+   return Object.keys(MODELS).filter(
+     (id) => id.startsWith(`${provider}/`)
+   );
+ }
+
+ export {
+   getModelsForUI,
+   MODELS,
+   DEFAULT_MODEL_ID,
+   DEFAULT_MODEL,
+   THINKING_MODELS,
+   supportsThinking,
+   isOllamaModel,
+   getModelsByProvider
+ };
+ //# sourceMappingURL=chunk-ECRY7XDR.js.map
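 
A short sketch of how these helpers might drive a model picker. It assumes they are re-exported from the package root; everything else comes from the chunk above.

import {
  getModelsForUI,
  supportsThinking,
  getModelsByProvider,
  DEFAULT_MODEL_ID
} from "@moikapy/origen";

// List free models ahead of premium ones for a picker UI.
const models = getModelsForUI();
const sorted = Object.entries(models).sort(([, a], [, b]) => Number(b.free) - Number(a.free));
for (const [id, m] of sorted) {
  console.log(`${m.free ? "[free]" : "[paid]"} ${id}: ${m.name}`);
}

// Gate a "thinking level" control on model support.
console.log(supportsThinking(DEFAULT_MODEL_ID));     // false: the free router
console.log(supportsThinking("ollama/deepseek-r1")); // true: listed in THINKING_MODELS

// All local model ids (prefix "ollama/").
console.log(getModelsByProvider("ollama"));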
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/models.ts"],"sourcesContent":["/**\n * Origen model configuration.\n *\n * Delegates to pi-ai's model registry for known providers (OpenRouter, Anthropic, Google, etc.)\n * Plus custom entries for Ollama and free-tier aliases.\n */\n\nimport { getModel } from \"@mariozechner/pi-ai\";\nimport type { Model, Api } from \"@mariozechner/pi-ai\";\nexport type { Model as ProviderModel, Api } from \"@mariozechner/pi-ai\";\n\n// ── Model registry ────────────────────────────────────────────────────\n\nexport interface ModelConfig {\n name: string;\n description: string;\n free: boolean;\n}\n\n/** UI-facing model config — safe to send to the client. Strips internal fields. */\nexport type UIModelConfig = ModelConfig;\n\n/** Get models as a simple UI map (name, description, free). No internal fields. */\nexport function getModelsForUI(): Record<string, UIModelConfig> {\n const uiModels: Record<string, UIModelConfig> = {};\n for (const [id, config] of Object.entries(MODELS)) {\n uiModels[id] = { name: config.name, description: config.description, free: config.free };\n }\n return uiModels;\n}\n\n// Build MODELS map from pi-ai registry + custom entries\nfunction buildModels(): Record<string, ModelConfig> {\n const models: Record<string, ModelConfig> = {};\n\n // ── OpenRouter (free tier) ───────────────────────────\n models[\"openrouter/free\"] = {\n name: \"Free (Auto)\",\n description: \"Free — auto-selects best free model for your request\",\n free: true,\n };\n models[\"google/gemma-4-31b-it:free\"] = {\n name: \"Gemma 4 31B\",\n description: \"Free — great quality for Bible study\",\n free: true,\n };\n models[\"nvidia/nemotron-3-super-120b-a12b:free\"] = {\n name: \"Nemotron 3 Super\",\n description: \"Free — large model, strong reasoning\",\n free: true,\n };\n models[\"deepseek/deepseek-r1:free\"] = {\n name: \"DeepSeek R1 (Free)\",\n description: \"Free — reasoning with thinking support\",\n free: true,\n };\n\n models[\"qwen/qwen3-coder:free\"] = {\n name: \"Qwen3 Coder\",\n description: \"Free — 480B parameters, excellent tool use\",\n free: true,\n };\n\n // ── OpenRouter (premium) ─────────────────────────────\n models[\"openrouter/auto\"] = {\n name: \"Auto (All)\",\n description: \"Auto-selects best model (requires credits)\",\n free: false,\n };\n models[\"anthropic/claude-sonnet-4\"] = {\n name: \"Claude Sonnet 4\",\n description: \"Premium — excellent quality + reasoning (requires credits)\",\n free: false,\n };\n models[\"google/gemini-2.5-flash-preview\"] = {\n name: \"Gemini 2.5 Flash\",\n description: \"Premium — fast with thinking (requires credits)\",\n free: false,\n };\n\n // ── Ollama (local, always free) ──────────────────────\n models[\"ollama/llama3\"] = {\n name: \"Llama 3 (Ollama)\",\n description: \"Local — Meta's Llama 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/gemma3\"] = {\n name: \"Gemma 3 (Ollama)\",\n description: \"Local — Google's Gemma 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/mistral\"] = {\n name: \"Mistral (Ollama)\",\n description: \"Local — Mistral's 7B model, requires Ollama\",\n free: true,\n };\n models[\"ollama/qwen3\"] = {\n name: \"Qwen 3 (Ollama)\",\n description: \"Local — Alibaba's Qwen 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/deepseek-r1\"] = {\n name: \"DeepSeek R1 (Ollama)\",\n description: \"Local — reasoning model, requires Ollama\",\n free: true,\n };\n\n return models;\n}\n\nexport const MODELS: Record<string, ModelConfig> = buildModels();\nexport type ModelId = keyof typeof MODELS;\n\n/** Default model — free router, works with $0 credits */\nexport const DEFAULT_MODEL_ID: ModelId = \"openrouter/free\";\n\n/** Backward compat alias */\nexport const DEFAULT_MODEL: ModelId = DEFAULT_MODEL_ID;\n\n/** Models that support extended thinking */\nexport const THINKING_MODELS: ReadonlySet<ModelId> = new Set<ModelId>([\n \"anthropic/claude-sonnet-4\",\n \"deepseek/deepseek-r1:free\",\n \"google/gemini-2.5-flash-preview\",\n \"ollama/deepseek-r1\",\n]);\n\n/** Check if a model supports extended thinking */\nexport function supportsThinking(model: ModelId): boolean {\n return THINKING_MODELS.has(model);\n}\n\n/** Check if a model is an Ollama model */\nexport function isOllamaModel(model: ModelId): boolean {\n return (model as string).startsWith(\"ollama/\");\n}\n\n/** Get all model IDs for a specific provider prefix */\nexport function getModelsByProvider(provider: string): ModelId[] {\n return (Object.keys(MODELS) as ModelId[]).filter((id) =>\n (id as string).startsWith(`${provider}/`)\n );\n}"],"mappings":";AAuBO,SAAS,iBAAgD;AAC9D,QAAM,WAA0C,CAAC;AACjD,aAAW,CAAC,IAAI,MAAM,KAAK,OAAO,QAAQ,MAAM,GAAG;AACjD,aAAS,EAAE,IAAI,EAAE,MAAM,OAAO,MAAM,aAAa,OAAO,aAAa,MAAM,OAAO,KAAK;AAAA,EACzF;AACA,SAAO;AACT;AAGA,SAAS,cAA2C;AAClD,QAAM,SAAsC,CAAC;AAG7C,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,4BAA4B,IAAI;AAAA,IACrC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,wCAAwC,IAAI;AAAA,IACjD,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAEA,SAAO,uBAAuB,IAAI;AAAA,IAChC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,iCAAiC,IAAI;AAAA,IAC1C,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,gBAAgB,IAAI;AAAA,IACzB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,cAAc,IAAI;AAAA,IACvB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,oBAAoB,IAAI;AAAA,IAC7B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAEA,SAAO;AACT;AAEO,IAAM,SAAsC,YAAY;AAIxD,IAAM,mBAA4B;AAGlC,IAAM,gBAAyB;AAG/B,IAAM,kBAAwC,oBAAI,IAAa;AAAA,EACpE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAGM,SAAS,iBAAiB,OAAyB;AACxD,SAAO,gBAAgB,IAAI,KAAK;AAClC;AAGO,SAAS,cAAc,OAAyB;AACrD,SAAQ,MAAiB,WAAW,SAAS;AAC/C;AAGO,SAAS,oBAAoB,UAA6B;AAC/D,SAAQ,OAAO,KAAK,MAAM,EAAgB;AAAA,IAAO,CAAC,OAC/C,GAAc,WAAW,GAAG,QAAQ,GAAG;AAAA,EAC1C;AACF;","names":[]}
@@ -0,0 +1,296 @@
+ // src/adapter.ts
+ import { getModel } from "@mariozechner/pi-ai";
+ function adaptTool(tool, getD1) {
+   return {
+     name: tool.name,
+     description: tool.description,
+     // Convert JSON schema to TypeBox format — pi-agent-core uses TypeBox
+     // but accepts plain JSON schemas for the tool definition sent to the LLM.
+     // We provide parameters as a TypeBox-like schema.
+     parameters: {
+       type: "object",
+       ...tool.parameters
+     },
+     label: tool.name,
+     execute: async (_toolCallId, params, _signal) => {
+       const result = await tool.execute(params, getD1);
+       return {
+         content: [{ type: "text", text: result }],
+         details: {}
+       };
+     }
+   };
+ }
+ function adaptTools(tools, getD1) {
+   return tools.map((t) => adaptTool(t, getD1));
+ }
+ var OLLAMA_MODELS = {
+   "ollama/llama3": {
+     id: "llama3",
+     name: "Llama 3 (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 8192,
+     maxTokens: 4096
+   },
+   "ollama/gemma3": {
+     id: "gemma3",
+     name: "Gemma 3 (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 8192,
+     maxTokens: 4096
+   },
+   "ollama/mistral": {
+     id: "mistral",
+     name: "Mistral (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 32768,
+     maxTokens: 4096
+   },
+   "ollama/qwen3": {
+     id: "qwen3",
+     name: "Qwen 3 (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 32768,
+     maxTokens: 4096
+   },
+   "ollama/deepseek-r1": {
+     id: "deepseek-r1",
+     name: "DeepSeek R1 (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: true,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 65536,
+     maxTokens: 8192
+   }
+ };
+ var DEFAULT_MODEL = {
+   id: "openrouter/free",
+   name: "Free (Auto)",
+   api: "openai-completions",
+   provider: "openrouter",
+   baseUrl: "https://openrouter.ai/api/v1",
+   reasoning: false,
+   input: ["text"],
+   cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+   contextWindow: 128e3,
+   maxTokens: 4096
+ };
+ function resolveModel(modelId, options) {
+   if (modelId.startsWith("ollama/")) {
+     const ollamaDef = OLLAMA_MODELS[modelId];
+     if (ollamaDef) {
+       const baseUrl = options?.ollamaBaseUrl ?? ollamaDef.baseUrl ?? "http://localhost:11434/v1";
+       return {
+         ...DEFAULT_MODEL,
+         ...ollamaDef,
+         baseUrl,
+         compat: {
+           supportsStore: false,
+           supportsDeveloperRole: false,
+           supportsReasoningEffort: false,
+           supportsUsageInStreaming: false,
+           maxTokensField: "max_tokens",
+           requiresToolResultName: false,
+           requiresAssistantAfterToolResult: false,
+           requiresThinkingAsText: true,
+           requiresReasoningContentOnAssistantMessages: false,
+           thinkingFormat: "openai",
+           supportsStrictMode: false,
+           supportsLongCacheRetention: false
+         }
+       };
+     }
+     const customId = modelId.replace("ollama/", "");
+     return {
+       ...DEFAULT_MODEL,
+       id: customId,
+       name: `${customId} (Ollama)`,
+       provider: "ollama",
+       baseUrl: options?.ollamaBaseUrl ?? "http://localhost:11434/v1",
+       compat: {
+         supportsStore: false,
+         supportsDeveloperRole: false,
+         supportsReasoningEffort: false,
+         supportsUsageInStreaming: false,
+         maxTokensField: "max_tokens",
+         requiresToolResultName: false,
+         requiresAssistantAfterToolResult: false,
+         requiresThinkingAsText: true,
+         requiresReasoningContentOnAssistantMessages: false,
+         thinkingFormat: "openai",
+         supportsStrictMode: false,
+         supportsLongCacheRetention: false
+       }
+     };
+   }
+   const providers = ["openrouter", "anthropic", "google", "openai", "deepseek", "groq", "xai"];
+   for (const provider of providers) {
+     try {
+       const model = getModel(provider, modelId);
+       if (model) return model;
+     } catch {
+     }
+   }
+   return {
+     ...DEFAULT_MODEL,
+     id: modelId,
+     name: modelId
+   };
+ }
+ function convertMessages(messages) {
+   return messages.map((m) => ({
+     role: m.role,
+     content: m.content,
+     timestamp: Date.now()
+   }));
+ }
+ function buildContext(systemPrompt, messages, adaptedTools) {
+   return {
+     systemPrompt,
+     messages,
+     tools: adaptedTools.map((t) => ({
+       name: t.name,
+       description: t.description,
+       parameters: t.parameters
+     }))
+   };
+ }
+ function defaultCitationExtractor(text) {
+   const citations = [];
+   const regex = /\[([A-Z]{3})\s+(\d+):(\d+)\]/g;
+   let match;
+   while ((match = regex.exec(text)) !== null) {
+     citations.push({ book: match[1], chapter: parseInt(match[2]), verse: parseInt(match[3]) });
+   }
+   return citations;
+ }
+ function translateEvent(event, extractCitations) {
+   switch (event.type) {
+     case "message_update": {
+       const assistantEvent = event.assistantMessageEvent;
+       if (assistantEvent.type === "text_delta") {
+         return { type: "text", content: assistantEvent.delta };
+       }
+       if (assistantEvent.type === "thinking_delta") {
+         return { type: "reasoning", content: assistantEvent.delta };
+       }
+       return null;
+     }
+     case "tool_execution_start": {
+       return {
+         type: "tool_call",
+         name: event.toolName,
+         args: event.args
+       };
+     }
+     case "tool_execution_end": {
+       const resultText = event.result?.content?.filter((c) => c.type === "text").map((c) => c.text).join("\n") ?? "";
+       return {
+         type: "tool_result",
+         name: event.toolName,
+         result: resultText
+       };
+     }
+     case "agent_end": {
+       const assistantMsg = event.messages.filter((m) => m.role === "assistant").pop();
+       const text = assistantMsg?.content?.filter((c) => c.type === "text").map((c) => c.text).join("") ?? "";
+       const usage = assistantMsg?.usage ? {
+         promptTokens: assistantMsg.usage.input,
+         completionTokens: assistantMsg.usage.output,
+         totalCost: assistantMsg.usage.cost?.total
+       } : void 0;
+       const citFn = extractCitations ?? defaultCitationExtractor;
+       if (assistantMsg?.stopReason === "error" || assistantMsg?.stopReason === "aborted") {
+         return {
+           type: "error",
+           message: assistantMsg.errorMessage ?? "Agent encountered an error"
+         };
+       }
+       return {
+         type: "done",
+         message: text,
+         citations: citFn(text),
+         usage
+       };
+     }
+     default:
+       return null;
+   }
+ }
+ function createEventStream(agent, extractCitations) {
+   const queue = [];
+   let resolve = null;
+   let done = false;
+   const unsubscribe = agent.subscribe((event) => {
+     const translated = translateEvent(event, extractCitations);
+     if (translated) {
+       queue.push(translated);
+       if (resolve) {
+         resolve();
+         resolve = null;
+       }
+     }
+     if (event.type === "agent_end") {
+       done = true;
+       if (resolve) {
+         resolve();
+         resolve = null;
+       }
+     }
+   });
+   async function* stream() {
+     try {
+       while (!done || queue.length > 0) {
+         if (queue.length > 0) {
+           yield queue.shift();
+           continue;
+         }
+         if (done) break;
+         await new Promise((r) => {
+           resolve = r;
+         });
+       }
+     } finally {
+       unsubscribe();
+     }
+   }
+   return { stream: stream(), unsubscribe };
+ }
+ async function* agentToStreamEvents(agent, extractCitations) {
+   yield* createEventStream(agent, extractCitations).stream;
+ }
+
+ export {
+   adaptTool,
+   adaptTools,
+   resolveModel,
+   convertMessages,
+   buildContext,
+   translateEvent,
+   createEventStream,
+   agentToStreamEvents
+ };
+ //# sourceMappingURL=chunk-TECUAB3E.js.map
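 
To make the subscribe-before-prompt ordering of createEventStream concrete, a minimal consumer sketch. The agent value and its prompt() call are assumptions taken from the usage comment in the typings; only createEventStream and the StreamEvent shapes come from this package.

import { createEventStream } from "@moikapy/origen";

declare const agent: any; // an Agent from @mariozechner/pi-agent-core (construction not shown)

// 1. Subscribe first, so events emitted while prompt() runs are not lost.
const { stream, unsubscribe } = createEventStream(agent);

// 2. Kick off the turn; events flow through the already-active subscription.
agent.prompt([{ role: "user", content: "Summarize Psalm 23." }]);

// 3. Drain the stream; "done" carries the final text, citations, and usage.
try {
  for await (const event of stream) {
    if (event.type === "tool_call") console.log("tool:", event.name, event.args);
    else if (event.type === "error") throw new Error(event.message);
    else if (event.type === "done") console.log(event.message, event.usage);
  }
} finally {
  unsubscribe(); // redundant after normal completion, since the generator unsubscribes in its own finally block
}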