@moikapy/origen 0.4.1 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/adapter.js CHANGED
@@ -5,9 +5,10 @@ import {
    buildContext,
    convertMessages,
    createEventStream,
+   defaultCitationExtractor,
    resolveModel,
    translateEvent
- } from "./chunk-TECUAB3E.js";
+ } from "./chunk-K3FE63XL.js";
  export {
    adaptTool,
    adaptTools,
@@ -15,6 +16,7 @@ export {
    buildContext,
    convertMessages,
    createEventStream,
+   defaultCitationExtractor,
    resolveModel,
    translateEvent
  };
package/dist/chunk-GK5KZOHB.js ADDED
@@ -0,0 +1,201 @@
+ // src/models.ts
+ function buildStaticModels() {
+   const models = {};
+   models["openrouter/free"] = {
+     name: "Free (Auto)",
+     description: "Free \u2014 auto-selects best free model for your request",
+     free: true
+   };
+   models["google/gemma-4-31b-it:free"] = {
+     name: "Gemma 4 31B",
+     description: "Free \u2014 great quality for Bible study",
+     free: true
+   };
+   models["nvidia/nemotron-3-super-120b-a12b:free"] = {
+     name: "Nemotron 3 Super",
+     description: "Free \u2014 large model, strong reasoning",
+     free: true
+   };
+   models["deepseek/deepseek-r1:free"] = {
+     name: "DeepSeek R1 (Free)",
+     description: "Free \u2014 reasoning with thinking support",
+     free: true
+   };
+   models["qwen/qwen3-coder:free"] = {
+     name: "Qwen3 Coder",
+     description: "Free \u2014 480B parameters, excellent tool use",
+     free: true
+   };
+   models["openrouter/auto"] = {
+     name: "Auto (All)",
+     description: "Auto-selects best model (requires credits)",
+     free: false
+   };
+   models["anthropic/claude-sonnet-4"] = {
+     name: "Claude Sonnet 4",
+     description: "Premium \u2014 excellent quality + reasoning (requires credits)",
+     free: false
+   };
+   models["google/gemini-2.5-flash-preview"] = {
+     name: "Gemini 2.5 Flash",
+     description: "Premium \u2014 fast with thinking (requires credits)",
+     free: false
+   };
+   models["ollama/llama3"] = {
+     name: "Llama 3 (Ollama)",
+     description: "Local \u2014 Meta's Llama 3, requires Ollama",
+     free: true
+   };
+   models["ollama/llama3.1"] = {
+     name: "Llama 3.1 (Ollama)",
+     description: "Local \u2014 Llama 3.1 8B, 128K context, requires Ollama",
+     free: true
+   };
+   models["ollama/gemma3"] = {
+     name: "Gemma 3 (Ollama)",
+     description: "Local \u2014 Google's Gemma 3, requires Ollama",
+     free: true
+   };
+   models["ollama/mistral"] = {
+     name: "Mistral 7B (Ollama)",
+     description: "Local \u2014 Mistral's 7B model, requires Ollama",
+     free: true
+   };
+   models["ollama/qwen3"] = {
+     name: "Qwen 3 (Ollama)",
+     description: "Local \u2014 Alibaba's Qwen 3, requires Ollama",
+     free: true
+   };
+   models["ollama/deepseek-r1"] = {
+     name: "DeepSeek R1 (Ollama)",
+     description: "Local \u2014 reasoning model, requires Ollama",
+     free: true
+   };
+   models["ollama/codellama"] = {
+     name: "Code Llama (Ollama)",
+     description: "Local \u2014 code-focused Llama variant, requires Ollama",
+     free: true
+   };
+   models["ollama/phi3"] = {
+     name: "Phi-3 (Ollama)",
+     description: "Local \u2014 Microsoft's small but capable model, requires Ollama",
+     free: true
+   };
+   return models;
+ }
+ var MODELS = buildStaticModels();
+ var DEFAULT_MODEL_ID = "openrouter/free";
+ var DEFAULT_MODEL = DEFAULT_MODEL_ID;
+ var THINKING_MODELS = /* @__PURE__ */ new Set([
+   "anthropic/claude-sonnet-4",
+   "deepseek/deepseek-r1:free",
+   "google/gemini-2.5-flash-preview",
+   "ollama/deepseek-r1"
+ ]);
+ function supportsThinking(model) {
+   return THINKING_MODELS.has(model);
+ }
+ function isOllamaModel(model) {
+   return model.startsWith("ollama/");
+ }
+ function getModelsByProvider(provider) {
+   return Object.keys(MODELS).filter((id) => id.startsWith(`${provider}/`));
+ }
+ function getModelsForUI() {
+   const uiModels = {};
+   for (const [id, config] of Object.entries(MODELS)) {
+     uiModels[id] = { name: config.name, description: config.description, free: config.free };
+   }
+   return uiModels;
+ }
+ var REASONING_FAMILIES = /* @__PURE__ */ new Set([
+   "deepseek-r1",
+   "deepseek-r1-distill",
+   "qwq",
+   "qwen3",
+   "kimi-k2",
+   "glm-5.1",
+   "gemma4"
+ ]);
+ function describeOllamaModel(name, details, isCloud) {
+   const location = isCloud ? "Cloud" : "Local";
+   const family = details.family || name.split(":")[0].split("-")[0];
+   const params = details.parameter_size;
+   const quant = details.quantization_level;
+   const isReasoning = details.families?.some((f) => REASONING_FAMILIES.has(f)) ?? REASONING_FAMILIES.has(family);
+   const parts = [location, "\u2014"];
+   if (params && params !== "") {
+     parts.push(params);
+   }
+   if (quant && quant !== "") {
+     parts.push(`(${quant})`);
+   }
+   if (isReasoning) {
+     parts.push("reasoning");
+   }
+   parts.push("requires Ollama");
+   return parts.join(" ");
+ }
+ async function fetchOllamaModels(baseUrl = "http://localhost:11434") {
+   const tagsUrl = `${baseUrl.replace(/\/v1$/, "")}/api/tags`;
+   let response;
+   try {
+     response = await fetch(tagsUrl, { signal: AbortSignal.timeout(5e3) });
+   } catch {
+     return {};
+   }
+   if (!response.ok) {
+     return {};
+   }
+   const data = await response.json();
+   const discovered = {};
+   for (const model of data.models) {
+     const tagSuffix = model.name.includes(":") ? model.name.split(":").pop() : "";
+     const isCloud = tagSuffix === "cloud";
+     const baseName = model.name.split(":")[0];
+     const modelId = `ollama/${baseName}`;
+     if (model.details?.family === "nomic-bert" || model.details?.families?.includes("nomic-bert")) {
+       continue;
+     }
+     const description = describeOllamaModel(baseName, model.details, isCloud);
+     discovered[modelId] = {
+       name: model.details?.parameter_size ? `${baseName} (${model.details.parameter_size})` : baseName,
+       description,
+       free: true
+     };
+     if (tagSuffix && tagSuffix !== "latest") {
+       const fullId = `ollama/${model.name}`;
+       discovered[fullId] = {
+         name: model.name,
+         description,
+         free: true
+       };
+     }
+   }
+   return discovered;
+ }
+ function mergeOllamaModels(ollamaModels) {
+   for (const [id, config] of Object.entries(ollamaModels)) {
+     MODELS[id] = config;
+   }
+ }
+ async function discoverOllamaModels(baseUrl = "http://localhost:11434") {
+   const discovered = await fetchOllamaModels(baseUrl);
+   mergeOllamaModels(discovered);
+   return { ...MODELS };
+ }
+
+ export {
+   MODELS,
+   DEFAULT_MODEL_ID,
+   DEFAULT_MODEL,
+   THINKING_MODELS,
+   supportsThinking,
+   isOllamaModel,
+   getModelsByProvider,
+   getModelsForUI,
+   fetchOllamaModels,
+   mergeOllamaModels,
+   discoverOllamaModels
+ };
+ //# sourceMappingURL=chunk-GK5KZOHB.js.map
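Note: the new chunk above adds runtime Ollama discovery on top of the static registry. A minimal usage sketch follows, in TypeScript, assuming these names are re-exported from the package entry point (the diff only shows the compiled chunk, so the exact import path is an assumption):

// Hypothetical consumer code - import path assumed, not confirmed by this diff.
import { DEFAULT_MODEL_ID, discoverOllamaModels, getModelsForUI, supportsThinking } from "@moikapy/origen";

async function listModels(): Promise<void> {
  // Queries GET /api/tags on the local Ollama server and merges the results
  // into the static registry; falls back to the hardcoded ollama/* entries
  // when the server is unreachable (fetchOllamaModels returns {} on error).
  const all = await discoverOllamaModels("http://localhost:11434");

  // UI-safe map: { name, description, free } per model id.
  for (const [id, cfg] of Object.entries(getModelsForUI())) {
    const thinking = supportsThinking(id) ? " [thinking]" : "";
    console.log(`${id}: ${cfg.name} (free: ${cfg.free})${thinking}`);
  }

  console.log(`default: ${DEFAULT_MODEL_ID}, total models: ${Object.keys(all).length}`);
}

void listModels();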
package/dist/chunk-GK5KZOHB.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/models.ts"],"sourcesContent":["/**\n * Origen model configuration.\n *\n * Static entries for cloud providers (OpenRouter, Anthropic, Google, etc.)\n * plus dynamic Ollama model discovery via GET /api/tags.\n *\n * Hardcoded Ollama entries serve as fallbacks when Ollama isn't reachable.\n * When connected, fetchOllamaModels() pulls the live model list and merges\n * it with the static entries.\n */\n\nimport { getModel } from \"@mariozechner/pi-ai\";\nimport type { Model, Api } from \"@mariozechner/pi-ai\";\nexport type { Model as ProviderModel, Api } from \"@mariozechner/pi-ai\";\n\n// ── Types ─────────────────────────────────────────────────────────────\n\nexport interface ModelConfig {\n name: string;\n description: string;\n free: boolean;\n}\n\n/** UI-facing model config — safe to send to the client. Strips internal fields. */\nexport type UIModelConfig = ModelConfig;\n\n/** Ollama /api/tags response shape. */\ninterface OllamaModelResponse {\n models: Array<{\n name: string;\n model: string;\n modified_at: string;\n size: number;\n digest: string;\n details: {\n parent_model: string;\n format: string;\n family: string;\n families: string[] | null;\n parameter_size: string;\n quantization_level: string;\n };\n }>;\n}\n\n// ── Static model registry (cloud + hardcoded Ollama defaults) ────────\n\nfunction buildStaticModels(): Record<string, ModelConfig> {\n const models: Record<string, ModelConfig> = {};\n\n // ── OpenRouter (free tier) ───────────────────────────\n models[\"openrouter/free\"] = {\n name: \"Free (Auto)\",\n description: \"Free — auto-selects best free model for your request\",\n free: true,\n };\n models[\"google/gemma-4-31b-it:free\"] = {\n name: \"Gemma 4 31B\",\n description: \"Free — great quality for Bible study\",\n free: true,\n };\n models[\"nvidia/nemotron-3-super-120b-a12b:free\"] = {\n name: \"Nemotron 3 Super\",\n description: \"Free — large model, strong reasoning\",\n free: true,\n };\n models[\"deepseek/deepseek-r1:free\"] = {\n name: \"DeepSeek R1 (Free)\",\n description: \"Free — reasoning with thinking support\",\n free: true,\n };\n models[\"qwen/qwen3-coder:free\"] = {\n name: \"Qwen3 Coder\",\n description: \"Free — 480B parameters, excellent tool use\",\n free: true,\n };\n\n // ── OpenRouter (premium) ─────────────────────────────\n models[\"openrouter/auto\"] = {\n name: \"Auto (All)\",\n description: \"Auto-selects best model (requires credits)\",\n free: false,\n };\n models[\"anthropic/claude-sonnet-4\"] = {\n name: \"Claude Sonnet 4\",\n description: \"Premium — excellent quality + reasoning (requires credits)\",\n free: false,\n };\n models[\"google/gemini-2.5-flash-preview\"] = {\n name: \"Gemini 2.5 Flash\",\n description: \"Premium — fast with thinking (requires credits)\",\n free: false,\n };\n\n // ── Ollama (local, always free — fallback defaults) ──\n models[\"ollama/llama3\"] = {\n name: \"Llama 3 (Ollama)\",\n description: \"Local — Meta's Llama 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/llama3.1\"] = {\n name: \"Llama 3.1 (Ollama)\",\n description: \"Local — Llama 3.1 8B, 128K context, requires Ollama\",\n free: true,\n };\n models[\"ollama/gemma3\"] = {\n name: \"Gemma 3 (Ollama)\",\n description: \"Local — Google's Gemma 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/mistral\"] = {\n name: \"Mistral 7B (Ollama)\",\n description: \"Local — Mistral's 7B model, requires Ollama\",\n free: true,\n };\n models[\"ollama/qwen3\"] = {\n name: \"Qwen 3 (Ollama)\",\n 
description: \"Local — Alibaba's Qwen 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/deepseek-r1\"] = {\n name: \"DeepSeek R1 (Ollama)\",\n description: \"Local — reasoning model, requires Ollama\",\n free: true,\n };\n models[\"ollama/codellama\"] = {\n name: \"Code Llama (Ollama)\",\n description: \"Local — code-focused Llama variant, requires Ollama\",\n free: true,\n };\n models[\"ollama/phi3\"] = {\n name: \"Phi-3 (Ollama)\",\n description: \"Local — Microsoft's small but capable model, requires Ollama\",\n free: true,\n };\n\n return models;\n}\n\nexport const MODELS: Record<string, ModelConfig> = buildStaticModels();\nexport type ModelId = keyof typeof MODELS;\n\n/** Default model — free router, works with $0 credits */\nexport const DEFAULT_MODEL_ID: ModelId = \"openrouter/free\";\n\n/** Backward compat alias */\nexport const DEFAULT_MODEL: ModelId = DEFAULT_MODEL_ID;\n\n/** Models that support extended thinking */\nexport const THINKING_MODELS: ReadonlySet<string> = new Set<string>([\n \"anthropic/claude-sonnet-4\",\n \"deepseek/deepseek-r1:free\",\n \"google/gemini-2.5-flash-preview\",\n \"ollama/deepseek-r1\",\n]);\n\n/** Check if a model supports extended thinking */\nexport function supportsThinking(model: string): boolean {\n return THINKING_MODELS.has(model);\n}\n\n/** Check if a model is an Ollama model */\nexport function isOllamaModel(model: string): boolean {\n return model.startsWith(\"ollama/\");\n}\n\n/** Get all model IDs for a specific provider prefix */\nexport function getModelsByProvider(provider: string): string[] {\n return Object.keys(MODELS).filter((id) => id.startsWith(`${provider}/`));\n}\n\n/** Get models as a simple UI map (name, description, free). No internal fields. */\nexport function getModelsForUI(): Record<string, UIModelConfig> {\n const uiModels: Record<string, UIModelConfig> = {};\n for (const [id, config] of Object.entries(MODELS)) {\n uiModels[id] = { name: config.name, description: config.description, free: config.free };\n }\n return uiModels;\n}\n\n// ── Dynamic Ollama discovery ─────────────────────────────────────────\n\n/** Known reasoning model families — used to tag discovered models. */\nconst REASONING_FAMILIES = new Set([\n \"deepseek-r1\", \"deepseek-r1-distill\", \"qwq\", \"qwen3\", \"kimi-k2\",\n \"glm-5.1\", \"gemma4\",\n]);\n\n/** Derive a human-readable description from Ollama model details. */\nfunction describeOllamaModel(\n name: string,\n details: OllamaModelResponse[\"models\"][number][\"details\"],\n isCloud: boolean,\n): string {\n const location = isCloud ? \"Cloud\" : \"Local\";\n const family = details.family || name.split(\":\")[0].split(\"-\")[0];\n const params = details.parameter_size;\n const quant = details.quantization_level;\n const isReasoning = details.families?.some((f) => REASONING_FAMILIES.has(f)) ?? REASONING_FAMILIES.has(family);\n\n const parts: string[] = [location, \"—\"];\n\n if (params && params !== \"\") {\n parts.push(params);\n }\n if (quant && quant !== \"\") {\n parts.push(`(${quant})`);\n }\n if (isReasoning) {\n parts.push(\"reasoning\");\n }\n parts.push(\"requires Ollama\");\n\n return parts.join(\" \");\n}\n\n/**\n * Fetch available models from a running Ollama server.\n *\n * Calls GET /api/tags on the Ollama server and returns model configs\n * merged with the static defaults. 
Cloud models (e.g., foo:cloud)\n * are included alongside local models.\n *\n * @param baseUrl - Ollama server URL (default: http://localhost:11434)\n * @returns Object with discovered Ollama model configs (keyed by \"ollama/<name>\")\n */\nexport async function fetchOllamaModels(\n baseUrl: string = \"http://localhost:11434\",\n): Promise<Record<string, ModelConfig>> {\n const tagsUrl = `${baseUrl.replace(/\\/v1$/, \"\")}/api/tags`;\n\n let response: Response;\n try {\n response = await fetch(tagsUrl, { signal: AbortSignal.timeout(5000) });\n } catch {\n // Ollama not reachable — return empty, callers use static defaults\n return {};\n }\n\n if (!response.ok) {\n return {};\n }\n\n const data: OllamaModelResponse = await response.json();\n const discovered: Record<string, ModelConfig> = {};\n\n for (const model of data.models) {\n // Strip tag suffix for a cleaner ID (e.g., \"llama3.2:latest\" → \"llama3.2\")\n const tagSuffix = model.name.includes(\":\") ? model.name.split(\":\").pop() : \"\";\n const isCloud = tagSuffix === \"cloud\";\n const baseName = model.name.split(\":\")[0];\n const modelId = `ollama/${baseName}`;\n\n // Skip embedding models\n if (model.details?.family === \"nomic-bert\" || model.details?.families?.includes(\"nomic-bert\")) {\n continue;\n }\n\n const description = describeOllamaModel(baseName, model.details, isCloud);\n\n discovered[modelId] = {\n name: model.details?.parameter_size\n ? `${baseName} (${model.details.parameter_size})`\n : baseName,\n description,\n free: true,\n };\n\n // If there's a tag like :cloud or :latest, also register the full tagged name\n if (tagSuffix && tagSuffix !== \"latest\") {\n const fullId = `ollama/${model.name}`;\n discovered[fullId] = {\n name: model.name,\n description,\n free: true,\n };\n }\n }\n\n return discovered;\n}\n\n/**\n * Merge dynamically discovered Ollama models into the static MODELS registry.\n *\n * Static defaults are kept as fallbacks. 
Discovered models override\n * entries with the same key (e.g., \"ollama/llama3\" from the server\n * replaces the hardcoded entry with live data).\n *\n * @param ollamaModels - Models returned by fetchOllamaModels()\n */\nexport function mergeOllamaModels(ollamaModels: Record<string, ModelConfig>): void {\n for (const [id, config] of Object.entries(ollamaModels)) {\n MODELS[id] = config;\n }\n}\n\n/**\n * One-shot: fetch Ollama models and merge them into the registry.\n * Returns the combined model map.\n *\n * @param baseUrl - Ollama server URL (default: http://localhost:11434)\n */\nexport async function discoverOllamaModels(\n baseUrl: string = \"http://localhost:11434\",\n): Promise<Record<string, ModelConfig>> {\n const discovered = await fetchOllamaModels(baseUrl);\n mergeOllamaModels(discovered);\n return { ...MODELS };\n}"],"mappings":";AA+CA,SAAS,oBAAiD;AACxD,QAAM,SAAsC,CAAC;AAG7C,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,4BAA4B,IAAI;AAAA,IACrC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,wCAAwC,IAAI;AAAA,IACjD,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,uBAAuB,IAAI;AAAA,IAChC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,iCAAiC,IAAI;AAAA,IAC1C,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,gBAAgB,IAAI;AAAA,IACzB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,cAAc,IAAI;AAAA,IACvB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,oBAAoB,IAAI;AAAA,IAC7B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,kBAAkB,IAAI;AAAA,IAC3B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,aAAa,IAAI;AAAA,IACtB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAEA,SAAO;AACT;AAEO,IAAM,SAAsC,kBAAkB;AAI9D,IAAM,mBAA4B;AAGlC,IAAM,gBAAyB;AAG/B,IAAM,kBAAuC,oBAAI,IAAY;AAAA,EAClE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAGM,SAAS,iBAAiB,OAAwB;AACvD,SAAO,gBAAgB,IAAI,KAAK;AAClC;AAGO,SAAS,cAAc,OAAwB;AACpD,SAAO,MAAM,WAAW,SAAS;AACnC;AAGO,SAAS,oBAAoB,UAA4B;AAC9D,SAAO,OAAO,KAAK,MAAM,EAAE,OAAO,CAAC,OAAO,GAAG,WAAW,GAAG,QAAQ,GAAG,CAAC;AACzE;AAGO,SAAS,iBAAgD;AAC9D,QAAM,WAA0C,CAAC;AACjD,aAAW,CAAC,IAAI,MAAM,KAAK,OAAO,QAAQ,MAAM,GAAG;AACjD,aAAS,EAAE,IAAI,EAAE,MAAM,OAAO,MAAM,aAAa,OAAO,aAAa,MAAM,OAAO,KAAK;AAAA,EACzF;AACA,SAAO;AACT;AAKA,IAAM,qBAAqB,oBAAI,IAAI;AAAA,EACjC;AAAA,EAAe;AAAA,EAAuB;AAAA,EAAO;AAAA,EAAS;AAAA,EACtD;AAAA,EAAW;AACb,CAAC;AAGD,SAAS,oBACP,MACA,SACA,SACQ;AACR,QAAM,WAAW,UAAU,UAAU;AACrC,QAAM,SAAS,QAAQ,UAAU,KAAK,MAAM,GAAG,EAAE,CAAC,EAAE,MAAM,GAAG,EAAE,CAAC;AAChE,QAAM,SAAS,QAAQ;AACvB,QAAM,QAAQ,QAAQ;AACtB,QAAM,cAAc,QAAQ,UAAU,KAAK,CAAC,MAAM,mBAAmB,IAAI,CAAC,CAAC,KAAK,mBAAmB,IAAI,MAAM;AAE7G,QAAM,QAAkB,CAAC,UAAU,QAAG;AAEtC,MAAI,UAAU,WAAW,IAAI;AAC3B,UAAM,KAAK,MAAM;AAAA,EACnB;AACA,MAAI,SAAS,UAAU,IAAI;AACzB,UAAM,KAAK,IAAI,KAAK,GAAG;AAAA,EACzB;AACA,MAAI,aAAa;AACf,UAAM,KAAK,WAAW;AAAA,EACxB;AACA,QAAM,KAAK,iBAAiB;AAE5B,SAAO,MAAM,KAAK,GAAG;AACvB;AAYA,eAAsB,kBACpB,UAAkB,0BACoB;AACtC,QAAM,UAAU,GAAG,QAAQ,QAAQ,SAAS,EAAE,CAAC;AAE/C,MAAI;AACJ,MAAI;AACF,eAAW,MAAM,MAAM,SAAS,EAAE,QAAQ,YAAY,QAAQ,GAAI,EAAE,CAAC;AAAA,EACvE,QAAQ;AAEN,WAAO,CAAC;AAAA,EACV;AAEA,MAAI,CAAC,SAAS,IAAI;AAChB,WAA
O,CAAC;AAAA,EACV;AAEA,QAAM,OAA4B,MAAM,SAAS,KAAK;AACtD,QAAM,aAA0C,CAAC;AAEjD,aAAW,SAAS,KAAK,QAAQ;AAE/B,UAAM,YAAY,MAAM,KAAK,SAAS,GAAG,IAAI,MAAM,KAAK,MAAM,GAAG,EAAE,IAAI,IAAI;AAC3E,UAAM,UAAU,cAAc;AAC9B,UAAM,WAAW,MAAM,KAAK,MAAM,GAAG,EAAE,CAAC;AACxC,UAAM,UAAU,UAAU,QAAQ;AAGlC,QAAI,MAAM,SAAS,WAAW,gBAAgB,MAAM,SAAS,UAAU,SAAS,YAAY,GAAG;AAC7F;AAAA,IACF;AAEA,UAAM,cAAc,oBAAoB,UAAU,MAAM,SAAS,OAAO;AAExE,eAAW,OAAO,IAAI;AAAA,MACpB,MAAM,MAAM,SAAS,iBACjB,GAAG,QAAQ,KAAK,MAAM,QAAQ,cAAc,MAC5C;AAAA,MACJ;AAAA,MACA,MAAM;AAAA,IACR;AAGA,QAAI,aAAa,cAAc,UAAU;AACvC,YAAM,SAAS,UAAU,MAAM,IAAI;AACnC,iBAAW,MAAM,IAAI;AAAA,QACnB,MAAM,MAAM;AAAA,QACZ;AAAA,QACA,MAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAWO,SAAS,kBAAkB,cAAiD;AACjF,aAAW,CAAC,IAAI,MAAM,KAAK,OAAO,QAAQ,YAAY,GAAG;AACvD,WAAO,EAAE,IAAI;AAAA,EACf;AACF;AAQA,eAAsB,qBACpB,UAAkB,0BACoB;AACtC,QAAM,aAAa,MAAM,kBAAkB,OAAO;AAClD,oBAAkB,UAAU;AAC5B,SAAO,EAAE,GAAG,OAAO;AACrB;","names":[]}
package/dist/chunk-TECUAB3E.js → package/dist/chunk-K3FE63XL.js RENAMED
@@ -7,6 +7,9 @@ function adaptTool(tool, getD1) {
      // Convert JSON schema to TypeBox format — pi-agent-core uses TypeBox
      // but accepts plain JSON schemas for the tool definition sent to the LLM.
      // We provide parameters as a TypeBox-like schema.
+     // OrigenTool uses plain JSON schema objects; pi-agent-core expects TSchema (TypeBox).
+     // TypeBox accepts plain JSON schemas at runtime — the type mismatch is cosmetic.
+     // We widen to TSchema to satisfy the type system while preserving runtime correctness.
      parameters: {
        type: "object",
        ...tool.parameters
@@ -37,6 +40,18 @@ var OLLAMA_MODELS = {
      contextWindow: 8192,
      maxTokens: 4096
    },
+   "ollama/llama3.1": {
+     id: "llama3.1",
+     name: "Llama 3.1 (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 131072,
+     maxTokens: 4096
+   },
    "ollama/gemma3": {
      id: "gemma3",
      name: "Gemma 3 (Ollama)",
@@ -46,12 +61,12 @@ var OLLAMA_MODELS = {
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-     contextWindow: 8192,
-     maxTokens: 4096
+     contextWindow: 131072,
+     maxTokens: 8192
    },
    "ollama/mistral": {
      id: "mistral",
-     name: "Mistral (Ollama)",
+     name: "Mistral 7B (Ollama)",
      api: "openai-completions",
      provider: "ollama",
      baseUrl: "http://localhost:11434/v1",
@@ -61,6 +76,18 @@ var OLLAMA_MODELS = {
      contextWindow: 32768,
      maxTokens: 4096
    },
+   "ollama/mistral-nemo": {
+     id: "mistral-nemo",
+     name: "Mistral Nemo (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 131072,
+     maxTokens: 4096
+   },
    "ollama/qwen3": {
      id: "qwen3",
      name: "Qwen 3 (Ollama)",
@@ -70,8 +97,8 @@ var OLLAMA_MODELS = {
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-     contextWindow: 32768,
-     maxTokens: 4096
+     contextWindow: 131072,
+     maxTokens: 8192
    },
    "ollama/deepseek-r1": {
      id: "deepseek-r1",
@@ -82,8 +109,32 @@ var OLLAMA_MODELS = {
      reasoning: true,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-     contextWindow: 65536,
+     contextWindow: 131072,
      maxTokens: 8192
+   },
+   "ollama/codellama": {
+     id: "codellama",
+     name: "Code Llama (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 16384,
+     maxTokens: 4096
+   },
+   "ollama/phi3": {
+     id: "phi3",
+     name: "Phi-3 (Ollama)",
+     api: "openai-completions",
+     provider: "ollama",
+     baseUrl: "http://localhost:11434/v1",
+     reasoning: false,
+     input: ["text"],
+     cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+     contextWindow: 131072,
+     maxTokens: 4096
    }
  };
  var DEFAULT_MODEL = {
@@ -146,7 +197,7 @@ function resolveModel(modelId, options) {
        }
      };
    }
-   const providers = ["openrouter", "anthropic", "google", "openai", "deepseek", "groq", "xai"];
+   const providers = ["openrouter", "anthropic", "google", "openai", "deepseek", "groq", "xai", "ollama"];
    for (const provider of providers) {
      try {
        const model = getModel(provider, modelId);
@@ -289,8 +340,9 @@ export {
    resolveModel,
    convertMessages,
    buildContext,
+   defaultCitationExtractor,
    translateEvent,
    createEventStream,
    agentToStreamEvents
  };
- //# sourceMappingURL=chunk-TECUAB3E.js.map
+ //# sourceMappingURL=chunk-K3FE63XL.js.map
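Note: defaultCitationExtractor, newly exported above, is the Bible-reference fallback used when no custom extractCitations callback is supplied. A small sketch of its behavior, based on the regex shown in the embedded source (three-letter uppercase book code, chapter, verse); the import path is assumed:

// Hypothetical consumer code - the extractor matches [GEN 1:1]-style references.
import { defaultCitationExtractor } from "@moikapy/origen";

const answer =
  "In the beginning God created the heavens and the earth [GEN 1:1], " +
  "and the Word became flesh [JHN 1:14].";

const citations = defaultCitationExtractor(answer);
// citations => [
//   { book: "GEN", chapter: 1, verse: 1 },
//   { book: "JHN", chapter: 1, verse: 14 }
// ]
console.log(citations);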
package/dist/chunk-K3FE63XL.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/adapter.ts"],"sourcesContent":["/**\n * Adapter: bridges Origen's simple types to pi-agent-core/pi-ai types.\n *\n * - OrigenTool → AgentTool (injects D1Provider)\n * - pi-ai Model resolution (OpenRouter, Ollama, Anthropic, Google)\n * - StreamEvent translation (AgentEvent → Origen's StreamEvent)\n */\n\nimport { getModel } from \"@mariozechner/pi-ai\";\nimport type { Model, Api, Message, Context, Tool } from \"@mariozechner/pi-ai\";\nimport type { AgentTool, AgentEvent, AgentMessage } from \"@mariozechner/pi-agent-core\";\nimport type { TSchema } from \"typebox\";\nimport type { OrigenTool, StreamEvent } from \"./agent\";\nimport type { D1Provider, Citation, UsageInfo } from \"./types\";\n\n// ── Tool adapter ─────────────────────────────────────────────────────\n\n/**\n * Convert an OrigenTool into a pi-agent-core AgentTool.\n * The D1Provider is captured in closure so the tool's execute gets it.\n */\nexport function adaptTool(tool: OrigenTool, getD1: D1Provider): AgentTool {\n return {\n name: tool.name,\n description: tool.description,\n // Convert JSON schema to TypeBox format — pi-agent-core uses TypeBox\n // but accepts plain JSON schemas for the tool definition sent to the LLM.\n // We provide parameters as a TypeBox-like schema.\n // OrigenTool uses plain JSON schema objects; pi-agent-core expects TSchema (TypeBox).\n // TypeBox accepts plain JSON schemas at runtime — the type mismatch is cosmetic.\n // We widen to TSchema to satisfy the type system while preserving runtime correctness.\n parameters: {\n type: \"object\",\n ...tool.parameters,\n } as TSchema,\n label: tool.name,\n execute: async (_toolCallId, params, _signal) => {\n const result = await tool.execute(params as Record<string, unknown>, getD1);\n return {\n content: [{ type: \"text\" as const, text: result }],\n details: {},\n };\n },\n };\n}\n\n/** Adapt all OrigenTools for an Agent instance. */\nexport function adaptTools(tools: OrigenTool[], getD1: D1Provider): AgentTool[] {\n return tools.map((t) => adaptTool(t, getD1));\n}\n\n// ── Model resolution ──────────────────────────────────────────────────\n\nexport interface ModelResolutionOptions {\n /** Ollama base URL, e.g. 
\"http://localhost:11434/v1\" */\n ollamaBaseUrl?: string;\n}\n\n/** Known Ollama models with their config defaults.\n * Context windows and maxTokens are conservative defaults — actual values\n * depend on the specific quantization the user has installed.\n * For models not in this list, resolveModel() creates a generic Ollama config.\n */\nconst OLLAMA_MODELS: Record<string, Partial<Model<Api>>> = {\n \"ollama/llama3\": {\n id: \"llama3\",\n name: \"Llama 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 8192,\n maxTokens: 4096,\n },\n \"ollama/llama3.1\": {\n id: \"llama3.1\",\n name: \"Llama 3.1 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 4096,\n },\n \"ollama/gemma3\": {\n id: \"gemma3\",\n name: \"Gemma 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 8192,\n },\n \"ollama/mistral\": {\n id: \"mistral\",\n name: \"Mistral 7B (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 32768,\n maxTokens: 4096,\n },\n \"ollama/mistral-nemo\": {\n id: \"mistral-nemo\",\n name: \"Mistral Nemo (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 4096,\n },\n \"ollama/qwen3\": {\n id: \"qwen3\",\n name: \"Qwen 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 8192,\n },\n \"ollama/deepseek-r1\": {\n id: \"deepseek-r1\",\n name: \"DeepSeek R1 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: true,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 8192,\n },\n \"ollama/codellama\": {\n id: \"codellama\",\n name: \"Code Llama (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 16384,\n maxTokens: 4096,\n },\n \"ollama/phi3\": {\n id: \"phi3\",\n name: \"Phi-3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 4096,\n },\n};\n\nconst DEFAULT_MODEL: Model<Api> = {\n id: \"openrouter/free\",\n name: \"Free (Auto)\",\n api: \"openai-completions\",\n provider: \"openrouter\",\n baseUrl: \"https://openrouter.ai/api/v1\",\n reasoning: false,\n input: 
[\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 128000,\n maxTokens: 4096,\n};\n\n/**\n * Resolve a model ID string to a pi-ai Model object.\n * Tries pi-ai's registry first, then falls back to built-in Ollama definitions.\n */\nexport function resolveModel(modelId: string, options?: ModelResolutionOptions): Model<Api> {\n // Try Ollama models first\n if (modelId.startsWith(\"ollama/\")) {\n const ollamaDef = OLLAMA_MODELS[modelId];\n if (ollamaDef) {\n const baseUrl = options?.ollamaBaseUrl ?? ollamaDef.baseUrl ?? \"http://localhost:11434/v1\";\n return {\n ...DEFAULT_MODEL,\n ...ollamaDef,\n baseUrl,\n compat: {\n supportsStore: false,\n supportsDeveloperRole: false,\n supportsReasoningEffort: false,\n supportsUsageInStreaming: false,\n maxTokensField: \"max_tokens\",\n requiresToolResultName: false,\n requiresAssistantAfterToolResult: false,\n requiresThinkingAsText: true,\n requiresReasoningContentOnAssistantMessages: false,\n thinkingFormat: \"openai\",\n supportsStrictMode: false,\n supportsLongCacheRetention: false,\n },\n } as Model<Api>;\n }\n // Generic Ollama model: user typed a custom model name\n const customId = modelId.replace(\"ollama/\", \"\");\n return {\n ...DEFAULT_MODEL,\n id: customId,\n name: `${customId} (Ollama)`,\n provider: \"ollama\",\n baseUrl: options?.ollamaBaseUrl ?? \"http://localhost:11434/v1\",\n compat: {\n supportsStore: false,\n supportsDeveloperRole: false,\n supportsReasoningEffort: false,\n supportsUsageInStreaming: false,\n maxTokensField: \"max_tokens\",\n requiresToolResultName: false,\n requiresAssistantAfterToolResult: false,\n requiresThinkingAsText: true,\n requiresReasoningContentOnAssistantMessages: false,\n thinkingFormat: \"openai\",\n supportsStrictMode: false,\n supportsLongCacheRetention: false,\n },\n } as Model<Api>;\n }\n\n // Try pi-ai's model registry (OpenRouter, Anthropic, Google, etc.)\n // pi-ai groups by provider, so we try known providers.\n // getModel's type signature requires specific (KnownProvider, ModelKey) pairs\n // for full type inference, but we're resolving dynamically at runtime.\n // The try/catch handles any invalid provider+model combinations.\n const providers: string[] = [\"openrouter\", \"anthropic\", \"google\", \"openai\", \"deepseek\", \"groq\", \"xai\", \"ollama\"];\n for (const provider of providers) {\n try {\n // Type assertion required: getModel's generics are too narrow for dynamic lookup.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const model = getModel(provider as any, modelId as any);\n if (model) return model as Model<Api>;\n } catch {\n // Not found in this provider, try next\n }\n }\n\n // Fallback: create a generic OpenRouter-compatible model\n return {\n ...DEFAULT_MODEL,\n id: modelId,\n name: modelId,\n };\n}\n\n// ── Message conversion ────────────────────────────────────────────────\n\n/** Convert Origen's simple messages to pi-ai Message format. */\nexport function convertMessages(\n messages: Array<{ role: \"user\" | \"assistant\"; content: string }>\n): Message[] {\n // Origen uses simple string messages. pi-ai's Message union type includes\n // UserMessage (content: string | ...[]) and AssistantMessage (content: ...[]).\n // Our messages have role \"user\" (valid UserMessage) or \"assistant\" (simplified —\n // real AssistantMessages have structured content, but pi-agent-core accepts\n // simplified messages at runtime). 
We cast to satisfy TypeScript while\n // maintaining runtime correctness.\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n timestamp: Date.now(),\n })) as Message[];\n}\n\n// ── Context builder ───────────────────────────────────────────────────\n\n/** Build a pi-ai Context from Origen's config. */\nexport function buildContext(\n systemPrompt: string,\n messages: Message[],\n adaptedTools: AgentTool[]\n): Context {\n return {\n systemPrompt,\n messages,\n tools: adaptedTools.map((t) => ({\n name: t.name,\n description: t.description,\n parameters: t.parameters,\n })),\n };\n}\n\n// ── Event translation ─────────────────────────────────────────────────\n\n/** Default citation extractor — [BOOK CHAPTER:VERSE] patterns (e.g., [GEN 1:1]).\n * Bible-specific pattern. Consumers should provide their own extractCitations\n * for non-biblical citation formats. Exported for reuse and testing.\n */\nexport function defaultCitationExtractor(text: string): Citation[] {\n const citations: Citation[] = [];\n const regex = /\\[([A-Z]{3})\\s+(\\d+):(\\d+)\\]/g;\n let match;\n while ((match = regex.exec(text)) !== null) {\n citations.push({ book: match[1], chapter: parseInt(match[2]), verse: parseInt(match[3]) });\n }\n return citations;\n}\n\n/** Translate a pi-agent-core AgentEvent into an Origen StreamEvent. */\nexport function translateEvent(\n event: AgentEvent,\n extractCitations?: (text: string) => Citation[]\n): StreamEvent | null {\n switch (event.type) {\n case \"message_update\": {\n const assistantEvent = event.assistantMessageEvent;\n if (assistantEvent.type === \"text_delta\") {\n return { type: \"text\" as const, content: assistantEvent.delta };\n }\n if (assistantEvent.type === \"thinking_delta\") {\n return { type: \"reasoning\" as const, content: assistantEvent.delta };\n }\n return null;\n }\n case \"tool_execution_start\": {\n return {\n type: \"tool_call\" as const,\n name: event.toolName,\n args: event.args as Record<string, unknown>,\n };\n }\n case \"tool_execution_end\": {\n const resultText = event.result?.content\n ?.filter((c: any) => c.type === \"text\")\n .map((c: any) => c.text)\n .join(\"\\n\") ?? \"\";\n return {\n type: \"tool_result\" as const,\n name: event.toolName,\n result: resultText,\n };\n }\n case \"agent_end\": {\n // Find the final assistant message\n const assistantMsg = event.messages\n .filter((m): m is any => m.role === \"assistant\")\n .pop();\n const text = assistantMsg?.content\n ?.filter((c: any) => c.type === \"text\")\n .map((c: any) => c.text)\n .join(\"\") ?? \"\";\n const usage: UsageInfo | undefined = assistantMsg?.usage\n ? {\n promptTokens: assistantMsg.usage.input,\n completionTokens: assistantMsg.usage.output,\n totalCost: assistantMsg.usage.cost?.total,\n }\n : undefined;\n const citFn = extractCitations ?? defaultCitationExtractor;\n // Check for error\n if (assistantMsg?.stopReason === \"error\" || assistantMsg?.stopReason === \"aborted\") {\n return {\n type: \"error\" as const,\n message: assistantMsg.errorMessage ?? \"Agent encountered an error\",\n };\n }\n return {\n type: \"done\" as const,\n message: text,\n citations: citFn(text),\n usage,\n };\n }\n default:\n return null;\n }\n}\n\n/**\n * Eagerly subscribe to an Agent and return an async iterable of Origen StreamEvents.\n *\n * CRITICAL: The subscription is created synchronously when this function is called,\n * BEFORE agent.prompt() starts. 
This avoids the race condition where events\n * emitted during prompt() are missed if subscription happens after.\n *\n * Usage:\n * const { stream, unsubscribe } = createEventStream(agent, extractCitations);\n * agent.prompt(messages); // events flow into stream via active subscription\n * for await (const event of stream) { ... }\n */\nexport function createEventStream(\n agent: any, // Agent from pi-agent-core\n extractCitations?: (text: string) => Citation[]\n): {\n stream: AsyncGenerator<StreamEvent>;\n unsubscribe: () => void;\n} {\n const queue: StreamEvent[] = [];\n let resolve: (() => void) | null = null;\n let done = false;\n\n // Subscribe IMMEDIATELY (before prompt is called)\n const unsubscribe = agent.subscribe((event: AgentEvent) => {\n const translated = translateEvent(event, extractCitations);\n if (translated) {\n queue.push(translated);\n if (resolve) {\n resolve();\n resolve = null;\n }\n }\n if (event.type === \"agent_end\") {\n done = true;\n if (resolve) {\n resolve();\n resolve = null;\n }\n }\n });\n\n async function* stream(): AsyncGenerator<StreamEvent> {\n try {\n while (!done || queue.length > 0) {\n if (queue.length > 0) {\n yield queue.shift()!;\n continue;\n }\n if (done) break;\n await new Promise<void>((r) => { resolve = r; });\n }\n } finally {\n unsubscribe();\n }\n }\n\n return { stream: stream(), unsubscribe };\n}\n\n/**\n * Subscribe to an Agent and yield Origen StreamEvents.\n * Handles the full lifecycle from agent_start to agent_end.\n *\n * @deprecated Use createEventStream() instead to avoid race conditions.\n * This function subscribes lazily (on first iteration) which can miss events\n * if the agent has already started emitting.\n */\nexport async function* agentToStreamEvents(\n agent: any,\n extractCitations?: (text: string) => Citation[]\n): AsyncGenerator<StreamEvent> {\n yield* createEventStream(agent, 
extractCitations).stream;\n}"],"mappings":";AAQA,SAAS,gBAAgB;AAalB,SAAS,UAAU,MAAkB,OAA8B;AACxE,SAAO;AAAA,IACL,MAAM,KAAK;AAAA,IACX,aAAa,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAOlB,YAAY;AAAA,MACV,MAAM;AAAA,MACN,GAAG,KAAK;AAAA,IACV;AAAA,IACA,OAAO,KAAK;AAAA,IACZ,SAAS,OAAO,aAAa,QAAQ,YAAY;AAC/C,YAAM,SAAS,MAAM,KAAK,QAAQ,QAAmC,KAAK;AAC1E,aAAO;AAAA,QACL,SAAS,CAAC,EAAE,MAAM,QAAiB,MAAM,OAAO,CAAC;AAAA,QACjD,SAAS,CAAC;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AACF;AAGO,SAAS,WAAW,OAAqB,OAAgC;AAC9E,SAAO,MAAM,IAAI,CAAC,MAAM,UAAU,GAAG,KAAK,CAAC;AAC7C;AAcA,IAAM,gBAAqD;AAAA,EACzD,iBAAiB;AAAA,IACf,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,mBAAmB;AAAA,IACjB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,iBAAiB;AAAA,IACf,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,kBAAkB;AAAA,IAChB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,uBAAuB;AAAA,IACrB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,gBAAgB;AAAA,IACd,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,sBAAsB;AAAA,IACpB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,oBAAoB;AAAA,IAClB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,eAAe;AAAA,IACb,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AACF;AAEA,IAAM,gBAA4B;AAAA,EAChC,IAAI;AAAA,EACJ,MAAM;AAAA,EACN,KAAK;AAAA,EACL,UAAU;AAAA,EACV,SAAS;AAAA,EACT,WAAW;AAAA,EACX,OAAO,CAAC,MAAM;AAAA,EACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,eAAe;AAAA,EACf,WAAW;AACb;AAMO,SAAS,aAAa,SAAiB,SAA8C;AAE1F,MAAI,QAAQ,WAAW,SAAS,GAAG;AACjC,UAAM,YAAY,cAAc,OAAO;AACvC,QAAI,WAAW;AACb,YAAM,UAAU,SAAS,iBAAiB,UAAU,WAAW;AAC/D,aAAO;AAAA,QACL,GAAG;AAAA,QACH,GAAG;AAAA,QACH;AAAA,QACA,QAAQ;AAAA,UACN,eAAe;AAAA,UACf,uBAAuB;AAAA,UACvB,yBAAyB;AAAA,UACzB,0BAA0B;AAAA,UAC1B,gBAAgB;AAAA,UAChB,wBAAwB;AAAA,UACxB,kCAAkC;AAAA,UAClC,wBAAwB;AAAA,UACxB,6CAA6C;AAAA,UAC7C,gBAAgB;AAAA,UAChB,oBAAoB;AAAA,UACpB,4BAA4B;AAAA,QAC9B;AAAA,MACF;AAAA,IACF;AAEA,UAAM,WAAW,QAAQ,QAAQ,WAAW,EAAE;AAC9C,WAAO;AAAA,MACL,GAAG;AAAA,MACH,IAAI;AAAA,MACJ,MAAM,GAAG,QAAQ;AAAA,MACjB,UAAU;AAAA,MACV,SAAS,SAAS,iBAAiB;AAAA,MACnC,QAAQ;AAAA,QACN,eAAe;AAAA,QACf,uBAAuB;AAAA,QACvB,yBAAyB;AAAA,QACzB,0BAA0B
;AAAA,QAC1B,gBAAgB;AAAA,QAChB,wBAAwB;AAAA,QACxB,kCAAkC;AAAA,QAClC,wBAAwB;AAAA,QACxB,6CAA6C;AAAA,QAC7C,gBAAgB;AAAA,QAChB,oBAAoB;AAAA,QACpB,4BAA4B;AAAA,MAC9B;AAAA,IACF;AAAA,EACF;AAOA,QAAM,YAAsB,CAAC,cAAc,aAAa,UAAU,UAAU,YAAY,QAAQ,OAAO,QAAQ;AAC/G,aAAW,YAAY,WAAW;AAChC,QAAI;AAGF,YAAM,QAAQ,SAAS,UAAiB,OAAc;AACtD,UAAI,MAAO,QAAO;AAAA,IACpB,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,SAAO;AAAA,IACL,GAAG;AAAA,IACH,IAAI;AAAA,IACJ,MAAM;AAAA,EACR;AACF;AAKO,SAAS,gBACd,UACW;AAOX,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,IACX,WAAW,KAAK,IAAI;AAAA,EACtB,EAAE;AACJ;AAKO,SAAS,aACd,cACA,UACA,cACS;AACT,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,OAAO,aAAa,IAAI,CAAC,OAAO;AAAA,MAC9B,MAAM,EAAE;AAAA,MACR,aAAa,EAAE;AAAA,MACf,YAAY,EAAE;AAAA,IAChB,EAAE;AAAA,EACJ;AACF;AAQO,SAAS,yBAAyB,MAA0B;AACjE,QAAM,YAAwB,CAAC;AAC/B,QAAM,QAAQ;AACd,MAAI;AACJ,UAAQ,QAAQ,MAAM,KAAK,IAAI,OAAO,MAAM;AAC1C,cAAU,KAAK,EAAE,MAAM,MAAM,CAAC,GAAG,SAAS,SAAS,MAAM,CAAC,CAAC,GAAG,OAAO,SAAS,MAAM,CAAC,CAAC,EAAE,CAAC;AAAA,EAC3F;AACA,SAAO;AACT;AAGO,SAAS,eACd,OACA,kBACoB;AACpB,UAAQ,MAAM,MAAM;AAAA,IAClB,KAAK,kBAAkB;AACrB,YAAM,iBAAiB,MAAM;AAC7B,UAAI,eAAe,SAAS,cAAc;AACxC,eAAO,EAAE,MAAM,QAAiB,SAAS,eAAe,MAAM;AAAA,MAChE;AACA,UAAI,eAAe,SAAS,kBAAkB;AAC5C,eAAO,EAAE,MAAM,aAAsB,SAAS,eAAe,MAAM;AAAA,MACrE;AACA,aAAO;AAAA,IACT;AAAA,IACA,KAAK,wBAAwB;AAC3B,aAAO;AAAA,QACL,MAAM;AAAA,QACN,MAAM,MAAM;AAAA,QACZ,MAAM,MAAM;AAAA,MACd;AAAA,IACF;AAAA,IACA,KAAK,sBAAsB;AACzB,YAAM,aAAa,MAAM,QAAQ,SAC7B,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACrC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,IAAI,KAAK;AACjB,aAAO;AAAA,QACL,MAAM;AAAA,QACN,MAAM,MAAM;AAAA,QACZ,QAAQ;AAAA,MACV;AAAA,IACF;AAAA,IACA,KAAK,aAAa;AAEhB,YAAM,eAAe,MAAM,SACxB,OAAO,CAAC,MAAgB,EAAE,SAAS,WAAW,EAC9C,IAAI;AACP,YAAM,OAAO,cAAc,SACvB,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACrC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,EAAE,KAAK;AACf,YAAM,QAA+B,cAAc,QAC/C;AAAA,QACE,cAAc,aAAa,MAAM;AAAA,QACjC,kBAAkB,aAAa,MAAM;AAAA,QACrC,WAAW,aAAa,MAAM,MAAM;AAAA,MACtC,IACA;AACJ,YAAM,QAAQ,oBAAoB;AAElC,UAAI,cAAc,eAAe,WAAW,cAAc,eAAe,WAAW;AAClF,eAAO;AAAA,UACL,MAAM;AAAA,UACN,SAAS,aAAa,gBAAgB;AAAA,QACxC;AAAA,MACF;AACA,aAAO;AAAA,QACL,MAAM;AAAA,QACN,SAAS;AAAA,QACT,WAAW,MAAM,IAAI;AAAA,QACrB;AAAA,MACF;AAAA,IACF;AAAA,IACA;AACE,aAAO;AAAA,EACX;AACF;AAcO,SAAS,kBACd,OACA,kBAIA;AACA,QAAM,QAAuB,CAAC;AAC9B,MAAI,UAA+B;AACnC,MAAI,OAAO;AAGX,QAAM,cAAc,MAAM,UAAU,CAAC,UAAsB;AACzD,UAAM,aAAa,eAAe,OAAO,gBAAgB;AACzD,QAAI,YAAY;AACd,YAAM,KAAK,UAAU;AACrB,UAAI,SAAS;AACX,gBAAQ;AACR,kBAAU;AAAA,MACZ;AAAA,IACF;AACA,QAAI,MAAM,SAAS,aAAa;AAC9B,aAAO;AACP,UAAI,SAAS;AACX,gBAAQ;AACR,kBAAU;AAAA,MACZ;AAAA,IACF;AAAA,EACF,CAAC;AAED,kBAAgB,SAAsC;AACpD,QAAI;AACF,aAAO,CAAC,QAAQ,MAAM,SAAS,GAAG;AAChC,YAAI,MAAM,SAAS,GAAG;AACpB,gBAAM,MAAM,MAAM;AAClB;AAAA,QACF;AACA,YAAI,KAAM;AACV,cAAM,IAAI,QAAc,CAAC,MAAM;AAAE,oBAAU;AAAA,QAAG,CAAC;AAAA,MACjD;AAAA,IACF,UAAE;AACA,kBAAY;AAAA,IACd;AAAA,EACF;AAEA,SAAO,EAAE,QAAQ,OAAO,GAAG,YAAY;AACzC;AAUA,gBAAuB,oBACrB,OACA,kBAC6B;AAC7B,SAAO,kBAAkB,OAAO,gBAAgB,EAAE;AACpD;","names":[]}
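Note: the adapter source embedded in the map above documents the event-streaming contract: createEventStream(agent, extractCitations) subscribes to the agent before prompt() is called so no events are missed, and translateEvent maps pi-agent-core events to Origen StreamEvents ("text", "reasoning", "tool_call", "tool_result", "done", "error"). A rough TypeScript sketch of that pattern; the agent construction and the exact message shape passed to prompt() are not part of this diff and are assumed:

// Hypothetical wiring - the Agent comes from pi-agent-core and is typed loosely here.
import { createEventStream, defaultCitationExtractor } from "@moikapy/origen";

async function ask(agent: any, question: string): Promise<void> {
  // Subscribe first: events emitted while prompt() runs are queued, not lost.
  const { stream } = createEventStream(agent, defaultCitationExtractor);

  // Fire the prompt without awaiting; events flow through the live subscription.
  void agent.prompt([{ role: "user", content: question }]);

  for await (const event of stream) {
    if (event.type === "text") {
      console.log(event.content);
    } else if (event.type === "tool_call") {
      console.log(`[tool] ${event.name}`);
    } else if (event.type === "done") {
      console.log("citations:", event.citations);
    } else if (event.type === "error") {
      throw new Error(event.message);
    }
  }
}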