@moikapy/origen 0.4.2 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +170 -25
- package/dist/{adapter-DruYlWW-.d.ts → adapter-DXIT3O2f.d.ts} +6 -1
- package/dist/adapter.d.ts +1 -1
- package/dist/adapter.js +3 -1
- package/dist/chunk-GK5KZOHB.js +201 -0
- package/dist/chunk-GK5KZOHB.js.map +1 -0
- package/dist/{chunk-TECUAB3E.js → chunk-K3FE63XL.js} +60 -8
- package/dist/chunk-K3FE63XL.js.map +1 -0
- package/dist/index.d.ts +2 -2
- package/dist/index.js +11 -3
- package/dist/index.js.map +1 -1
- package/dist/models.d.ts +41 -9
- package/dist/models.js +7 -1
- package/package.json +1 -1
- package/dist/chunk-ECRY7XDR.js +0 -109
- package/dist/chunk-ECRY7XDR.js.map +0 -1
- package/dist/chunk-TECUAB3E.js.map +0 -1
package/README.md
CHANGED
|
@@ -2,18 +2,28 @@
|
|
|
2
2
|
|
|
3
3
|
> Multi-Provider Agent Engine — an agent harness, not a chatbot.
|
|
4
4
|
|
|
5
|
-
Named after **Origen of Alexandria** (c. 185–254 AD) — the early church's greatest scholar. This package is
|
|
5
|
+
Named after **Origen of Alexandria** (c. 185–254 AD) — the early church's greatest scholar. This package is a **generic** agent harness that wraps any LLM provider with tool calling, streaming, Soul.md personas, and Cloudflare D1 integration.
|
|
6
6
|
|
|
7
7
|
**Domain-specific tools live in separate packages.** For Bible study, see `@moikapy/scholar-tools`.
|
|
8
8
|
|
|
9
|
-
##
|
|
9
|
+
## Features
|
|
10
|
+
|
|
11
|
+
- **Multi-provider**: OpenRouter, Ollama, Anthropic, Google, OpenAI, DeepSeek, Groq, xAI via `@mariozechner/pi-ai`
|
|
12
|
+
- **Streaming first**: `streamOrigen()` yields typed events (reasoning, tool calls, text deltas, citations)
|
|
13
|
+
- **Parallel tool execution**: Tools run concurrently by default; sequential mode available
|
|
14
|
+
- **Soul.md personas**: Declarative persona definitions with profiles, moods, and voice tuning
|
|
15
|
+
- **D1 integration**: Tools receive a `D1Provider` for Cloudflare D1 database access
|
|
16
|
+
- **Provider-aware auth**: `getApiKey(provider)` resolves keys per-provider (OAuth PKCE, local Ollama, etc.)
|
|
17
|
+
- **Abort support**: Pass `signal: AbortSignal` to cancel streaming mid-flight
|
|
18
|
+
- **Citation extraction**: Pluggable `extractCitations` for domain-specific parsing
|
|
19
|
+
- **Thinking models**: Automatic extended reasoning for DeepSeek R1, Claude Sonnet 4, Gemini 2.5 Flash
|
|
10
20
|
|
|
11
|
-
|
|
21
|
+
## Providers
|
|
12
22
|
|
|
13
23
|
| Provider | Models | Auth |
|
|
14
24
|
|---|---|---|
|
|
15
25
|
| **OpenRouter** | 275+ models, free tier available | OAuth PKCE / API key |
|
|
16
|
-
| **Ollama** | Llama 3, Gemma 3, Mistral, Qwen 3, DeepSeek R1 | Local (no key needed) |
|
|
26
|
+
| **Ollama** | Llama 3, Gemma 3, Mistral, Qwen 3, DeepSeek R1, + any custom model | Local (no key needed) |
|
|
17
27
|
| **Anthropic** | Claude Sonnet 4, etc. | API key |
|
|
18
28
|
| **Google** | Gemini 2.5 Flash, etc. | API key |
|
|
19
29
|
| **OpenAI** | GPT-4o, etc. | API key |
|
|
@@ -52,7 +62,6 @@ const config: AgentConfig = {
|
|
|
52
62
|
tools: [myTool],
|
|
53
63
|
getD1: async () => myD1Database,
|
|
54
64
|
model: "openrouter/free",
|
|
55
|
-
// Provider-aware key resolution
|
|
56
65
|
getApiKey: async (provider) => {
|
|
57
66
|
if (provider === "ollama") return "ollama";
|
|
58
67
|
return getOpenRouterKey();
|
|
@@ -73,14 +82,44 @@ for await (const event of streamOrigen(messages, context, config)) {
|
|
|
73
82
|
|
|
74
83
|
## Ollama Support
|
|
75
84
|
|
|
85
|
+
### Static Model List
|
|
86
|
+
|
|
87
|
+
Origen ships with hardcoded entries for popular Ollama models (Llama 3, Gemma 3, DeepSeek R1, etc.). These serve as fallbacks when Ollama isn't running.
|
|
88
|
+
|
|
89
|
+
### Dynamic Discovery
|
|
90
|
+
|
|
91
|
+
When Ollama is running, Origen can pull the full list of available models — including cloud models (`:cloud` suffix) — via `fetchOllamaModels()`:
|
|
92
|
+
|
|
93
|
+
```typescript
|
|
94
|
+
import { discoverOllamaModels, getModelsForUI } from "@moikapy/origen/models";
|
|
95
|
+
|
|
96
|
+
// Fetch from Ollama, merge into MODELS registry, return combined map
|
|
97
|
+
const allModels = await discoverOllamaModels("http://localhost:11434");
|
|
98
|
+
console.log(allModels["ollama/llama3.2"]); // { name: "llama3.2 (3.2B)", description: "Local — 3.2B (Q4_K_M) requires Ollama", free: true }
|
|
99
|
+
|
|
100
|
+
// Or do it in two steps for more control:
|
|
101
|
+
import { fetchOllamaModels, mergeOllamaModels } from "@moikapy/origen/models";
|
|
102
|
+
|
|
103
|
+
const discovered = await fetchOllamaModels("http://localhost:11434");
|
|
104
|
+
// Filter, transform, or inspect before merging
|
|
105
|
+
mergeOllamaModels(discovered);
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
**Key behaviors:**
|
|
109
|
+
- Embedding models (e.g., `nomic-embed-text`) are automatically filtered out
|
|
110
|
+
- Cloud models (with the `:cloud` suffix) are labeled "Cloud" in their description
|
|
111
|
+
- Reasoning model families (DeepSeek R1, Qwen3, etc.) are auto-detected
|
|
112
|
+
- Network errors return empty results — no crash, just static fallbacks
|
|
113
|
+
- 5-second timeout — won't block startup if Ollama is slow
|
|
114
|
+
|
|
115
|
+
### Custom Models
|
|
116
|
+
|
|
117
|
+
Use `ollama/<model-name>` and Origen constructs a generic OpenAI-compatible config pointing at your Ollama server:
|
|
118
|
+
|
|
76
119
|
```typescript
|
|
77
120
|
const config: AgentConfig = {
|
|
78
|
-
model: "ollama/
|
|
121
|
+
model: "ollama/my-custom-finetune",
|
|
79
122
|
ollamaBaseUrl: "http://localhost:11434/v1",
|
|
80
|
-
getApiKey: async (provider) => {
|
|
81
|
-
if (provider === "ollama") return "ollama"; // Ollama doesn't need a real key
|
|
82
|
-
return undefined;
|
|
83
|
-
},
|
|
84
123
|
// ...
|
|
85
124
|
};
|
|
86
125
|
```
|
|
@@ -100,21 +139,24 @@ console.log(concise.buildPrompt()); // Concise version
|
|
|
100
139
|
|
|
101
140
|
### Supported Soul.md Fields
|
|
102
141
|
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
142
|
+
| Section | Fields |
|
|
143
|
+
|---|---|
|
|
144
|
+
| **identity** | role, archetype, domain_focus, non_goals |
|
|
145
|
+
| **relationship** | stance, user_model_default, trust_baseline |
|
|
146
|
+
| **voice** | formality, warmth, verbosity, jargon, formatting, banned_phrases, preferred_phrases, emoji_policy |
|
|
147
|
+
| **interaction** | clarifying_questions, uncertainty, disagreement, confirmations |
|
|
148
|
+
| **cognition** | mode, depth, verification (fact_checking, cross_validation) |
|
|
149
|
+
| **safety** | refusal_style, privacy, speculation, no_fabrication, no_false_certainty |
|
|
150
|
+
| **actions** | when_to_use_tools, explain_actions, failover |
|
|
151
|
+
| **state** | dynamic moods with trigger-based transitions |
|
|
152
|
+
| **profiles** | named overlays (concise, scholarly, friendly, etc.) |
|
|
153
|
+
| **composition** | extends, mixins, merge_policy |
|
|
154
|
+
|
|
155
|
+
## API Reference
|
|
114
156
|
|
|
115
157
|
### `streamOrigen(messages, context, config, apiKey?)`
|
|
116
158
|
|
|
117
|
-
Async generator yielding `StreamEvent`s. Handles the full agent loop with parallel tool execution.
|
|
159
|
+
Async generator yielding `StreamEvent`s. Handles the full agent loop with parallel tool execution. Events: `reasoning`, `tool_call`, `tool_result`, `text`, `done`, `error`.
|
|
118
160
|
|
|
119
161
|
### `callOrigen(messages, context, config, apiKey?)`
|
|
120
162
|
|
|
@@ -122,12 +164,20 @@ Non-streaming wrapper. Returns `{ message, citations, usage }`.
|
|
|
122
164
|
|
|
123
165
|
### `checkAuth(getApiKey)`
|
|
124
166
|
|
|
125
|
-
Provider-aware auth check. Returns `{ authenticated, apiKey, provider, error? }`.
|
|
167
|
+
Provider-aware auth check. Tries OpenRouter → Ollama → Anthropic. Returns `{ authenticated, apiKey, provider, error? }`.
|
|
126
168
|
|
|
127
169
|
### `checkOpenRouterAuth(getApiKey)`
|
|
128
170
|
|
|
129
171
|
OpenRouter-only auth check (backward compat).
|
|
130
172
|
|
|
173
|
+
### `resolveModel(modelId, options?)`
|
|
174
|
+
|
|
175
|
+
Resolves a model ID string to a pi-ai `Model` object. Handles Ollama models, pi-ai registry lookups, and generic fallbacks.
|
|
176
|
+
|
|
177
|
+
### `createEventStream(agent, extractCitations?)`
|
|
178
|
+
|
|
179
|
+
Eagerly subscribes to an Agent and returns `{ stream, unsubscribe }`. Subscribes synchronously before `prompt()` to avoid race conditions.
|
|
180
|
+
|
|
131
181
|
## Configuration
|
|
132
182
|
|
|
133
183
|
```typescript
|
|
@@ -150,16 +200,111 @@ interface AgentConfig {
|
|
|
150
200
|
## Models
|
|
151
201
|
|
|
152
202
|
```typescript
|
|
153
|
-
import {
|
|
203
|
+
import {
|
|
204
|
+
MODELS, DEFAULT_MODEL, THINKING_MODELS,
|
|
205
|
+
supportsThinking, isOllamaModel,
|
|
206
|
+
getModelsByProvider, getModelsForUI,
|
|
207
|
+
fetchOllamaModels, mergeOllamaModels, discoverOllamaModels,
|
|
208
|
+
} from "@moikapy/origen/models";
|
|
154
209
|
```
|
|
155
210
|
|
|
156
|
-
|
|
211
|
+
### Built-in Model IDs
|
|
212
|
+
|
|
213
|
+
| ID | Name | Free? |
|
|
214
|
+
|---|---|---|
|
|
215
|
+
| `openrouter/free` | Free (Auto) | ✅ |
|
|
216
|
+
| `google/gemma-4-31b-it:free` | Gemma 4 31B | ✅ |
|
|
217
|
+
| `nvidia/nemotron-3-super-120b-a12b:free` | Nemotron 3 Super | ✅ |
|
|
218
|
+
| `deepseek/deepseek-r1:free` | DeepSeek R1 (Free) | ✅ |
|
|
219
|
+
| `qwen/qwen3-coder:free` | Qwen3 Coder | ✅ |
|
|
220
|
+
| `ollama/llama3` | Llama 3 (Ollama) | ✅ |
|
|
221
|
+
| `ollama/llama3.1` | Llama 3.1 8B (Ollama) | ✅ |
|
|
222
|
+
| `ollama/gemma3` | Gemma 3 (Ollama) | ✅ |
|
|
223
|
+
| `ollama/mistral` | Mistral 7B (Ollama) | ✅ |
|
|
224
|
+
| `ollama/qwen3` | Qwen 3 (Ollama) | ✅ |
|
|
225
|
+
| `ollama/deepseek-r1` | DeepSeek R1 (Ollama) | ✅ |
|
|
226
|
+
| `ollama/codellama` | Code Llama (Ollama) | ✅ |
|
|
227
|
+
| `ollama/phi3` | Phi-3 (Ollama) | ✅ |
|
|
228
|
+
| `openrouter/auto` | Auto (All) | ❌ |
|
|
229
|
+
| `anthropic/claude-sonnet-4` | Claude Sonnet 4 | ❌ |
|
|
230
|
+
| `google/gemini-2.5-flash-preview` | Gemini 2.5 Flash | ❌ |
|
|
231
|
+
|
|
232
|
+
### Thinking Models
|
|
233
|
+
|
|
234
|
+
Extended reasoning support for: `anthropic/claude-sonnet-4`, `deepseek/deepseek-r1:free`, `google/gemini-2.5-flash-preview`, `ollama/deepseek-r1`.
|
|
235
|
+
|
|
236
|
+
### Dynamic Ollama Discovery
|
|
237
|
+
|
|
238
|
+
Ollama models can be discovered at runtime from a running server:
|
|
239
|
+
|
|
240
|
+
| Function | Description |
|
|
241
|
+
|---|---|
|
|
242
|
+
| `fetchOllamaModels(baseUrl?)` | Query Ollama `/api/tags`, return `Record<string, ModelConfig>` |
|
|
243
|
+
| `mergeOllamaModels(models)` | Merge discovered models into the static `MODELS` registry |
|
|
244
|
+
| `discoverOllamaModels(baseUrl?)` | One-shot: fetch + merge + return combined map |
|
|
245
|
+
|
|
246
|
+
## Exports Map
|
|
247
|
+
|
|
248
|
+
```json
|
|
249
|
+
{
|
|
250
|
+
".": "Main entry — streamOrigen, callOrigen, checkAuth, MODELS, Soul, etc.",
|
|
251
|
+
"./models": "MODELS registry, supportsThinking, isOllamaModel, getModelsByProvider",
|
|
252
|
+
"./soul": "Soul class, loadSoul, SoulConfig types",
|
|
253
|
+
"./adapter": "resolveModel, createEventStream, adaptTools, convertMessages"
|
|
254
|
+
}
|
|
255
|
+
```
|
|
256
|
+
|
|
257
|
+
## Architecture
|
|
258
|
+
|
|
259
|
+
```
|
|
260
|
+
┌─────────────────────────────────────────────────────┐
|
|
261
|
+
│ Your App │
|
|
262
|
+
│ streamOrigen(messages, context, config) │
|
|
263
|
+
└────────┬──────────────┬─────────────────────────────┘
|
|
264
|
+
│ │
|
|
265
|
+
┌────▼────┐ ┌─────▼──────┐
|
|
266
|
+
│ agent.ts│ │ adapter.ts │
|
|
267
|
+
│ Agent │ │ Tool adapt │
|
|
268
|
+
│ loop │ │ Model res. │
|
|
269
|
+
│ Auth │ │ Event xlate│
|
|
270
|
+
└────┬────┘ └─────┬──────┘
|
|
271
|
+
│ │
|
|
272
|
+
┌────▼──────────────▼────┐
|
|
273
|
+
│ pi-ai + pi-agent-core│
|
|
274
|
+
│ (LLM providers, │
|
|
275
|
+
│ streaming, tools) │
|
|
276
|
+
└───────────────────────┘
|
|
277
|
+
```
|
|
157
278
|
|
|
279
|
+
- **agent.ts** — Agent loop, auth checks, event types. Orchestrates `Agent` from pi-agent-core.
|
|
280
|
+
- **adapter.ts** — Bridges Origen's simple types to pi-ai/pi-agent-core. Tool adaptation, model resolution, event translation, eager event stream.
|
|
281
|
+
- **models.ts** — Model registry with UI-safe configs, thinking model detection, provider filtering.
|
|
282
|
+
- **soul.ts** — Soul.md RFC-1 parser with YAML front matter, profile overlays, prompt generation.
|
|
283
|
+
- **types.ts** — Zero-dependency types (D1Like, Citation, UsageInfo, ReadingContext).
|
|
284
|
+
|
|
285
|
+
## Changelog
|
|
286
|
+
|
|
287
|
+
### v0.5 (current)
|
|
288
|
+
- **Dynamic Ollama discovery**: `fetchOllamaModels()`, `mergeOllamaModels()`, `discoverOllamaModels()` pull live models from Ollama's `/api/tags` endpoint
|
|
289
|
+
- **Improved Ollama model catalog**: Added Llama 3.1, Mistral Nemo, Code Llama, Phi-3; updated context windows
|
|
290
|
+
- **Ollama provider in resolveModel**: Now tries `ollama` in the pi-ai registry loop
|
|
291
|
+
- **Exported `defaultCitationExtractor`**: Now importable for custom citation pipelines
|
|
292
|
+
- **Reduced `as any` casts**: Replaced with documented type assertions where pi-ai generics are too narrow
|
|
293
|
+
|
|
294
|
+
### v0.4
|
|
295
|
+
- **Zod validation**: Optional `inputSchema` on `OrigenTool` for runtime parameter validation
|
|
296
|
+
- **Qwen3 Coder**: Added `qwen/qwen3-coder:free` to model registry
|
|
297
|
+
- **getModelsForUI()**: UI-safe model configs without internal fields
|
|
298
|
+
- **getModelsByProvider()**: Filter models by provider prefix
|
|
299
|
+
- **UIModelConfig type**: Stripped model config safe for client-side use
|
|
300
|
+
|
|
301
|
+
### v0.3
|
|
158
302
|
- **Multi-provider**: OpenRouter, Ollama, Anthropic, Google, DeepSeek, Groq, xAI via pi-ai
|
|
159
303
|
- **Parallel tool execution**: Tools run concurrently by default
|
|
160
304
|
- **Abort support**: Pass `signal: AbortSignal` to cancel streaming
|
|
161
305
|
- **Soul.md personas**: Declarative persona definitions with profiles and moods
|
|
162
306
|
- **Provider-aware auth**: `getApiKey(provider)` resolves keys per-provider
|
|
307
|
+
- **Eager event stream**: `createEventStream()` subscribes before prompt to avoid race conditions
|
|
163
308
|
- **No more hardcoded SSE parser**: Delegate to pi-ai + pi-agent-core
|
|
164
309
|
|
|
165
310
|
## License
|
|
@@ -172,6 +172,11 @@ declare function convertMessages(messages: Array<{
|
|
|
172
172
|
}>): Message[];
|
|
173
173
|
/** Build a pi-ai Context from Origen's config. */
|
|
174
174
|
declare function buildContext(systemPrompt: string, messages: Message[], adaptedTools: AgentTool[]): Context;
|
|
175
|
+
/** Default citation extractor — [BOOK CHAPTER:VERSE] patterns (e.g., [GEN 1:1]).
|
|
176
|
+
* Bible-specific pattern. Consumers should provide their own extractCitations
|
|
177
|
+
* for non-biblical citation formats. Exported for reuse and testing.
|
|
178
|
+
*/
|
|
179
|
+
declare function defaultCitationExtractor(text: string): Citation[];
|
|
175
180
|
/** Translate a pi-agent-core AgentEvent into an Origen StreamEvent. */
|
|
176
181
|
declare function translateEvent(event: AgentEvent, extractCitations?: (text: string) => Citation[]): StreamEvent | null;
|
|
177
182
|
/**
|
|
@@ -201,4 +206,4 @@ extractCitations?: (text: string) => Citation[]): {
|
|
|
201
206
|
*/
|
|
202
207
|
declare function agentToStreamEvents(agent: any, extractCitations?: (text: string) => Citation[]): AsyncGenerator<StreamEvent>;
|
|
203
208
|
|
|
204
|
-
export { type AgentConfig as A, type Citation as C, type D1Like as D, type ModelResolutionOptions as M, type OrigenTool as O, type ReadingContext as R, type StreamEvent as S, type UsageInfo as U, type AgentResponse as a, type AuthCheckResult as b, type D1Provider as c, type ModelConfig as d, callOrigen as e, checkAuth as f, checkOpenRouterAuth as g, createEventStream as h,
|
|
209
|
+
export { type AgentConfig as A, type Citation as C, type D1Like as D, type ModelResolutionOptions as M, type OrigenTool as O, type ReadingContext as R, type StreamEvent as S, type UsageInfo as U, type AgentResponse as a, type AuthCheckResult as b, type D1Provider as c, type ModelConfig as d, callOrigen as e, checkAuth as f, checkOpenRouterAuth as g, createEventStream as h, defaultCitationExtractor as i, adaptTool as j, adaptTools as k, agentToStreamEvents as l, buildContext as m, convertMessages as n, resolveModel as r, streamOrigen as s, translateEvent as t };
|
package/dist/adapter.d.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import '@mariozechner/pi-ai';
|
|
2
2
|
import '@mariozechner/pi-agent-core';
|
|
3
|
-
export { M as ModelResolutionOptions,
|
|
3
|
+
export { M as ModelResolutionOptions, j as adaptTool, k as adaptTools, l as agentToStreamEvents, m as buildContext, n as convertMessages, h as createEventStream, i as defaultCitationExtractor, r as resolveModel, t as translateEvent } from './adapter-DXIT3O2f.js';
|
|
4
4
|
import 'zod';
|
|
5
5
|
import './models.js';
|
package/dist/adapter.js
CHANGED
|
@@ -5,9 +5,10 @@ import {
|
|
|
5
5
|
buildContext,
|
|
6
6
|
convertMessages,
|
|
7
7
|
createEventStream,
|
|
8
|
+
defaultCitationExtractor,
|
|
8
9
|
resolveModel,
|
|
9
10
|
translateEvent
|
|
10
|
-
} from "./chunk-
|
|
11
|
+
} from "./chunk-K3FE63XL.js";
|
|
11
12
|
export {
|
|
12
13
|
adaptTool,
|
|
13
14
|
adaptTools,
|
|
@@ -15,6 +16,7 @@ export {
|
|
|
15
16
|
buildContext,
|
|
16
17
|
convertMessages,
|
|
17
18
|
createEventStream,
|
|
19
|
+
defaultCitationExtractor,
|
|
18
20
|
resolveModel,
|
|
19
21
|
translateEvent
|
|
20
22
|
};
|
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
// src/models.ts
|
|
2
|
+
function buildStaticModels() {
|
|
3
|
+
const models = {};
|
|
4
|
+
models["openrouter/free"] = {
|
|
5
|
+
name: "Free (Auto)",
|
|
6
|
+
description: "Free \u2014 auto-selects best free model for your request",
|
|
7
|
+
free: true
|
|
8
|
+
};
|
|
9
|
+
models["google/gemma-4-31b-it:free"] = {
|
|
10
|
+
name: "Gemma 4 31B",
|
|
11
|
+
description: "Free \u2014 great quality for Bible study",
|
|
12
|
+
free: true
|
|
13
|
+
};
|
|
14
|
+
models["nvidia/nemotron-3-super-120b-a12b:free"] = {
|
|
15
|
+
name: "Nemotron 3 Super",
|
|
16
|
+
description: "Free \u2014 large model, strong reasoning",
|
|
17
|
+
free: true
|
|
18
|
+
};
|
|
19
|
+
models["deepseek/deepseek-r1:free"] = {
|
|
20
|
+
name: "DeepSeek R1 (Free)",
|
|
21
|
+
description: "Free \u2014 reasoning with thinking support",
|
|
22
|
+
free: true
|
|
23
|
+
};
|
|
24
|
+
models["qwen/qwen3-coder:free"] = {
|
|
25
|
+
name: "Qwen3 Coder",
|
|
26
|
+
description: "Free \u2014 480B parameters, excellent tool use",
|
|
27
|
+
free: true
|
|
28
|
+
};
|
|
29
|
+
models["openrouter/auto"] = {
|
|
30
|
+
name: "Auto (All)",
|
|
31
|
+
description: "Auto-selects best model (requires credits)",
|
|
32
|
+
free: false
|
|
33
|
+
};
|
|
34
|
+
models["anthropic/claude-sonnet-4"] = {
|
|
35
|
+
name: "Claude Sonnet 4",
|
|
36
|
+
description: "Premium \u2014 excellent quality + reasoning (requires credits)",
|
|
37
|
+
free: false
|
|
38
|
+
};
|
|
39
|
+
models["google/gemini-2.5-flash-preview"] = {
|
|
40
|
+
name: "Gemini 2.5 Flash",
|
|
41
|
+
description: "Premium \u2014 fast with thinking (requires credits)",
|
|
42
|
+
free: false
|
|
43
|
+
};
|
|
44
|
+
models["ollama/llama3"] = {
|
|
45
|
+
name: "Llama 3 (Ollama)",
|
|
46
|
+
description: "Local \u2014 Meta's Llama 3, requires Ollama",
|
|
47
|
+
free: true
|
|
48
|
+
};
|
|
49
|
+
models["ollama/llama3.1"] = {
|
|
50
|
+
name: "Llama 3.1 (Ollama)",
|
|
51
|
+
description: "Local \u2014 Llama 3.1 8B, 128K context, requires Ollama",
|
|
52
|
+
free: true
|
|
53
|
+
};
|
|
54
|
+
models["ollama/gemma3"] = {
|
|
55
|
+
name: "Gemma 3 (Ollama)",
|
|
56
|
+
description: "Local \u2014 Google's Gemma 3, requires Ollama",
|
|
57
|
+
free: true
|
|
58
|
+
};
|
|
59
|
+
models["ollama/mistral"] = {
|
|
60
|
+
name: "Mistral 7B (Ollama)",
|
|
61
|
+
description: "Local \u2014 Mistral's 7B model, requires Ollama",
|
|
62
|
+
free: true
|
|
63
|
+
};
|
|
64
|
+
models["ollama/qwen3"] = {
|
|
65
|
+
name: "Qwen 3 (Ollama)",
|
|
66
|
+
description: "Local \u2014 Alibaba's Qwen 3, requires Ollama",
|
|
67
|
+
free: true
|
|
68
|
+
};
|
|
69
|
+
models["ollama/deepseek-r1"] = {
|
|
70
|
+
name: "DeepSeek R1 (Ollama)",
|
|
71
|
+
description: "Local \u2014 reasoning model, requires Ollama",
|
|
72
|
+
free: true
|
|
73
|
+
};
|
|
74
|
+
models["ollama/codellama"] = {
|
|
75
|
+
name: "Code Llama (Ollama)",
|
|
76
|
+
description: "Local \u2014 code-focused Llama variant, requires Ollama",
|
|
77
|
+
free: true
|
|
78
|
+
};
|
|
79
|
+
models["ollama/phi3"] = {
|
|
80
|
+
name: "Phi-3 (Ollama)",
|
|
81
|
+
description: "Local \u2014 Microsoft's small but capable model, requires Ollama",
|
|
82
|
+
free: true
|
|
83
|
+
};
|
|
84
|
+
return models;
|
|
85
|
+
}
|
|
86
|
+
var MODELS = buildStaticModels();
|
|
87
|
+
var DEFAULT_MODEL_ID = "openrouter/free";
|
|
88
|
+
var DEFAULT_MODEL = DEFAULT_MODEL_ID;
|
|
89
|
+
var THINKING_MODELS = /* @__PURE__ */ new Set([
|
|
90
|
+
"anthropic/claude-sonnet-4",
|
|
91
|
+
"deepseek/deepseek-r1:free",
|
|
92
|
+
"google/gemini-2.5-flash-preview",
|
|
93
|
+
"ollama/deepseek-r1"
|
|
94
|
+
]);
|
|
95
|
+
function supportsThinking(model) {
|
|
96
|
+
return THINKING_MODELS.has(model);
|
|
97
|
+
}
|
|
98
|
+
function isOllamaModel(model) {
|
|
99
|
+
return model.startsWith("ollama/");
|
|
100
|
+
}
|
|
101
|
+
function getModelsByProvider(provider) {
|
|
102
|
+
return Object.keys(MODELS).filter((id) => id.startsWith(`${provider}/`));
|
|
103
|
+
}
|
|
104
|
+
function getModelsForUI() {
|
|
105
|
+
const uiModels = {};
|
|
106
|
+
for (const [id, config] of Object.entries(MODELS)) {
|
|
107
|
+
uiModels[id] = { name: config.name, description: config.description, free: config.free };
|
|
108
|
+
}
|
|
109
|
+
return uiModels;
|
|
110
|
+
}
|
|
111
|
+
var REASONING_FAMILIES = /* @__PURE__ */ new Set([
|
|
112
|
+
"deepseek-r1",
|
|
113
|
+
"deepseek-r1-distill",
|
|
114
|
+
"qwq",
|
|
115
|
+
"qwen3",
|
|
116
|
+
"kimi-k2",
|
|
117
|
+
"glm-5.1",
|
|
118
|
+
"gemma4"
|
|
119
|
+
]);
|
|
120
|
+
function describeOllamaModel(name, details, isCloud) {
|
|
121
|
+
const location = isCloud ? "Cloud" : "Local";
|
|
122
|
+
const family = details.family || name.split(":")[0].split("-")[0];
|
|
123
|
+
const params = details.parameter_size;
|
|
124
|
+
const quant = details.quantization_level;
|
|
125
|
+
const isReasoning = details.families?.some((f) => REASONING_FAMILIES.has(f)) ?? REASONING_FAMILIES.has(family);
|
|
126
|
+
const parts = [location, "\u2014"];
|
|
127
|
+
if (params && params !== "") {
|
|
128
|
+
parts.push(params);
|
|
129
|
+
}
|
|
130
|
+
if (quant && quant !== "") {
|
|
131
|
+
parts.push(`(${quant})`);
|
|
132
|
+
}
|
|
133
|
+
if (isReasoning) {
|
|
134
|
+
parts.push("reasoning");
|
|
135
|
+
}
|
|
136
|
+
parts.push("requires Ollama");
|
|
137
|
+
return parts.join(" ");
|
|
138
|
+
}
|
|
139
|
+
async function fetchOllamaModels(baseUrl = "http://localhost:11434") {
|
|
140
|
+
const tagsUrl = `${baseUrl.replace(/\/v1$/, "")}/api/tags`;
|
|
141
|
+
let response;
|
|
142
|
+
try {
|
|
143
|
+
response = await fetch(tagsUrl, { signal: AbortSignal.timeout(5e3) });
|
|
144
|
+
} catch {
|
|
145
|
+
return {};
|
|
146
|
+
}
|
|
147
|
+
if (!response.ok) {
|
|
148
|
+
return {};
|
|
149
|
+
}
|
|
150
|
+
const data = await response.json();
|
|
151
|
+
const discovered = {};
|
|
152
|
+
for (const model of data.models) {
|
|
153
|
+
const tagSuffix = model.name.includes(":") ? model.name.split(":").pop() : "";
|
|
154
|
+
const isCloud = tagSuffix === "cloud";
|
|
155
|
+
const baseName = model.name.split(":")[0];
|
|
156
|
+
const modelId = `ollama/${baseName}`;
|
|
157
|
+
if (model.details?.family === "nomic-bert" || model.details?.families?.includes("nomic-bert")) {
|
|
158
|
+
continue;
|
|
159
|
+
}
|
|
160
|
+
const description = describeOllamaModel(baseName, model.details, isCloud);
|
|
161
|
+
discovered[modelId] = {
|
|
162
|
+
name: model.details?.parameter_size ? `${baseName} (${model.details.parameter_size})` : baseName,
|
|
163
|
+
description,
|
|
164
|
+
free: true
|
|
165
|
+
};
|
|
166
|
+
if (tagSuffix && tagSuffix !== "latest") {
|
|
167
|
+
const fullId = `ollama/${model.name}`;
|
|
168
|
+
discovered[fullId] = {
|
|
169
|
+
name: model.name,
|
|
170
|
+
description,
|
|
171
|
+
free: true
|
|
172
|
+
};
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
return discovered;
|
|
176
|
+
}
|
|
177
|
+
function mergeOllamaModels(ollamaModels) {
|
|
178
|
+
for (const [id, config] of Object.entries(ollamaModels)) {
|
|
179
|
+
MODELS[id] = config;
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
async function discoverOllamaModels(baseUrl = "http://localhost:11434") {
|
|
183
|
+
const discovered = await fetchOllamaModels(baseUrl);
|
|
184
|
+
mergeOllamaModels(discovered);
|
|
185
|
+
return { ...MODELS };
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
export {
|
|
189
|
+
MODELS,
|
|
190
|
+
DEFAULT_MODEL_ID,
|
|
191
|
+
DEFAULT_MODEL,
|
|
192
|
+
THINKING_MODELS,
|
|
193
|
+
supportsThinking,
|
|
194
|
+
isOllamaModel,
|
|
195
|
+
getModelsByProvider,
|
|
196
|
+
getModelsForUI,
|
|
197
|
+
fetchOllamaModels,
|
|
198
|
+
mergeOllamaModels,
|
|
199
|
+
discoverOllamaModels
|
|
200
|
+
};
|
|
201
|
+
//# sourceMappingURL=chunk-GK5KZOHB.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/models.ts"],"sourcesContent":["/**\n * Origen model configuration.\n *\n * Static entries for cloud providers (OpenRouter, Anthropic, Google, etc.)\n * plus dynamic Ollama model discovery via GET /api/tags.\n *\n * Hardcoded Ollama entries serve as fallbacks when Ollama isn't reachable.\n * When connected, fetchOllamaModels() pulls the live model list and merges\n * it with the static entries.\n */\n\nimport { getModel } from \"@mariozechner/pi-ai\";\nimport type { Model, Api } from \"@mariozechner/pi-ai\";\nexport type { Model as ProviderModel, Api } from \"@mariozechner/pi-ai\";\n\n// ── Types ─────────────────────────────────────────────────────────────\n\nexport interface ModelConfig {\n name: string;\n description: string;\n free: boolean;\n}\n\n/** UI-facing model config — safe to send to the client. Strips internal fields. */\nexport type UIModelConfig = ModelConfig;\n\n/** Ollama /api/tags response shape. */\ninterface OllamaModelResponse {\n models: Array<{\n name: string;\n model: string;\n modified_at: string;\n size: number;\n digest: string;\n details: {\n parent_model: string;\n format: string;\n family: string;\n families: string[] | null;\n parameter_size: string;\n quantization_level: string;\n };\n }>;\n}\n\n// ── Static model registry (cloud + hardcoded Ollama defaults) ────────\n\nfunction buildStaticModels(): Record<string, ModelConfig> {\n const models: Record<string, ModelConfig> = {};\n\n // ── OpenRouter (free tier) ───────────────────────────\n models[\"openrouter/free\"] = {\n name: \"Free (Auto)\",\n description: \"Free — auto-selects best free model for your request\",\n free: true,\n };\n models[\"google/gemma-4-31b-it:free\"] = {\n name: \"Gemma 4 31B\",\n description: \"Free — great quality for Bible study\",\n free: true,\n };\n models[\"nvidia/nemotron-3-super-120b-a12b:free\"] = {\n name: \"Nemotron 3 Super\",\n description: \"Free — large model, strong reasoning\",\n free: true,\n };\n 
models[\"deepseek/deepseek-r1:free\"] = {\n name: \"DeepSeek R1 (Free)\",\n description: \"Free — reasoning with thinking support\",\n free: true,\n };\n models[\"qwen/qwen3-coder:free\"] = {\n name: \"Qwen3 Coder\",\n description: \"Free — 480B parameters, excellent tool use\",\n free: true,\n };\n\n // ── OpenRouter (premium) ─────────────────────────────\n models[\"openrouter/auto\"] = {\n name: \"Auto (All)\",\n description: \"Auto-selects best model (requires credits)\",\n free: false,\n };\n models[\"anthropic/claude-sonnet-4\"] = {\n name: \"Claude Sonnet 4\",\n description: \"Premium — excellent quality + reasoning (requires credits)\",\n free: false,\n };\n models[\"google/gemini-2.5-flash-preview\"] = {\n name: \"Gemini 2.5 Flash\",\n description: \"Premium — fast with thinking (requires credits)\",\n free: false,\n };\n\n // ── Ollama (local, always free — fallback defaults) ──\n models[\"ollama/llama3\"] = {\n name: \"Llama 3 (Ollama)\",\n description: \"Local — Meta's Llama 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/llama3.1\"] = {\n name: \"Llama 3.1 (Ollama)\",\n description: \"Local — Llama 3.1 8B, 128K context, requires Ollama\",\n free: true,\n };\n models[\"ollama/gemma3\"] = {\n name: \"Gemma 3 (Ollama)\",\n description: \"Local — Google's Gemma 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/mistral\"] = {\n name: \"Mistral 7B (Ollama)\",\n description: \"Local — Mistral's 7B model, requires Ollama\",\n free: true,\n };\n models[\"ollama/qwen3\"] = {\n name: \"Qwen 3 (Ollama)\",\n description: \"Local — Alibaba's Qwen 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/deepseek-r1\"] = {\n name: \"DeepSeek R1 (Ollama)\",\n description: \"Local — reasoning model, requires Ollama\",\n free: true,\n };\n models[\"ollama/codellama\"] = {\n name: \"Code Llama (Ollama)\",\n description: \"Local — code-focused Llama variant, requires Ollama\",\n free: true,\n };\n models[\"ollama/phi3\"] = {\n name: \"Phi-3 
(Ollama)\",\n description: \"Local — Microsoft's small but capable model, requires Ollama\",\n free: true,\n };\n\n return models;\n}\n\nexport const MODELS: Record<string, ModelConfig> = buildStaticModels();\nexport type ModelId = keyof typeof MODELS;\n\n/** Default model — free router, works with $0 credits */\nexport const DEFAULT_MODEL_ID: ModelId = \"openrouter/free\";\n\n/** Backward compat alias */\nexport const DEFAULT_MODEL: ModelId = DEFAULT_MODEL_ID;\n\n/** Models that support extended thinking */\nexport const THINKING_MODELS: ReadonlySet<string> = new Set<string>([\n \"anthropic/claude-sonnet-4\",\n \"deepseek/deepseek-r1:free\",\n \"google/gemini-2.5-flash-preview\",\n \"ollama/deepseek-r1\",\n]);\n\n/** Check if a model supports extended thinking */\nexport function supportsThinking(model: string): boolean {\n return THINKING_MODELS.has(model);\n}\n\n/** Check if a model is an Ollama model */\nexport function isOllamaModel(model: string): boolean {\n return model.startsWith(\"ollama/\");\n}\n\n/** Get all model IDs for a specific provider prefix */\nexport function getModelsByProvider(provider: string): string[] {\n return Object.keys(MODELS).filter((id) => id.startsWith(`${provider}/`));\n}\n\n/** Get models as a simple UI map (name, description, free). No internal fields. */\nexport function getModelsForUI(): Record<string, UIModelConfig> {\n const uiModels: Record<string, UIModelConfig> = {};\n for (const [id, config] of Object.entries(MODELS)) {\n uiModels[id] = { name: config.name, description: config.description, free: config.free };\n }\n return uiModels;\n}\n\n// ── Dynamic Ollama discovery ─────────────────────────────────────────\n\n/** Known reasoning model families — used to tag discovered models. */\nconst REASONING_FAMILIES = new Set([\n \"deepseek-r1\", \"deepseek-r1-distill\", \"qwq\", \"qwen3\", \"kimi-k2\",\n \"glm-5.1\", \"gemma4\",\n]);\n\n/** Derive a human-readable description from Ollama model details. 
*/\nfunction describeOllamaModel(\n name: string,\n details: OllamaModelResponse[\"models\"][number][\"details\"],\n isCloud: boolean,\n): string {\n const location = isCloud ? \"Cloud\" : \"Local\";\n const family = details.family || name.split(\":\")[0].split(\"-\")[0];\n const params = details.parameter_size;\n const quant = details.quantization_level;\n const isReasoning = details.families?.some((f) => REASONING_FAMILIES.has(f)) ?? REASONING_FAMILIES.has(family);\n\n const parts: string[] = [location, \"—\"];\n\n if (params && params !== \"\") {\n parts.push(params);\n }\n if (quant && quant !== \"\") {\n parts.push(`(${quant})`);\n }\n if (isReasoning) {\n parts.push(\"reasoning\");\n }\n parts.push(\"requires Ollama\");\n\n return parts.join(\" \");\n}\n\n/**\n * Fetch available models from a running Ollama server.\n *\n * Calls GET /api/tags on the Ollama server and returns model configs\n * merged with the static defaults. Cloud models (e.g., foo:cloud)\n * are included alongside local models.\n *\n * @param baseUrl - Ollama server URL (default: http://localhost:11434)\n * @returns Object with discovered Ollama model configs (keyed by \"ollama/<name>\")\n */\nexport async function fetchOllamaModels(\n baseUrl: string = \"http://localhost:11434\",\n): Promise<Record<string, ModelConfig>> {\n const tagsUrl = `${baseUrl.replace(/\\/v1$/, \"\")}/api/tags`;\n\n let response: Response;\n try {\n response = await fetch(tagsUrl, { signal: AbortSignal.timeout(5000) });\n } catch {\n // Ollama not reachable — return empty, callers use static defaults\n return {};\n }\n\n if (!response.ok) {\n return {};\n }\n\n const data: OllamaModelResponse = await response.json();\n const discovered: Record<string, ModelConfig> = {};\n\n for (const model of data.models) {\n // Strip tag suffix for a cleaner ID (e.g., \"llama3.2:latest\" → \"llama3.2\")\n const tagSuffix = model.name.includes(\":\") ? 
model.name.split(\":\").pop() : \"\";\n const isCloud = tagSuffix === \"cloud\";\n const baseName = model.name.split(\":\")[0];\n const modelId = `ollama/${baseName}`;\n\n // Skip embedding models\n if (model.details?.family === \"nomic-bert\" || model.details?.families?.includes(\"nomic-bert\")) {\n continue;\n }\n\n const description = describeOllamaModel(baseName, model.details, isCloud);\n\n discovered[modelId] = {\n name: model.details?.parameter_size\n ? `${baseName} (${model.details.parameter_size})`\n : baseName,\n description,\n free: true,\n };\n\n // If there's a tag like :cloud or :latest, also register the full tagged name\n if (tagSuffix && tagSuffix !== \"latest\") {\n const fullId = `ollama/${model.name}`;\n discovered[fullId] = {\n name: model.name,\n description,\n free: true,\n };\n }\n }\n\n return discovered;\n}\n\n/**\n * Merge dynamically discovered Ollama models into the static MODELS registry.\n *\n * Static defaults are kept as fallbacks. Discovered models override\n * entries with the same key (e.g., \"ollama/llama3\" from the server\n * replaces the hardcoded entry with live data).\n *\n * @param ollamaModels - Models returned by fetchOllamaModels()\n */\nexport function mergeOllamaModels(ollamaModels: Record<string, ModelConfig>): void {\n for (const [id, config] of Object.entries(ollamaModels)) {\n MODELS[id] = config;\n }\n}\n\n/**\n * One-shot: fetch Ollama models and merge them into the registry.\n * Returns the combined model map.\n *\n * @param baseUrl - Ollama server URL (default: http://localhost:11434)\n */\nexport async function discoverOllamaModels(\n baseUrl: string = \"http://localhost:11434\",\n): Promise<Record<string, ModelConfig>> {\n const discovered = await fetchOllamaModels(baseUrl);\n mergeOllamaModels(discovered);\n return { ...MODELS 
};\n}"],"mappings":";AA+CA,SAAS,oBAAiD;AACxD,QAAM,SAAsC,CAAC;AAG7C,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,4BAA4B,IAAI;AAAA,IACrC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,wCAAwC,IAAI;AAAA,IACjD,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,uBAAuB,IAAI;AAAA,IAChC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,iCAAiC,IAAI;AAAA,IAC1C,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,gBAAgB,IAAI;AAAA,IACzB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,cAAc,IAAI;AAAA,IACvB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,oBAAoB,IAAI;AAAA,IAC7B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,kBAAkB,IAAI;AAAA,IAC3B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,aAAa,IAAI;AAAA,IACtB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAEA,SAAO;AACT;AAEO,IAAM,SAAsC,kBAAkB;AAI9D,IAAM,mBAA4B;AAGlC,IAAM,gBAAyB;AAG/B,IAAM,kBAAuC,oBAAI,IAAY;AAAA,EAClE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAGM,SAAS,iBAAiB,OAAwB;AACvD,SAAO,gBAAgB,IAAI,KAAK;AAClC;AAGO,SAAS,cAAc,OAAwB;AACpD,SAAO,MAAM,WAAW,SAAS;AACnC;AAGO,SAAS,oBAAoB,UAA4B;AAC9D,SAAO,OAAO,KAAK,MAAM,EAAE,OAAO,CAAC,OAAO,GAAG,WAAW,GAAG,QAAQ,GAAG,CAAC;AACzE;AAGO,SAAS,iBAAgD;AAC9D,QAAM,WAA0C,CAAC;AACjD,aAAW,CAAC,IAAI,MAAM,KAAK,OAAO,QAAQ,MAAM,GAAG;AACjD,aAAS,EAAE,IAAI,EAAE,MAAM,OAAO,MAAM,aAAa,OAAO,aAAa,MAAM,OAAO,KAAK;AAAA,EACzF;AACA,SAAO;AACT;AAKA,IAAM,qBAAqB,oBAAI,IAAI;AAAA,EACjC;AAAA,EAAe;AAAA,EAAuB;AAAA,EAAO;AAAA,EAAS;AAAA,EACtD;AAAA,EAAW;AACb,CAAC;AAGD,SAAS,oBACP,MACA,SACA,SACQ;
AACR,QAAM,WAAW,UAAU,UAAU;AACrC,QAAM,SAAS,QAAQ,UAAU,KAAK,MAAM,GAAG,EAAE,CAAC,EAAE,MAAM,GAAG,EAAE,CAAC;AAChE,QAAM,SAAS,QAAQ;AACvB,QAAM,QAAQ,QAAQ;AACtB,QAAM,cAAc,QAAQ,UAAU,KAAK,CAAC,MAAM,mBAAmB,IAAI,CAAC,CAAC,KAAK,mBAAmB,IAAI,MAAM;AAE7G,QAAM,QAAkB,CAAC,UAAU,QAAG;AAEtC,MAAI,UAAU,WAAW,IAAI;AAC3B,UAAM,KAAK,MAAM;AAAA,EACnB;AACA,MAAI,SAAS,UAAU,IAAI;AACzB,UAAM,KAAK,IAAI,KAAK,GAAG;AAAA,EACzB;AACA,MAAI,aAAa;AACf,UAAM,KAAK,WAAW;AAAA,EACxB;AACA,QAAM,KAAK,iBAAiB;AAE5B,SAAO,MAAM,KAAK,GAAG;AACvB;AAYA,eAAsB,kBACpB,UAAkB,0BACoB;AACtC,QAAM,UAAU,GAAG,QAAQ,QAAQ,SAAS,EAAE,CAAC;AAE/C,MAAI;AACJ,MAAI;AACF,eAAW,MAAM,MAAM,SAAS,EAAE,QAAQ,YAAY,QAAQ,GAAI,EAAE,CAAC;AAAA,EACvE,QAAQ;AAEN,WAAO,CAAC;AAAA,EACV;AAEA,MAAI,CAAC,SAAS,IAAI;AAChB,WAAO,CAAC;AAAA,EACV;AAEA,QAAM,OAA4B,MAAM,SAAS,KAAK;AACtD,QAAM,aAA0C,CAAC;AAEjD,aAAW,SAAS,KAAK,QAAQ;AAE/B,UAAM,YAAY,MAAM,KAAK,SAAS,GAAG,IAAI,MAAM,KAAK,MAAM,GAAG,EAAE,IAAI,IAAI;AAC3E,UAAM,UAAU,cAAc;AAC9B,UAAM,WAAW,MAAM,KAAK,MAAM,GAAG,EAAE,CAAC;AACxC,UAAM,UAAU,UAAU,QAAQ;AAGlC,QAAI,MAAM,SAAS,WAAW,gBAAgB,MAAM,SAAS,UAAU,SAAS,YAAY,GAAG;AAC7F;AAAA,IACF;AAEA,UAAM,cAAc,oBAAoB,UAAU,MAAM,SAAS,OAAO;AAExE,eAAW,OAAO,IAAI;AAAA,MACpB,MAAM,MAAM,SAAS,iBACjB,GAAG,QAAQ,KAAK,MAAM,QAAQ,cAAc,MAC5C;AAAA,MACJ;AAAA,MACA,MAAM;AAAA,IACR;AAGA,QAAI,aAAa,cAAc,UAAU;AACvC,YAAM,SAAS,UAAU,MAAM,IAAI;AACnC,iBAAW,MAAM,IAAI;AAAA,QACnB,MAAM,MAAM;AAAA,QACZ;AAAA,QACA,MAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAWO,SAAS,kBAAkB,cAAiD;AACjF,aAAW,CAAC,IAAI,MAAM,KAAK,OAAO,QAAQ,YAAY,GAAG;AACvD,WAAO,EAAE,IAAI;AAAA,EACf;AACF;AAQA,eAAsB,qBACpB,UAAkB,0BACoB;AACtC,QAAM,aAAa,MAAM,kBAAkB,OAAO;AAClD,oBAAkB,UAAU;AAC5B,SAAO,EAAE,GAAG,OAAO;AACrB;","names":[]}
|
|
@@ -7,6 +7,9 @@ function adaptTool(tool, getD1) {
|
|
|
7
7
|
// Convert JSON schema to TypeBox format — pi-agent-core uses TypeBox
|
|
8
8
|
// but accepts plain JSON schemas for the tool definition sent to the LLM.
|
|
9
9
|
// We provide parameters as a TypeBox-like schema.
|
|
10
|
+
// OrigenTool uses plain JSON schema objects; pi-agent-core expects TSchema (TypeBox).
|
|
11
|
+
// TypeBox accepts plain JSON schemas at runtime — the type mismatch is cosmetic.
|
|
12
|
+
// We widen to TSchema to satisfy the type system while preserving runtime correctness.
|
|
10
13
|
parameters: {
|
|
11
14
|
type: "object",
|
|
12
15
|
...tool.parameters
|
|
@@ -37,6 +40,18 @@ var OLLAMA_MODELS = {
|
|
|
37
40
|
contextWindow: 8192,
|
|
38
41
|
maxTokens: 4096
|
|
39
42
|
},
|
|
43
|
+
"ollama/llama3.1": {
|
|
44
|
+
id: "llama3.1",
|
|
45
|
+
name: "Llama 3.1 (Ollama)",
|
|
46
|
+
api: "openai-completions",
|
|
47
|
+
provider: "ollama",
|
|
48
|
+
baseUrl: "http://localhost:11434/v1",
|
|
49
|
+
reasoning: false,
|
|
50
|
+
input: ["text"],
|
|
51
|
+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
52
|
+
contextWindow: 131072,
|
|
53
|
+
maxTokens: 4096
|
|
54
|
+
},
|
|
40
55
|
"ollama/gemma3": {
|
|
41
56
|
id: "gemma3",
|
|
42
57
|
name: "Gemma 3 (Ollama)",
|
|
@@ -46,12 +61,12 @@ var OLLAMA_MODELS = {
|
|
|
46
61
|
reasoning: false,
|
|
47
62
|
input: ["text"],
|
|
48
63
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
49
|
-
contextWindow:
|
|
50
|
-
maxTokens:
|
|
64
|
+
contextWindow: 131072,
|
|
65
|
+
maxTokens: 8192
|
|
51
66
|
},
|
|
52
67
|
"ollama/mistral": {
|
|
53
68
|
id: "mistral",
|
|
54
|
-
name: "Mistral (Ollama)",
|
|
69
|
+
name: "Mistral 7B (Ollama)",
|
|
55
70
|
api: "openai-completions",
|
|
56
71
|
provider: "ollama",
|
|
57
72
|
baseUrl: "http://localhost:11434/v1",
|
|
@@ -61,6 +76,18 @@ var OLLAMA_MODELS = {
|
|
|
61
76
|
contextWindow: 32768,
|
|
62
77
|
maxTokens: 4096
|
|
63
78
|
},
|
|
79
|
+
"ollama/mistral-nemo": {
|
|
80
|
+
id: "mistral-nemo",
|
|
81
|
+
name: "Mistral Nemo (Ollama)",
|
|
82
|
+
api: "openai-completions",
|
|
83
|
+
provider: "ollama",
|
|
84
|
+
baseUrl: "http://localhost:11434/v1",
|
|
85
|
+
reasoning: false,
|
|
86
|
+
input: ["text"],
|
|
87
|
+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
88
|
+
contextWindow: 131072,
|
|
89
|
+
maxTokens: 4096
|
|
90
|
+
},
|
|
64
91
|
"ollama/qwen3": {
|
|
65
92
|
id: "qwen3",
|
|
66
93
|
name: "Qwen 3 (Ollama)",
|
|
@@ -70,8 +97,8 @@ var OLLAMA_MODELS = {
|
|
|
70
97
|
reasoning: false,
|
|
71
98
|
input: ["text"],
|
|
72
99
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
73
|
-
contextWindow:
|
|
74
|
-
maxTokens:
|
|
100
|
+
contextWindow: 131072,
|
|
101
|
+
maxTokens: 8192
|
|
75
102
|
},
|
|
76
103
|
"ollama/deepseek-r1": {
|
|
77
104
|
id: "deepseek-r1",
|
|
@@ -82,8 +109,32 @@ var OLLAMA_MODELS = {
|
|
|
82
109
|
reasoning: true,
|
|
83
110
|
input: ["text"],
|
|
84
111
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
85
|
-
contextWindow:
|
|
112
|
+
contextWindow: 131072,
|
|
86
113
|
maxTokens: 8192
|
|
114
|
+
},
|
|
115
|
+
"ollama/codellama": {
|
|
116
|
+
id: "codellama",
|
|
117
|
+
name: "Code Llama (Ollama)",
|
|
118
|
+
api: "openai-completions",
|
|
119
|
+
provider: "ollama",
|
|
120
|
+
baseUrl: "http://localhost:11434/v1",
|
|
121
|
+
reasoning: false,
|
|
122
|
+
input: ["text"],
|
|
123
|
+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
124
|
+
contextWindow: 16384,
|
|
125
|
+
maxTokens: 4096
|
|
126
|
+
},
|
|
127
|
+
"ollama/phi3": {
|
|
128
|
+
id: "phi3",
|
|
129
|
+
name: "Phi-3 (Ollama)",
|
|
130
|
+
api: "openai-completions",
|
|
131
|
+
provider: "ollama",
|
|
132
|
+
baseUrl: "http://localhost:11434/v1",
|
|
133
|
+
reasoning: false,
|
|
134
|
+
input: ["text"],
|
|
135
|
+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
136
|
+
contextWindow: 131072,
|
|
137
|
+
maxTokens: 4096
|
|
87
138
|
}
|
|
88
139
|
};
|
|
89
140
|
var DEFAULT_MODEL = {
|
|
@@ -146,7 +197,7 @@ function resolveModel(modelId, options) {
|
|
|
146
197
|
}
|
|
147
198
|
};
|
|
148
199
|
}
|
|
149
|
-
const providers = ["openrouter", "anthropic", "google", "openai", "deepseek", "groq", "xai"];
|
|
200
|
+
const providers = ["openrouter", "anthropic", "google", "openai", "deepseek", "groq", "xai", "ollama"];
|
|
150
201
|
for (const provider of providers) {
|
|
151
202
|
try {
|
|
152
203
|
const model = getModel(provider, modelId);
|
|
@@ -289,8 +340,9 @@ export {
|
|
|
289
340
|
resolveModel,
|
|
290
341
|
convertMessages,
|
|
291
342
|
buildContext,
|
|
343
|
+
defaultCitationExtractor,
|
|
292
344
|
translateEvent,
|
|
293
345
|
createEventStream,
|
|
294
346
|
agentToStreamEvents
|
|
295
347
|
};
|
|
296
|
-
//# sourceMappingURL=chunk-
|
|
348
|
+
//# sourceMappingURL=chunk-K3FE63XL.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/adapter.ts"],"sourcesContent":["/**\n * Adapter: bridges Origen's simple types to pi-agent-core/pi-ai types.\n *\n * - OrigenTool → AgentTool (injects D1Provider)\n * - pi-ai Model resolution (OpenRouter, Ollama, Anthropic, Google)\n * - StreamEvent translation (AgentEvent → Origen's StreamEvent)\n */\n\nimport { getModel } from \"@mariozechner/pi-ai\";\nimport type { Model, Api, Message, Context, Tool } from \"@mariozechner/pi-ai\";\nimport type { AgentTool, AgentEvent, AgentMessage } from \"@mariozechner/pi-agent-core\";\nimport type { TSchema } from \"typebox\";\nimport type { OrigenTool, StreamEvent } from \"./agent\";\nimport type { D1Provider, Citation, UsageInfo } from \"./types\";\n\n// ── Tool adapter ─────────────────────────────────────────────────────\n\n/**\n * Convert an OrigenTool into a pi-agent-core AgentTool.\n * The D1Provider is captured in closure so the tool's execute gets it.\n */\nexport function adaptTool(tool: OrigenTool, getD1: D1Provider): AgentTool {\n return {\n name: tool.name,\n description: tool.description,\n // Convert JSON schema to TypeBox format — pi-agent-core uses TypeBox\n // but accepts plain JSON schemas for the tool definition sent to the LLM.\n // We provide parameters as a TypeBox-like schema.\n // OrigenTool uses plain JSON schema objects; pi-agent-core expects TSchema (TypeBox).\n // TypeBox accepts plain JSON schemas at runtime — the type mismatch is cosmetic.\n // We widen to TSchema to satisfy the type system while preserving runtime correctness.\n parameters: {\n type: \"object\",\n ...tool.parameters,\n } as TSchema,\n label: tool.name,\n execute: async (_toolCallId, params, _signal) => {\n const result = await tool.execute(params as Record<string, unknown>, getD1);\n return {\n content: [{ type: \"text\" as const, text: result }],\n details: {},\n };\n },\n };\n}\n\n/** Adapt all OrigenTools for an Agent instance. 
*/\nexport function adaptTools(tools: OrigenTool[], getD1: D1Provider): AgentTool[] {\n return tools.map((t) => adaptTool(t, getD1));\n}\n\n// ── Model resolution ──────────────────────────────────────────────────\n\nexport interface ModelResolutionOptions {\n /** Ollama base URL, e.g. \"http://localhost:11434/v1\" */\n ollamaBaseUrl?: string;\n}\n\n/** Known Ollama models with their config defaults.\n * Context windows and maxTokens are conservative defaults — actual values\n * depend on the specific quantization the user has installed.\n * For models not in this list, resolveModel() creates a generic Ollama config.\n */\nconst OLLAMA_MODELS: Record<string, Partial<Model<Api>>> = {\n \"ollama/llama3\": {\n id: \"llama3\",\n name: \"Llama 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 8192,\n maxTokens: 4096,\n },\n \"ollama/llama3.1\": {\n id: \"llama3.1\",\n name: \"Llama 3.1 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 4096,\n },\n \"ollama/gemma3\": {\n id: \"gemma3\",\n name: \"Gemma 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 8192,\n },\n \"ollama/mistral\": {\n id: \"mistral\",\n name: \"Mistral 7B (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 32768,\n maxTokens: 4096,\n },\n 
\"ollama/mistral-nemo\": {\n id: \"mistral-nemo\",\n name: \"Mistral Nemo (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 4096,\n },\n \"ollama/qwen3\": {\n id: \"qwen3\",\n name: \"Qwen 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 8192,\n },\n \"ollama/deepseek-r1\": {\n id: \"deepseek-r1\",\n name: \"DeepSeek R1 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: true,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 8192,\n },\n \"ollama/codellama\": {\n id: \"codellama\",\n name: \"Code Llama (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 16384,\n maxTokens: 4096,\n },\n \"ollama/phi3\": {\n id: \"phi3\",\n name: \"Phi-3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 131072,\n maxTokens: 4096,\n },\n};\n\nconst DEFAULT_MODEL: Model<Api> = {\n id: \"openrouter/free\",\n name: \"Free (Auto)\",\n api: \"openai-completions\",\n provider: \"openrouter\",\n baseUrl: \"https://openrouter.ai/api/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 128000,\n maxTokens: 4096,\n};\n\n/**\n * Resolve a 
model ID string to a pi-ai Model object.\n * Tries pi-ai's registry first, then falls back to built-in Ollama definitions.\n */\nexport function resolveModel(modelId: string, options?: ModelResolutionOptions): Model<Api> {\n // Try Ollama models first\n if (modelId.startsWith(\"ollama/\")) {\n const ollamaDef = OLLAMA_MODELS[modelId];\n if (ollamaDef) {\n const baseUrl = options?.ollamaBaseUrl ?? ollamaDef.baseUrl ?? \"http://localhost:11434/v1\";\n return {\n ...DEFAULT_MODEL,\n ...ollamaDef,\n baseUrl,\n compat: {\n supportsStore: false,\n supportsDeveloperRole: false,\n supportsReasoningEffort: false,\n supportsUsageInStreaming: false,\n maxTokensField: \"max_tokens\",\n requiresToolResultName: false,\n requiresAssistantAfterToolResult: false,\n requiresThinkingAsText: true,\n requiresReasoningContentOnAssistantMessages: false,\n thinkingFormat: \"openai\",\n supportsStrictMode: false,\n supportsLongCacheRetention: false,\n },\n } as Model<Api>;\n }\n // Generic Ollama model: user typed a custom model name\n const customId = modelId.replace(\"ollama/\", \"\");\n return {\n ...DEFAULT_MODEL,\n id: customId,\n name: `${customId} (Ollama)`,\n provider: \"ollama\",\n baseUrl: options?.ollamaBaseUrl ?? 
\"http://localhost:11434/v1\",\n compat: {\n supportsStore: false,\n supportsDeveloperRole: false,\n supportsReasoningEffort: false,\n supportsUsageInStreaming: false,\n maxTokensField: \"max_tokens\",\n requiresToolResultName: false,\n requiresAssistantAfterToolResult: false,\n requiresThinkingAsText: true,\n requiresReasoningContentOnAssistantMessages: false,\n thinkingFormat: \"openai\",\n supportsStrictMode: false,\n supportsLongCacheRetention: false,\n },\n } as Model<Api>;\n }\n\n // Try pi-ai's model registry (OpenRouter, Anthropic, Google, etc.)\n // pi-ai groups by provider, so we try known providers.\n // getModel's type signature requires specific (KnownProvider, ModelKey) pairs\n // for full type inference, but we're resolving dynamically at runtime.\n // The try/catch handles any invalid provider+model combinations.\n const providers: string[] = [\"openrouter\", \"anthropic\", \"google\", \"openai\", \"deepseek\", \"groq\", \"xai\", \"ollama\"];\n for (const provider of providers) {\n try {\n // Type assertion required: getModel's generics are too narrow for dynamic lookup.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const model = getModel(provider as any, modelId as any);\n if (model) return model as Model<Api>;\n } catch {\n // Not found in this provider, try next\n }\n }\n\n // Fallback: create a generic OpenRouter-compatible model\n return {\n ...DEFAULT_MODEL,\n id: modelId,\n name: modelId,\n };\n}\n\n// ── Message conversion ────────────────────────────────────────────────\n\n/** Convert Origen's simple messages to pi-ai Message format. */\nexport function convertMessages(\n messages: Array<{ role: \"user\" | \"assistant\"; content: string }>\n): Message[] {\n // Origen uses simple string messages. 
pi-ai's Message union type includes\n // UserMessage (content: string | ...[]) and AssistantMessage (content: ...[]).\n // Our messages have role \"user\" (valid UserMessage) or \"assistant\" (simplified —\n // real AssistantMessages have structured content, but pi-agent-core accepts\n // simplified messages at runtime). We cast to satisfy TypeScript while\n // maintaining runtime correctness.\n return messages.map((m) => ({\n role: m.role,\n content: m.content,\n timestamp: Date.now(),\n })) as Message[];\n}\n\n// ── Context builder ───────────────────────────────────────────────────\n\n/** Build a pi-ai Context from Origen's config. */\nexport function buildContext(\n systemPrompt: string,\n messages: Message[],\n adaptedTools: AgentTool[]\n): Context {\n return {\n systemPrompt,\n messages,\n tools: adaptedTools.map((t) => ({\n name: t.name,\n description: t.description,\n parameters: t.parameters,\n })),\n };\n}\n\n// ── Event translation ─────────────────────────────────────────────────\n\n/** Default citation extractor — [BOOK CHAPTER:VERSE] patterns (e.g., [GEN 1:1]).\n * Bible-specific pattern. Consumers should provide their own extractCitations\n * for non-biblical citation formats. Exported for reuse and testing.\n */\nexport function defaultCitationExtractor(text: string): Citation[] {\n const citations: Citation[] = [];\n const regex = /\\[([A-Z]{3})\\s+(\\d+):(\\d+)\\]/g;\n let match;\n while ((match = regex.exec(text)) !== null) {\n citations.push({ book: match[1], chapter: parseInt(match[2]), verse: parseInt(match[3]) });\n }\n return citations;\n}\n\n/** Translate a pi-agent-core AgentEvent into an Origen StreamEvent. 
*/\nexport function translateEvent(\n event: AgentEvent,\n extractCitations?: (text: string) => Citation[]\n): StreamEvent | null {\n switch (event.type) {\n case \"message_update\": {\n const assistantEvent = event.assistantMessageEvent;\n if (assistantEvent.type === \"text_delta\") {\n return { type: \"text\" as const, content: assistantEvent.delta };\n }\n if (assistantEvent.type === \"thinking_delta\") {\n return { type: \"reasoning\" as const, content: assistantEvent.delta };\n }\n return null;\n }\n case \"tool_execution_start\": {\n return {\n type: \"tool_call\" as const,\n name: event.toolName,\n args: event.args as Record<string, unknown>,\n };\n }\n case \"tool_execution_end\": {\n const resultText = event.result?.content\n ?.filter((c: any) => c.type === \"text\")\n .map((c: any) => c.text)\n .join(\"\\n\") ?? \"\";\n return {\n type: \"tool_result\" as const,\n name: event.toolName,\n result: resultText,\n };\n }\n case \"agent_end\": {\n // Find the final assistant message\n const assistantMsg = event.messages\n .filter((m): m is any => m.role === \"assistant\")\n .pop();\n const text = assistantMsg?.content\n ?.filter((c: any) => c.type === \"text\")\n .map((c: any) => c.text)\n .join(\"\") ?? \"\";\n const usage: UsageInfo | undefined = assistantMsg?.usage\n ? {\n promptTokens: assistantMsg.usage.input,\n completionTokens: assistantMsg.usage.output,\n totalCost: assistantMsg.usage.cost?.total,\n }\n : undefined;\n const citFn = extractCitations ?? defaultCitationExtractor;\n // Check for error\n if (assistantMsg?.stopReason === \"error\" || assistantMsg?.stopReason === \"aborted\") {\n return {\n type: \"error\" as const,\n message: assistantMsg.errorMessage ?? 
\"Agent encountered an error\",\n };\n }\n return {\n type: \"done\" as const,\n message: text,\n citations: citFn(text),\n usage,\n };\n }\n default:\n return null;\n }\n}\n\n/**\n * Eagerly subscribe to an Agent and return an async iterable of Origen StreamEvents.\n *\n * CRITICAL: The subscription is created synchronously when this function is called,\n * BEFORE agent.prompt() starts. This avoids the race condition where events\n * emitted during prompt() are missed if subscription happens after.\n *\n * Usage:\n * const { stream, unsubscribe } = createEventStream(agent, extractCitations);\n * agent.prompt(messages); // events flow into stream via active subscription\n * for await (const event of stream) { ... }\n */\nexport function createEventStream(\n agent: any, // Agent from pi-agent-core\n extractCitations?: (text: string) => Citation[]\n): {\n stream: AsyncGenerator<StreamEvent>;\n unsubscribe: () => void;\n} {\n const queue: StreamEvent[] = [];\n let resolve: (() => void) | null = null;\n let done = false;\n\n // Subscribe IMMEDIATELY (before prompt is called)\n const unsubscribe = agent.subscribe((event: AgentEvent) => {\n const translated = translateEvent(event, extractCitations);\n if (translated) {\n queue.push(translated);\n if (resolve) {\n resolve();\n resolve = null;\n }\n }\n if (event.type === \"agent_end\") {\n done = true;\n if (resolve) {\n resolve();\n resolve = null;\n }\n }\n });\n\n async function* stream(): AsyncGenerator<StreamEvent> {\n try {\n while (!done || queue.length > 0) {\n if (queue.length > 0) {\n yield queue.shift()!;\n continue;\n }\n if (done) break;\n await new Promise<void>((r) => { resolve = r; });\n }\n } finally {\n unsubscribe();\n }\n }\n\n return { stream: stream(), unsubscribe };\n}\n\n/**\n * Subscribe to an Agent and yield Origen StreamEvents.\n * Handles the full lifecycle from agent_start to agent_end.\n *\n * @deprecated Use createEventStream() instead to avoid race conditions.\n * This function subscribes 
lazily (on first iteration) which can miss events\n * if the agent has already started emitting.\n */\nexport async function* agentToStreamEvents(\n agent: any,\n extractCitations?: (text: string) => Citation[]\n): AsyncGenerator<StreamEvent> {\n yield* createEventStream(agent, extractCitations).stream;\n}"],"mappings":";AAQA,SAAS,gBAAgB;AAalB,SAAS,UAAU,MAAkB,OAA8B;AACxE,SAAO;AAAA,IACL,MAAM,KAAK;AAAA,IACX,aAAa,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAOlB,YAAY;AAAA,MACV,MAAM;AAAA,MACN,GAAG,KAAK;AAAA,IACV;AAAA,IACA,OAAO,KAAK;AAAA,IACZ,SAAS,OAAO,aAAa,QAAQ,YAAY;AAC/C,YAAM,SAAS,MAAM,KAAK,QAAQ,QAAmC,KAAK;AAC1E,aAAO;AAAA,QACL,SAAS,CAAC,EAAE,MAAM,QAAiB,MAAM,OAAO,CAAC;AAAA,QACjD,SAAS,CAAC;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AACF;AAGO,SAAS,WAAW,OAAqB,OAAgC;AAC9E,SAAO,MAAM,IAAI,CAAC,MAAM,UAAU,GAAG,KAAK,CAAC;AAC7C;AAcA,IAAM,gBAAqD;AAAA,EACzD,iBAAiB;AAAA,IACf,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,mBAAmB;AAAA,IACjB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,iBAAiB;AAAA,IACf,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,kBAAkB;AAAA,IAChB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,uBAAuB;AAAA,IACrB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,
EACA,gBAAgB;AAAA,IACd,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,sBAAsB;AAAA,IACpB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,oBAAoB;AAAA,IAClB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,eAAe;AAAA,IACb,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AACF;AAEA,IAAM,gBAA4B;AAAA,EAChC,IAAI;AAAA,EACJ,MAAM;AAAA,EACN,KAAK;AAAA,EACL,UAAU;AAAA,EACV,SAAS;AAAA,EACT,WAAW;AAAA,EACX,OAAO,CAAC,MAAM;AAAA,EACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,eAAe;AAAA,EACf,WAAW;AACb;AAMO,SAAS,aAAa,SAAiB,SAA8C;AAE1F,MAAI,QAAQ,WAAW,SAAS,GAAG;AACjC,UAAM,YAAY,cAAc,OAAO;AACvC,QAAI,WAAW;AACb,YAAM,UAAU,SAAS,iBAAiB,UAAU,WAAW;AAC/D,aAAO;AAAA,QACL,GAAG;AAAA,QACH,GAAG;AAAA,QACH;AAAA,QACA,QAAQ;AAAA,UACN,eAAe;AAAA,UACf,uBAAuB;AAAA,UACvB,yBAAyB;AAAA,UACzB,0BAA0B;AAAA,UAC1B,gBAAgB;AAAA,UAChB,wBAAwB;AAAA,UACxB,kCAAkC;AAAA,UAClC,wBAAwB;AAAA,UACxB,6CAA6C;AAAA,UAC7C,gBAAgB;AAAA,UAChB,oBAAoB;AAAA,UACpB,4BAA4B;AAAA,QAC9B;AAAA,MACF;AAAA,IACF;AAEA,UAAM,WAAW,QAAQ,QAAQ,WAAW,EAAE;AAC9C,WAAO;AAAA,MACL,GAAG;AAAA,MACH,IAAI;AAAA,MACJ,MAAM,GAAG,QAAQ;AAAA,MACjB,UAAU;AAAA,MACV,SAAS,SAAS,iBAAiB;AAAA,MACnC,QAAQ;AAAA,QACN,eAAe;AAAA,QACf,uBAAuB;AAAA,QACvB,yBAAyB;AAAA,QACzB,0BAA0B;AAAA,QAC1B,gBAAgB;AAAA,QAChB,wBAAwB;AAAA,QACxB,kCAAkC;AAAA,QAClC,wBAAwB;AAAA,QACxB,6CAA6C;AAAA,QAC7C,gBAAgB;AAAA,QAChB,oBAAoB;AAAA,QACpB,4BAA4B;AAAA,MAC9B;AAAA,IACF;
AAAA,EACF;AAOA,QAAM,YAAsB,CAAC,cAAc,aAAa,UAAU,UAAU,YAAY,QAAQ,OAAO,QAAQ;AAC/G,aAAW,YAAY,WAAW;AAChC,QAAI;AAGF,YAAM,QAAQ,SAAS,UAAiB,OAAc;AACtD,UAAI,MAAO,QAAO;AAAA,IACpB,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,SAAO;AAAA,IACL,GAAG;AAAA,IACH,IAAI;AAAA,IACJ,MAAM;AAAA,EACR;AACF;AAKO,SAAS,gBACd,UACW;AAOX,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,IACX,WAAW,KAAK,IAAI;AAAA,EACtB,EAAE;AACJ;AAKO,SAAS,aACd,cACA,UACA,cACS;AACT,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,OAAO,aAAa,IAAI,CAAC,OAAO;AAAA,MAC9B,MAAM,EAAE;AAAA,MACR,aAAa,EAAE;AAAA,MACf,YAAY,EAAE;AAAA,IAChB,EAAE;AAAA,EACJ;AACF;AAQO,SAAS,yBAAyB,MAA0B;AACjE,QAAM,YAAwB,CAAC;AAC/B,QAAM,QAAQ;AACd,MAAI;AACJ,UAAQ,QAAQ,MAAM,KAAK,IAAI,OAAO,MAAM;AAC1C,cAAU,KAAK,EAAE,MAAM,MAAM,CAAC,GAAG,SAAS,SAAS,MAAM,CAAC,CAAC,GAAG,OAAO,SAAS,MAAM,CAAC,CAAC,EAAE,CAAC;AAAA,EAC3F;AACA,SAAO;AACT;AAGO,SAAS,eACd,OACA,kBACoB;AACpB,UAAQ,MAAM,MAAM;AAAA,IAClB,KAAK,kBAAkB;AACrB,YAAM,iBAAiB,MAAM;AAC7B,UAAI,eAAe,SAAS,cAAc;AACxC,eAAO,EAAE,MAAM,QAAiB,SAAS,eAAe,MAAM;AAAA,MAChE;AACA,UAAI,eAAe,SAAS,kBAAkB;AAC5C,eAAO,EAAE,MAAM,aAAsB,SAAS,eAAe,MAAM;AAAA,MACrE;AACA,aAAO;AAAA,IACT;AAAA,IACA,KAAK,wBAAwB;AAC3B,aAAO;AAAA,QACL,MAAM;AAAA,QACN,MAAM,MAAM;AAAA,QACZ,MAAM,MAAM;AAAA,MACd;AAAA,IACF;AAAA,IACA,KAAK,sBAAsB;AACzB,YAAM,aAAa,MAAM,QAAQ,SAC7B,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACrC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,IAAI,KAAK;AACjB,aAAO;AAAA,QACL,MAAM;AAAA,QACN,MAAM,MAAM;AAAA,QACZ,QAAQ;AAAA,MACV;AAAA,IACF;AAAA,IACA,KAAK,aAAa;AAEhB,YAAM,eAAe,MAAM,SACxB,OAAO,CAAC,MAAgB,EAAE,SAAS,WAAW,EAC9C,IAAI;AACP,YAAM,OAAO,cAAc,SACvB,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACrC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,EAAE,KAAK;AACf,YAAM,QAA+B,cAAc,QAC/C;AAAA,QACE,cAAc,aAAa,MAAM;AAAA,QACjC,kBAAkB,aAAa,MAAM;AAAA,QACrC,WAAW,aAAa,MAAM,MAAM;AAAA,MACtC,IACA;AACJ,YAAM,QAAQ,oBAAoB;AAElC,UAAI,cAAc,eAAe,WAAW,cAAc,eAAe,WAAW;AAClF,eAAO;AAAA,UACL,MAAM;AAAA,UACN,SAAS,aAAa,gBAAgB;AAAA,QACxC;AAAA,MACF;AACA,aAAO;AAAA,QACL,MAAM;AAAA,QACN,SAAS;AAAA,QACT,WAAW,MAAM,IAAI;AAAA,QACrB;AAAA,MACF;AAAA,IACF;AAAA,IACA;AACE,a
AAO;AAAA,EACX;AACF;AAcO,SAAS,kBACd,OACA,kBAIA;AACA,QAAM,QAAuB,CAAC;AAC9B,MAAI,UAA+B;AACnC,MAAI,OAAO;AAGX,QAAM,cAAc,MAAM,UAAU,CAAC,UAAsB;AACzD,UAAM,aAAa,eAAe,OAAO,gBAAgB;AACzD,QAAI,YAAY;AACd,YAAM,KAAK,UAAU;AACrB,UAAI,SAAS;AACX,gBAAQ;AACR,kBAAU;AAAA,MACZ;AAAA,IACF;AACA,QAAI,MAAM,SAAS,aAAa;AAC9B,aAAO;AACP,UAAI,SAAS;AACX,gBAAQ;AACR,kBAAU;AAAA,MACZ;AAAA,IACF;AAAA,EACF,CAAC;AAED,kBAAgB,SAAsC;AACpD,QAAI;AACF,aAAO,CAAC,QAAQ,MAAM,SAAS,GAAG;AAChC,YAAI,MAAM,SAAS,GAAG;AACpB,gBAAM,MAAM,MAAM;AAClB;AAAA,QACF;AACA,YAAI,KAAM;AACV,cAAM,IAAI,QAAc,CAAC,MAAM;AAAE,oBAAU;AAAA,QAAG,CAAC;AAAA,MACjD;AAAA,IACF,UAAE;AACA,kBAAY;AAAA,IACd;AAAA,EACF;AAEA,SAAO,EAAE,QAAQ,OAAO,GAAG,YAAY;AACzC;AAUA,gBAAuB,oBACrB,OACA,kBAC6B;AAC7B,SAAO,kBAAkB,OAAO,gBAAgB,EAAE;AACpD;","names":[]}
|
package/dist/index.d.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
|
-
export { A as AgentConfig, a as AgentResponse, b as AuthCheckResult, C as Citation, D as D1Like, c as D1Provider, M as ModelResolutionOptions, d as OrigenModelConfig, O as OrigenTool, R as ReadingContext, S as StreamEvent, U as UsageInfo, e as callOrigen, f as checkAuth, g as checkOpenRouterAuth, h as createEventStream, r as resolveModel, s as streamOrigen } from './adapter-
|
|
2
|
-
export { DEFAULT_MODEL, DEFAULT_MODEL_ID, MODELS, ModelConfig, ModelId, THINKING_MODELS, UIModelConfig, getModelsByProvider, getModelsForUI, isOllamaModel, supportsThinking } from './models.js';
|
|
1
|
+
export { A as AgentConfig, a as AgentResponse, b as AuthCheckResult, C as Citation, D as D1Like, c as D1Provider, M as ModelResolutionOptions, d as OrigenModelConfig, O as OrigenTool, R as ReadingContext, S as StreamEvent, U as UsageInfo, e as callOrigen, f as checkAuth, g as checkOpenRouterAuth, h as createEventStream, i as defaultCitationExtractor, r as resolveModel, s as streamOrigen } from './adapter-DXIT3O2f.js';
|
|
2
|
+
export { DEFAULT_MODEL, DEFAULT_MODEL_ID, MODELS, ModelConfig, ModelId, THINKING_MODELS, UIModelConfig, discoverOllamaModels, fetchOllamaModels, getModelsByProvider, getModelsForUI, isOllamaModel, mergeOllamaModels, supportsThinking } from './models.js';
|
|
3
3
|
export { Soul, loadSoul } from './soul.js';
|
|
4
4
|
import '@mariozechner/pi-ai';
|
|
5
5
|
import '@mariozechner/pi-agent-core';
|
package/dist/index.js
CHANGED
|
@@ -3,11 +3,14 @@ import {
|
|
|
3
3
|
DEFAULT_MODEL_ID,
|
|
4
4
|
MODELS,
|
|
5
5
|
THINKING_MODELS,
|
|
6
|
+
discoverOllamaModels,
|
|
7
|
+
fetchOllamaModels,
|
|
6
8
|
getModelsByProvider,
|
|
7
9
|
getModelsForUI,
|
|
8
10
|
isOllamaModel,
|
|
11
|
+
mergeOllamaModels,
|
|
9
12
|
supportsThinking
|
|
10
|
-
} from "./chunk-
|
|
13
|
+
} from "./chunk-GK5KZOHB.js";
|
|
11
14
|
import {
|
|
12
15
|
loadSoul
|
|
13
16
|
} from "./chunk-QF3XSUMT.js";
|
|
@@ -15,8 +18,9 @@ import {
|
|
|
15
18
|
adaptTools,
|
|
16
19
|
convertMessages,
|
|
17
20
|
createEventStream,
|
|
21
|
+
defaultCitationExtractor,
|
|
18
22
|
resolveModel
|
|
19
|
-
} from "./chunk-
|
|
23
|
+
} from "./chunk-K3FE63XL.js";
|
|
20
24
|
|
|
21
25
|
// src/agent.ts
|
|
22
26
|
import { Agent } from "@mariozechner/pi-agent-core";
|
|
@@ -51,7 +55,7 @@ async function* streamOrigen(messages, context, config, apiKey) {
|
|
|
51
55
|
const extractCitations = config.extractCitations;
|
|
52
56
|
const model = resolveModel(modelId, { ollamaBaseUrl: config.ollamaBaseUrl });
|
|
53
57
|
const adaptedTools = adaptTools(config.tools, config.getD1);
|
|
54
|
-
|
|
58
|
+
const piMessages = convertMessages(messages);
|
|
55
59
|
if (context && piMessages.length > 0) {
|
|
56
60
|
const lastIdx = piMessages.length - 1;
|
|
57
61
|
const lastMsg = piMessages[lastIdx];
|
|
@@ -123,10 +127,14 @@ export {
|
|
|
123
127
|
checkAuth,
|
|
124
128
|
checkOpenRouterAuth,
|
|
125
129
|
createEventStream,
|
|
130
|
+
defaultCitationExtractor,
|
|
131
|
+
discoverOllamaModels,
|
|
132
|
+
fetchOllamaModels,
|
|
126
133
|
getModelsByProvider,
|
|
127
134
|
getModelsForUI,
|
|
128
135
|
isOllamaModel,
|
|
129
136
|
loadSoul,
|
|
137
|
+
mergeOllamaModels,
|
|
130
138
|
resolveModel,
|
|
131
139
|
streamOrigen,
|
|
132
140
|
supportsThinking
|
package/dist/index.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["../src/agent.ts"],"sourcesContent":["/**\n * Origen — Agent Engine (v0.3)\n *\n * Multi-provider agent harness built on pi-ai + pi-agent-core.\n * Supports OpenRouter, Ollama, Anthropic, Google, and any OpenAI-compatible API.\n * Soul.md personas, streaming, parallel tool execution, abort support.\n */\n\nimport { Agent } from \"@mariozechner/pi-agent-core\";\nimport { streamSimple } from \"@mariozechner/pi-ai\";\nimport type { AgentEvent } from \"@mariozechner/pi-agent-core\";\nimport { z } from \"zod\";\nimport {\n adaptTools,\n convertMessages,\n buildContext,\n createEventStream,\n resolveModel,\n} from \"./adapter\";\nimport { DEFAULT_MODEL_ID, THINKING_MODELS, type ModelId } from \"./models\";\nimport type { D1Provider, Citation, UsageInfo } from \"./types\";\n\n// ── Tool definition ───────────────────────────────────────────────────\n\n/**\n * A tool that the host app registers with Origen.\n * Simple interface: name, description, JSON schema, and an execute function\n * that receives (args, getD1). 
The adapter wraps this into pi-agent-core's AgentTool.\n */\nexport interface OrigenTool {\n name: string;\n description: string;\n /** OpenAI function-calling parameter schema (JSON) */\n parameters: Record<string, unknown>;\n /** Zod schema for runtime validation (optional) */\n inputSchema?: z.ZodType;\n execute: (args: Record<string, unknown>, getD1: D1Provider) => Promise<string>;\n}\n\n// ── Agent configuration ───────────────────────────────────────────────\n\nexport interface AgentConfig {\n appName?: string;\n systemPrompt?: string;\n tools: OrigenTool[];\n getD1: D1Provider;\n model?: ModelId;\n maxSteps?: number;\n /** Custom citation extractor */\n extractCitations?: (text: string) => Citation[];\n /** Dynamic API key resolution per provider (e.g., for expiring OAuth tokens) */\n getApiKey?: (provider: string) => Promise<string | undefined>;\n /** Ollama base URL override (default: http://localhost:11434/v1) */\n ollamaBaseUrl?: string;\n /** Tool execution mode: \"parallel\" (default) or \"sequential\" */\n toolExecution?: \"sequential\" | \"parallel\";\n /** Abort signal for cancellation */\n signal?: AbortSignal;\n /** Reasoning/thinking level for models that support it */\n thinkingLevel?: \"off\" | \"minimal\" | \"low\" | \"medium\" | \"high\";\n}\n\n// ── Auth check ────────────────────────────────────────\n\nexport interface AuthCheckResult {\n authenticated: boolean;\n apiKey: string | null;\n provider?: string;\n error?: string;\n}\n\n/**\n * Provider-aware auth check. Tests key availability for each provider.\n * If no provider argument, checks OpenRouter + Ollama availability.\n */\nexport async function checkAuth(\n getApiKey: ((provider: string) => Promise<string | undefined>) | (() => Promise<string | null>),\n): Promise<AuthCheckResult> {\n // Normalize to per-provider signature\n const getProviderKey = getApiKey.length >= 1\n ? 
getApiKey as (provider: string) => Promise<string | undefined>\n : async (provider: string) => {\n const key = await (getApiKey as () => Promise<string | null>)();\n return key ?? undefined;\n };\n\n // Try OpenRouter first\n const orKey = await getProviderKey(\"openrouter\");\n if (orKey) return { authenticated: true, apiKey: orKey, provider: \"openrouter\" };\n\n // Try Ollama\n const ollamaKey = await getProviderKey(\"ollama\");\n if (ollamaKey) return { authenticated: true, apiKey: ollamaKey, provider: \"ollama\" };\n\n // Try Anthropic\n const anthropicKey = await getProviderKey(\"anthropic\");\n if (anthropicKey) return { authenticated: true, apiKey: anthropicKey, provider: \"anthropic\" };\n\n return {\n authenticated: false,\n apiKey: null,\n error: \"Connect your OpenRouter account or configure Ollama to enable AI-powered study.\",\n };\n}\n\n/** Convenience: check OpenRouter auth only (backward compat). */\nexport async function checkOpenRouterAuth(\n getApiKey: () => Promise<string | null>\n): Promise<AuthCheckResult> {\n const apiKey = await getApiKey();\n if (!apiKey) {\n return { authenticated: false, apiKey: null, error: \"Connect your OpenRouter account to enable AI-powered study.\" };\n }\n return { authenticated: true, apiKey, provider: \"openrouter\" };\n}\n\n// ── Stream event types ─────────────────────────────────────────────────\n\nexport type StreamEvent =\n | { type: \"reasoning\"; content: string }\n | { type: \"tool_call\"; name: string; args: Record<string, unknown> }\n | { type: \"tool_result\"; name: string; result: string }\n | { type: \"text\"; content: string }\n | { type: \"done\"; message: string; citations: Citation[]; usage?: UsageInfo }\n | { type: \"error\"; message: string };\n\n// ── Streaming agent call ───────────────────────────────────────────────\n\nexport async function* streamOrigen(\n messages: Array<{ role: \"user\" | \"assistant\"; content: string }>,\n context: Record<string, unknown> | undefined,\n config: 
AgentConfig,\n apiKey?: string,\n): AsyncGenerator<StreamEvent> {\n const systemPrompt = config.systemPrompt ?? `You are ${config.appName ?? \"Origen\"}, an AI assistant. Use your tools to help the user.`;\n const modelId = config.model ?? DEFAULT_MODEL_ID;\n const maxSteps = config.maxSteps ?? 5;\n const extractCitations = config.extractCitations;\n\n // Resolve model to pi-ai Model object\n const model = resolveModel(modelId, { ollamaBaseUrl: config.ollamaBaseUrl });\n\n // Adapt tools to AgentTool format\n const adaptedTools = adaptTools(config.tools, config.getD1);\n\n // Convert messages\n let piMessages = convertMessages(messages);\n\n // Inject context into last user message\n if (context && piMessages.length > 0) {\n const lastIdx = piMessages.length - 1;\n const lastMsg = piMessages[lastIdx];\n if (lastMsg.role === \"user\") {\n piMessages[lastIdx] = {\n ...lastMsg,\n content: `[Context: ${JSON.stringify(context)}] ${typeof lastMsg.content === \"string\" ? lastMsg.content : \"\"}`,\n };\n }\n }\n\n // Resolve API key per provider\n const resolveApiKey = async (provider: string): Promise<string | undefined> => {\n if (config.getApiKey) return config.getApiKey(provider);\n if (apiKey) return apiKey;\n return undefined;\n };\n\n // Create Agent\n const agent = new Agent({\n initialState: {\n systemPrompt,\n model,\n thinkingLevel: config.thinkingLevel ?? (THINKING_MODELS.has(modelId) ? \"medium\" : \"off\"),\n tools: adaptedTools,\n messages: piMessages as any,\n },\n getApiKey: resolveApiKey,\n toolExecution: config.toolExecution ?? 
\"parallel\",\n });\n\n // CRITICAL: Create event stream BEFORE calling prompt.\n // createEventStream subscribes eagerly (synchronously), so no events\n // are missed even though agent.prompt() emits events during execution.\n const { stream, unsubscribe } = createEventStream(agent, extractCitations);\n\n let streamError: string | null = null;\n\n // Start prompt without awaiting — events flow through active subscription\n agent.prompt(piMessages as any).catch((error) => {\n // If prompt throws without emitting agent_end, capture error\n // to yield after the stream ends\n streamError = error instanceof Error ? error.message : String(error);\n unsubscribe(); // clean up since agent won't emit agent_end\n });\n\n try {\n for await (const event of stream) {\n yield event;\n }\n } finally {\n unsubscribe();\n }\n\n // If prompt() threw without emitting events, yield the error now\n if (streamError) {\n yield { type: \"error\", message: `Agent error: ${streamError}` };\n }\n}\n\n// ── Non-streaming agent call ──────────────────────────────────────────\n\nexport interface AgentResponse {\n message: string;\n citations: Citation[];\n usage?: UsageInfo;\n}\n\nexport async function callOrigen(\n messages: Array<{ role: \"user\" | \"assistant\"; content: string }>,\n context: Record<string, unknown> | undefined,\n config: AgentConfig,\n apiKey?: string,\n): Promise<AgentResponse> {\n let message = \"\";\n const citations: Citation[] = [];\n let usage: UsageInfo | undefined;\n\n for await (const event of streamOrigen(messages, context, config, apiKey)) {\n switch (event.type) {\n case \"text\": message += event.content; break;\n case \"done\": citations.push(...event.citations); usage = event.usage; break;\n case \"error\": throw new Error(event.message);\n }\n }\n\n return { message, citations, usage 
};\n}"],"mappings":";;;;;;;;;;;;;;;;;;;;;AAQA,SAAS,aAAa;AAmEtB,eAAsB,UACpB,WAC0B;AAE1B,QAAM,iBAAiB,UAAU,UAAU,IACvC,YACA,OAAO,aAAqB;AAC1B,UAAM,MAAM,MAAO,UAA2C;AAC9D,WAAO,OAAO;AAAA,EAChB;AAGJ,QAAM,QAAQ,MAAM,eAAe,YAAY;AAC/C,MAAI,MAAO,QAAO,EAAE,eAAe,MAAM,QAAQ,OAAO,UAAU,aAAa;AAG/E,QAAM,YAAY,MAAM,eAAe,QAAQ;AAC/C,MAAI,UAAW,QAAO,EAAE,eAAe,MAAM,QAAQ,WAAW,UAAU,SAAS;AAGnF,QAAM,eAAe,MAAM,eAAe,WAAW;AACrD,MAAI,aAAc,QAAO,EAAE,eAAe,MAAM,QAAQ,cAAc,UAAU,YAAY;AAE5F,SAAO;AAAA,IACL,eAAe;AAAA,IACf,QAAQ;AAAA,IACR,OAAO;AAAA,EACT;AACF;AAGA,eAAsB,oBACpB,WAC0B;AAC1B,QAAM,SAAS,MAAM,UAAU;AAC/B,MAAI,CAAC,QAAQ;AACX,WAAO,EAAE,eAAe,OAAO,QAAQ,MAAM,OAAO,8DAA8D;AAAA,EACpH;AACA,SAAO,EAAE,eAAe,MAAM,QAAQ,UAAU,aAAa;AAC/D;AAcA,gBAAuB,aACrB,UACA,SACA,QACA,QAC6B;AAC7B,QAAM,eAAe,OAAO,gBAAgB,WAAW,OAAO,WAAW,QAAQ;AACjF,QAAM,UAAU,OAAO,SAAS;AAChC,QAAM,WAAW,OAAO,YAAY;AACpC,QAAM,mBAAmB,OAAO;AAGhC,QAAM,QAAQ,aAAa,SAAS,EAAE,eAAe,OAAO,cAAc,CAAC;AAG3E,QAAM,eAAe,WAAW,OAAO,OAAO,OAAO,KAAK;AAG1D,MAAI,aAAa,gBAAgB,QAAQ;AAGzC,MAAI,WAAW,WAAW,SAAS,GAAG;AACpC,UAAM,UAAU,WAAW,SAAS;AACpC,UAAM,UAAU,WAAW,OAAO;AAClC,QAAI,QAAQ,SAAS,QAAQ;AAC3B,iBAAW,OAAO,IAAI;AAAA,QACpB,GAAG;AAAA,QACH,SAAS,aAAa,KAAK,UAAU,OAAO,CAAC,KAAK,OAAO,QAAQ,YAAY,WAAW,QAAQ,UAAU,EAAE;AAAA,MAC9G;AAAA,IACF;AAAA,EACF;AAGA,QAAM,gBAAgB,OAAO,aAAkD;AAC7E,QAAI,OAAO,UAAW,QAAO,OAAO,UAAU,QAAQ;AACtD,QAAI,OAAQ,QAAO;AACnB,WAAO;AAAA,EACT;AAGA,QAAM,QAAQ,IAAI,MAAM;AAAA,IACtB,cAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA,eAAe,OAAO,kBAAkB,gBAAgB,IAAI,OAAO,IAAI,WAAW;AAAA,MAClF,OAAO;AAAA,MACP,UAAU;AAAA,IACZ;AAAA,IACA,WAAW;AAAA,IACX,eAAe,OAAO,iBAAiB;AAAA,EACzC,CAAC;AAKD,QAAM,EAAE,QAAQ,YAAY,IAAI,kBAAkB,OAAO,gBAAgB;AAEzE,MAAI,cAA6B;AAGjC,QAAM,OAAO,UAAiB,EAAE,MAAM,CAAC,UAAU;AAG/C,kBAAc,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACnE,gBAAY;AAAA,EACd,CAAC;AAED,MAAI;AACF,qBAAiB,SAAS,QAAQ;AAChC,YAAM;AAAA,IACR;AAAA,EACF,UAAE;AACA,gBAAY;AAAA,EACd;AAGA,MAAI,aAAa;AACf,UAAM,EAAE,MAAM,SAAS,SAAS,gBAAgB,WAAW,GAAG;AAAA,EAChE;AACF;AAUA,eAAsB,WACpB,UACA,SACA,QACA,QACwB;AACxB,MAAI,UAAU;AACd,QAAM,YAAwB,CAAC;AAC/B,MAAI;AAEJ,mBAAiB,
SAAS,aAAa,UAAU,SAAS,QAAQ,MAAM,GAAG;AACzE,YAAQ,MAAM,MAAM;AAAA,MAClB,KAAK;AAAQ,mBAAW,MAAM;AAAS;AAAA,MACvC,KAAK;AAAQ,kBAAU,KAAK,GAAG,MAAM,SAAS;AAAG,gBAAQ,MAAM;AAAO;AAAA,MACtE,KAAK;AAAS,cAAM,IAAI,MAAM,MAAM,OAAO;AAAA,IAC7C;AAAA,EACF;AAEA,SAAO,EAAE,SAAS,WAAW,MAAM;AACrC;","names":[]}
|
|
1
|
+
{"version":3,"sources":["../src/agent.ts"],"sourcesContent":["/**\n * Origen — Agent Engine (v0.3)\n *\n * Multi-provider agent harness built on pi-ai + pi-agent-core.\n * Supports OpenRouter, Ollama, Anthropic, Google, and any OpenAI-compatible API.\n * Soul.md personas, streaming, parallel tool execution, abort support.\n */\n\nimport { Agent } from \"@mariozechner/pi-agent-core\";\nimport { streamSimple } from \"@mariozechner/pi-ai\";\nimport type { AgentEvent } from \"@mariozechner/pi-agent-core\";\nimport { z } from \"zod\";\nimport {\n adaptTools,\n convertMessages,\n buildContext,\n createEventStream,\n resolveModel,\n} from \"./adapter\";\nimport type { AgentMessage } from \"@mariozechner/pi-agent-core\";\nimport { DEFAULT_MODEL_ID, THINKING_MODELS, type ModelId } from \"./models\";\nimport type { D1Provider, Citation, UsageInfo } from \"./types\";\n\n// ── Tool definition ───────────────────────────────────────────────────\n\n/**\n * A tool that the host app registers with Origen.\n * Simple interface: name, description, JSON schema, and an execute function\n * that receives (args, getD1). 
The adapter wraps this into pi-agent-core's AgentTool.\n */\nexport interface OrigenTool {\n name: string;\n description: string;\n /** OpenAI function-calling parameter schema (JSON) */\n parameters: Record<string, unknown>;\n /** Zod schema for runtime validation (optional) */\n inputSchema?: z.ZodType;\n execute: (args: Record<string, unknown>, getD1: D1Provider) => Promise<string>;\n}\n\n// ── Agent configuration ───────────────────────────────────────────────\n\nexport interface AgentConfig {\n appName?: string;\n systemPrompt?: string;\n tools: OrigenTool[];\n getD1: D1Provider;\n model?: ModelId;\n maxSteps?: number;\n /** Custom citation extractor */\n extractCitations?: (text: string) => Citation[];\n /** Dynamic API key resolution per provider (e.g., for expiring OAuth tokens) */\n getApiKey?: (provider: string) => Promise<string | undefined>;\n /** Ollama base URL override (default: http://localhost:11434/v1) */\n ollamaBaseUrl?: string;\n /** Tool execution mode: \"parallel\" (default) or \"sequential\" */\n toolExecution?: \"sequential\" | \"parallel\";\n /** Abort signal for cancellation */\n signal?: AbortSignal;\n /** Reasoning/thinking level for models that support it */\n thinkingLevel?: \"off\" | \"minimal\" | \"low\" | \"medium\" | \"high\";\n}\n\n// ── Auth check ────────────────────────────────────────\n\nexport interface AuthCheckResult {\n authenticated: boolean;\n apiKey: string | null;\n provider?: string;\n error?: string;\n}\n\n/**\n * Provider-aware auth check. Tests key availability for each provider.\n * If no provider argument, checks OpenRouter + Ollama availability.\n */\nexport async function checkAuth(\n getApiKey: ((provider: string) => Promise<string | undefined>) | (() => Promise<string | null>),\n): Promise<AuthCheckResult> {\n // Normalize to per-provider signature\n const getProviderKey = getApiKey.length >= 1\n ? 
getApiKey as (provider: string) => Promise<string | undefined>\n : async (provider: string) => {\n const key = await (getApiKey as () => Promise<string | null>)();\n return key ?? undefined;\n };\n\n // Try OpenRouter first\n const orKey = await getProviderKey(\"openrouter\");\n if (orKey) return { authenticated: true, apiKey: orKey, provider: \"openrouter\" };\n\n // Try Ollama\n const ollamaKey = await getProviderKey(\"ollama\");\n if (ollamaKey) return { authenticated: true, apiKey: ollamaKey, provider: \"ollama\" };\n\n // Try Anthropic\n const anthropicKey = await getProviderKey(\"anthropic\");\n if (anthropicKey) return { authenticated: true, apiKey: anthropicKey, provider: \"anthropic\" };\n\n return {\n authenticated: false,\n apiKey: null,\n error: \"Connect your OpenRouter account or configure Ollama to enable AI-powered study.\",\n };\n}\n\n/** Convenience: check OpenRouter auth only (backward compat). */\nexport async function checkOpenRouterAuth(\n getApiKey: () => Promise<string | null>\n): Promise<AuthCheckResult> {\n const apiKey = await getApiKey();\n if (!apiKey) {\n return { authenticated: false, apiKey: null, error: \"Connect your OpenRouter account to enable AI-powered study.\" };\n }\n return { authenticated: true, apiKey, provider: \"openrouter\" };\n}\n\n// ── Stream event types ─────────────────────────────────────────────────\n\nexport type StreamEvent =\n | { type: \"reasoning\"; content: string }\n | { type: \"tool_call\"; name: string; args: Record<string, unknown> }\n | { type: \"tool_result\"; name: string; result: string }\n | { type: \"text\"; content: string }\n | { type: \"done\"; message: string; citations: Citation[]; usage?: UsageInfo }\n | { type: \"error\"; message: string };\n\n// ── Streaming agent call ───────────────────────────────────────────────\n\nexport async function* streamOrigen(\n messages: Array<{ role: \"user\" | \"assistant\"; content: string }>,\n context: Record<string, unknown> | undefined,\n config: 
AgentConfig,\n apiKey?: string,\n): AsyncGenerator<StreamEvent> {\n const systemPrompt = config.systemPrompt ?? `You are ${config.appName ?? \"Origen\"}, an AI assistant. Use your tools to help the user.`;\n const modelId = config.model ?? DEFAULT_MODEL_ID;\n const maxSteps = config.maxSteps ?? 5;\n const extractCitations = config.extractCitations;\n\n // Resolve model to pi-ai Model object\n const model = resolveModel(modelId, { ollamaBaseUrl: config.ollamaBaseUrl });\n\n // Adapt tools to AgentTool format\n const adaptedTools = adaptTools(config.tools, config.getD1);\n\n // Convert messages — Origen's simple {role, content} maps to pi-ai UserMessages.\n // Assistant messages lack thinking/toolCall content, so we cast through the union.\n const piMessages = convertMessages(messages) as AgentMessage[];\n\n // Inject context into last user message\n if (context && piMessages.length > 0) {\n const lastIdx = piMessages.length - 1;\n const lastMsg = piMessages[lastIdx];\n if (lastMsg.role === \"user\") {\n piMessages[lastIdx] = {\n ...lastMsg,\n content: `[Context: ${JSON.stringify(context)}] ${typeof lastMsg.content === \"string\" ? lastMsg.content : \"\"}`,\n };\n }\n }\n\n // Resolve API key per provider\n const resolveApiKey = async (provider: string): Promise<string | undefined> => {\n if (config.getApiKey) return config.getApiKey(provider);\n if (apiKey) return apiKey;\n return undefined;\n };\n\n // Create Agent\n const agent = new Agent({\n initialState: {\n systemPrompt,\n model,\n thinkingLevel: config.thinkingLevel ?? (THINKING_MODELS.has(modelId) ? \"medium\" : \"off\"),\n tools: adaptedTools,\n messages: piMessages,\n },\n getApiKey: resolveApiKey,\n toolExecution: config.toolExecution ?? 
\"parallel\",\n });\n\n // CRITICAL: Create event stream BEFORE calling prompt.\n // createEventStream subscribes eagerly (synchronously), so no events\n // are missed even though agent.prompt() emits events during execution.\n const { stream, unsubscribe } = createEventStream(agent, extractCitations);\n\n let streamError: string | null = null;\n\n // Start prompt without awaiting — events flow through active subscription\n agent.prompt(piMessages).catch((error) => {\n // If prompt throws without emitting agent_end, capture error\n // to yield after the stream ends\n streamError = error instanceof Error ? error.message : String(error);\n unsubscribe(); // clean up since agent won't emit agent_end\n });\n\n try {\n for await (const event of stream) {\n yield event;\n }\n } finally {\n unsubscribe();\n }\n\n // If prompt() threw without emitting events, yield the error now\n if (streamError) {\n yield { type: \"error\", message: `Agent error: ${streamError}` };\n }\n}\n\n// ── Non-streaming agent call ──────────────────────────────────────────\n\nexport interface AgentResponse {\n message: string;\n citations: Citation[];\n usage?: UsageInfo;\n}\n\nexport async function callOrigen(\n messages: Array<{ role: \"user\" | \"assistant\"; content: string }>,\n context: Record<string, unknown> | undefined,\n config: AgentConfig,\n apiKey?: string,\n): Promise<AgentResponse> {\n let message = \"\";\n const citations: Citation[] = [];\n let usage: UsageInfo | undefined;\n\n for await (const event of streamOrigen(messages, context, config, apiKey)) {\n switch (event.type) {\n case \"text\": message += event.content; break;\n case \"done\": citations.push(...event.citations); usage = event.usage; break;\n case \"error\": throw new Error(event.message);\n }\n }\n\n return { message, citations, usage 
};\n}"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;AAQA,SAAS,aAAa;AAoEtB,eAAsB,UACpB,WAC0B;AAE1B,QAAM,iBAAiB,UAAU,UAAU,IACvC,YACA,OAAO,aAAqB;AAC1B,UAAM,MAAM,MAAO,UAA2C;AAC9D,WAAO,OAAO;AAAA,EAChB;AAGJ,QAAM,QAAQ,MAAM,eAAe,YAAY;AAC/C,MAAI,MAAO,QAAO,EAAE,eAAe,MAAM,QAAQ,OAAO,UAAU,aAAa;AAG/E,QAAM,YAAY,MAAM,eAAe,QAAQ;AAC/C,MAAI,UAAW,QAAO,EAAE,eAAe,MAAM,QAAQ,WAAW,UAAU,SAAS;AAGnF,QAAM,eAAe,MAAM,eAAe,WAAW;AACrD,MAAI,aAAc,QAAO,EAAE,eAAe,MAAM,QAAQ,cAAc,UAAU,YAAY;AAE5F,SAAO;AAAA,IACL,eAAe;AAAA,IACf,QAAQ;AAAA,IACR,OAAO;AAAA,EACT;AACF;AAGA,eAAsB,oBACpB,WAC0B;AAC1B,QAAM,SAAS,MAAM,UAAU;AAC/B,MAAI,CAAC,QAAQ;AACX,WAAO,EAAE,eAAe,OAAO,QAAQ,MAAM,OAAO,8DAA8D;AAAA,EACpH;AACA,SAAO,EAAE,eAAe,MAAM,QAAQ,UAAU,aAAa;AAC/D;AAcA,gBAAuB,aACrB,UACA,SACA,QACA,QAC6B;AAC7B,QAAM,eAAe,OAAO,gBAAgB,WAAW,OAAO,WAAW,QAAQ;AACjF,QAAM,UAAU,OAAO,SAAS;AAChC,QAAM,WAAW,OAAO,YAAY;AACpC,QAAM,mBAAmB,OAAO;AAGhC,QAAM,QAAQ,aAAa,SAAS,EAAE,eAAe,OAAO,cAAc,CAAC;AAG3E,QAAM,eAAe,WAAW,OAAO,OAAO,OAAO,KAAK;AAI1D,QAAM,aAAa,gBAAgB,QAAQ;AAG3C,MAAI,WAAW,WAAW,SAAS,GAAG;AACpC,UAAM,UAAU,WAAW,SAAS;AACpC,UAAM,UAAU,WAAW,OAAO;AAClC,QAAI,QAAQ,SAAS,QAAQ;AAC3B,iBAAW,OAAO,IAAI;AAAA,QACpB,GAAG;AAAA,QACH,SAAS,aAAa,KAAK,UAAU,OAAO,CAAC,KAAK,OAAO,QAAQ,YAAY,WAAW,QAAQ,UAAU,EAAE;AAAA,MAC9G;AAAA,IACF;AAAA,EACF;AAGA,QAAM,gBAAgB,OAAO,aAAkD;AAC7E,QAAI,OAAO,UAAW,QAAO,OAAO,UAAU,QAAQ;AACtD,QAAI,OAAQ,QAAO;AACnB,WAAO;AAAA,EACT;AAGA,QAAM,QAAQ,IAAI,MAAM;AAAA,IACtB,cAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA,eAAe,OAAO,kBAAkB,gBAAgB,IAAI,OAAO,IAAI,WAAW;AAAA,MAClF,OAAO;AAAA,MACP,UAAU;AAAA,IACZ;AAAA,IACA,WAAW;AAAA,IACX,eAAe,OAAO,iBAAiB;AAAA,EACzC,CAAC;AAKD,QAAM,EAAE,QAAQ,YAAY,IAAI,kBAAkB,OAAO,gBAAgB;AAEzE,MAAI,cAA6B;AAGjC,QAAM,OAAO,UAAU,EAAE,MAAM,CAAC,UAAU;AAGxC,kBAAc,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACnE,gBAAY;AAAA,EACd,CAAC;AAED,MAAI;AACF,qBAAiB,SAAS,QAAQ;AAChC,YAAM;AAAA,IACR;AAAA,EACF,UAAE;AACA,gBAAY;AAAA,EACd;AAGA,MAAI,aAAa;AACf,UAAM,EAAE,MAAM,SAAS,SAAS,gBAAgB,WAAW,GAAG;AAAA,EAChE;AACF;AAUA,eAAsB,WACpB,UACA,SACA,QACA,QACwB;AACxB,MAAI,UAAU;AACd,QAAM,YAAwB,CAAC;AAC/B,MAAI;AAEJ,mBAA
iB,SAAS,aAAa,UAAU,SAAS,QAAQ,MAAM,GAAG;AACzE,YAAQ,MAAM,MAAM;AAAA,MAClB,KAAK;AAAQ,mBAAW,MAAM;AAAS;AAAA,MACvC,KAAK;AAAQ,kBAAU,KAAK,GAAG,MAAM,SAAS;AAAG,gBAAQ,MAAM;AAAO;AAAA,MACtE,KAAK;AAAS,cAAM,IAAI,MAAM,MAAM,OAAO;AAAA,IAC7C;AAAA,EACF;AAEA,SAAO,EAAE,SAAS,WAAW,MAAM;AACrC;","names":[]}
|
package/dist/models.d.ts
CHANGED
|
@@ -3,8 +3,12 @@ export { Api, Model as ProviderModel } from '@mariozechner/pi-ai';
|
|
|
3
3
|
/**
|
|
4
4
|
* Origen model configuration.
|
|
5
5
|
*
|
|
6
|
-
*
|
|
7
|
-
*
|
|
6
|
+
* Static entries for cloud providers (OpenRouter, Anthropic, Google, etc.)
|
|
7
|
+
* plus dynamic Ollama model discovery via GET /api/tags.
|
|
8
|
+
*
|
|
9
|
+
* Hardcoded Ollama entries serve as fallbacks when Ollama isn't reachable.
|
|
10
|
+
* When connected, fetchOllamaModels() pulls the live model list and merges
|
|
11
|
+
* it with the static entries.
|
|
8
12
|
*/
|
|
9
13
|
|
|
10
14
|
interface ModelConfig {
|
|
@@ -14,8 +18,6 @@ interface ModelConfig {
|
|
|
14
18
|
}
|
|
15
19
|
/** UI-facing model config — safe to send to the client. Strips internal fields. */
|
|
16
20
|
type UIModelConfig = ModelConfig;
|
|
17
|
-
/** Get models as a simple UI map (name, description, free). No internal fields. */
|
|
18
|
-
declare function getModelsForUI(): Record<string, UIModelConfig>;
|
|
19
21
|
declare const MODELS: Record<string, ModelConfig>;
|
|
20
22
|
type ModelId = keyof typeof MODELS;
|
|
21
23
|
/** Default model — free router, works with $0 credits */
|
|
@@ -23,12 +25,42 @@ declare const DEFAULT_MODEL_ID: ModelId;
|
|
|
23
25
|
/** Backward compat alias */
|
|
24
26
|
declare const DEFAULT_MODEL: ModelId;
|
|
25
27
|
/** Models that support extended thinking */
|
|
26
|
-
declare const THINKING_MODELS: ReadonlySet<
|
|
28
|
+
declare const THINKING_MODELS: ReadonlySet<string>;
|
|
27
29
|
/** Check if a model supports extended thinking */
|
|
28
|
-
declare function supportsThinking(model:
|
|
30
|
+
declare function supportsThinking(model: string): boolean;
|
|
29
31
|
/** Check if a model is an Ollama model */
|
|
30
|
-
declare function isOllamaModel(model:
|
|
32
|
+
declare function isOllamaModel(model: string): boolean;
|
|
31
33
|
/** Get all model IDs for a specific provider prefix */
|
|
32
|
-
declare function getModelsByProvider(provider: string):
|
|
34
|
+
declare function getModelsByProvider(provider: string): string[];
|
|
35
|
+
/** Get models as a simple UI map (name, description, free). No internal fields. */
|
|
36
|
+
declare function getModelsForUI(): Record<string, UIModelConfig>;
|
|
37
|
+
/**
|
|
38
|
+
* Fetch available models from a running Ollama server.
|
|
39
|
+
*
|
|
40
|
+
* Calls GET /api/tags on the Ollama server and returns model configs
|
|
41
|
+
* merged with the static defaults. Cloud models (e.g., foo:cloud)
|
|
42
|
+
* are included alongside local models.
|
|
43
|
+
*
|
|
44
|
+
* @param baseUrl - Ollama server URL (default: http://localhost:11434)
|
|
45
|
+
* @returns Object with discovered Ollama model configs (keyed by "ollama/<name>")
|
|
46
|
+
*/
|
|
47
|
+
declare function fetchOllamaModels(baseUrl?: string): Promise<Record<string, ModelConfig>>;
|
|
48
|
+
/**
|
|
49
|
+
* Merge dynamically discovered Ollama models into the static MODELS registry.
|
|
50
|
+
*
|
|
51
|
+
* Static defaults are kept as fallbacks. Discovered models override
|
|
52
|
+
* entries with the same key (e.g., "ollama/llama3" from the server
|
|
53
|
+
* replaces the hardcoded entry with live data).
|
|
54
|
+
*
|
|
55
|
+
* @param ollamaModels - Models returned by fetchOllamaModels()
|
|
56
|
+
*/
|
|
57
|
+
declare function mergeOllamaModels(ollamaModels: Record<string, ModelConfig>): void;
|
|
58
|
+
/**
|
|
59
|
+
* One-shot: fetch Ollama models and merge them into the registry.
|
|
60
|
+
* Returns the combined model map.
|
|
61
|
+
*
|
|
62
|
+
* @param baseUrl - Ollama server URL (default: http://localhost:11434)
|
|
63
|
+
*/
|
|
64
|
+
declare function discoverOllamaModels(baseUrl?: string): Promise<Record<string, ModelConfig>>;
|
|
33
65
|
|
|
34
|
-
export { DEFAULT_MODEL, DEFAULT_MODEL_ID, MODELS, type ModelConfig, type ModelId, THINKING_MODELS, type UIModelConfig, getModelsByProvider, getModelsForUI, isOllamaModel, supportsThinking };
|
|
66
|
+
export { DEFAULT_MODEL, DEFAULT_MODEL_ID, MODELS, type ModelConfig, type ModelId, THINKING_MODELS, type UIModelConfig, discoverOllamaModels, fetchOllamaModels, getModelsByProvider, getModelsForUI, isOllamaModel, mergeOllamaModels, supportsThinking };
|
package/dist/models.js
CHANGED
|
@@ -3,19 +3,25 @@ import {
|
|
|
3
3
|
DEFAULT_MODEL_ID,
|
|
4
4
|
MODELS,
|
|
5
5
|
THINKING_MODELS,
|
|
6
|
+
discoverOllamaModels,
|
|
7
|
+
fetchOllamaModels,
|
|
6
8
|
getModelsByProvider,
|
|
7
9
|
getModelsForUI,
|
|
8
10
|
isOllamaModel,
|
|
11
|
+
mergeOllamaModels,
|
|
9
12
|
supportsThinking
|
|
10
|
-
} from "./chunk-
|
|
13
|
+
} from "./chunk-GK5KZOHB.js";
|
|
11
14
|
export {
|
|
12
15
|
DEFAULT_MODEL,
|
|
13
16
|
DEFAULT_MODEL_ID,
|
|
14
17
|
MODELS,
|
|
15
18
|
THINKING_MODELS,
|
|
19
|
+
discoverOllamaModels,
|
|
20
|
+
fetchOllamaModels,
|
|
16
21
|
getModelsByProvider,
|
|
17
22
|
getModelsForUI,
|
|
18
23
|
isOllamaModel,
|
|
24
|
+
mergeOllamaModels,
|
|
19
25
|
supportsThinking
|
|
20
26
|
};
|
|
21
27
|
//# sourceMappingURL=models.js.map
|
package/package.json
CHANGED
package/dist/chunk-ECRY7XDR.js
DELETED
|
@@ -1,109 +0,0 @@
|
|
|
1
|
-
// src/models.ts
|
|
2
|
-
function getModelsForUI() {
|
|
3
|
-
const uiModels = {};
|
|
4
|
-
for (const [id, config] of Object.entries(MODELS)) {
|
|
5
|
-
uiModels[id] = { name: config.name, description: config.description, free: config.free };
|
|
6
|
-
}
|
|
7
|
-
return uiModels;
|
|
8
|
-
}
|
|
9
|
-
function buildModels() {
|
|
10
|
-
const models = {};
|
|
11
|
-
models["openrouter/free"] = {
|
|
12
|
-
name: "Free (Auto)",
|
|
13
|
-
description: "Free \u2014 auto-selects best free model for your request",
|
|
14
|
-
free: true
|
|
15
|
-
};
|
|
16
|
-
models["google/gemma-4-31b-it:free"] = {
|
|
17
|
-
name: "Gemma 4 31B",
|
|
18
|
-
description: "Free \u2014 great quality for Bible study",
|
|
19
|
-
free: true
|
|
20
|
-
};
|
|
21
|
-
models["nvidia/nemotron-3-super-120b-a12b:free"] = {
|
|
22
|
-
name: "Nemotron 3 Super",
|
|
23
|
-
description: "Free \u2014 large model, strong reasoning",
|
|
24
|
-
free: true
|
|
25
|
-
};
|
|
26
|
-
models["deepseek/deepseek-r1:free"] = {
|
|
27
|
-
name: "DeepSeek R1 (Free)",
|
|
28
|
-
description: "Free \u2014 reasoning with thinking support",
|
|
29
|
-
free: true
|
|
30
|
-
};
|
|
31
|
-
models["qwen/qwen3-coder:free"] = {
|
|
32
|
-
name: "Qwen3 Coder",
|
|
33
|
-
description: "Free \u2014 480B parameters, excellent tool use",
|
|
34
|
-
free: true
|
|
35
|
-
};
|
|
36
|
-
models["openrouter/auto"] = {
|
|
37
|
-
name: "Auto (All)",
|
|
38
|
-
description: "Auto-selects best model (requires credits)",
|
|
39
|
-
free: false
|
|
40
|
-
};
|
|
41
|
-
models["anthropic/claude-sonnet-4"] = {
|
|
42
|
-
name: "Claude Sonnet 4",
|
|
43
|
-
description: "Premium \u2014 excellent quality + reasoning (requires credits)",
|
|
44
|
-
free: false
|
|
45
|
-
};
|
|
46
|
-
models["google/gemini-2.5-flash-preview"] = {
|
|
47
|
-
name: "Gemini 2.5 Flash",
|
|
48
|
-
description: "Premium \u2014 fast with thinking (requires credits)",
|
|
49
|
-
free: false
|
|
50
|
-
};
|
|
51
|
-
models["ollama/llama3"] = {
|
|
52
|
-
name: "Llama 3 (Ollama)",
|
|
53
|
-
description: "Local \u2014 Meta's Llama 3, requires Ollama",
|
|
54
|
-
free: true
|
|
55
|
-
};
|
|
56
|
-
models["ollama/gemma3"] = {
|
|
57
|
-
name: "Gemma 3 (Ollama)",
|
|
58
|
-
description: "Local \u2014 Google's Gemma 3, requires Ollama",
|
|
59
|
-
free: true
|
|
60
|
-
};
|
|
61
|
-
models["ollama/mistral"] = {
|
|
62
|
-
name: "Mistral (Ollama)",
|
|
63
|
-
description: "Local \u2014 Mistral's 7B model, requires Ollama",
|
|
64
|
-
free: true
|
|
65
|
-
};
|
|
66
|
-
models["ollama/qwen3"] = {
|
|
67
|
-
name: "Qwen 3 (Ollama)",
|
|
68
|
-
description: "Local \u2014 Alibaba's Qwen 3, requires Ollama",
|
|
69
|
-
free: true
|
|
70
|
-
};
|
|
71
|
-
models["ollama/deepseek-r1"] = {
|
|
72
|
-
name: "DeepSeek R1 (Ollama)",
|
|
73
|
-
description: "Local \u2014 reasoning model, requires Ollama",
|
|
74
|
-
free: true
|
|
75
|
-
};
|
|
76
|
-
return models;
|
|
77
|
-
}
|
|
78
|
-
var MODELS = buildModels();
|
|
79
|
-
var DEFAULT_MODEL_ID = "openrouter/free";
|
|
80
|
-
var DEFAULT_MODEL = DEFAULT_MODEL_ID;
|
|
81
|
-
var THINKING_MODELS = /* @__PURE__ */ new Set([
|
|
82
|
-
"anthropic/claude-sonnet-4",
|
|
83
|
-
"deepseek/deepseek-r1:free",
|
|
84
|
-
"google/gemini-2.5-flash-preview",
|
|
85
|
-
"ollama/deepseek-r1"
|
|
86
|
-
]);
|
|
87
|
-
function supportsThinking(model) {
|
|
88
|
-
return THINKING_MODELS.has(model);
|
|
89
|
-
}
|
|
90
|
-
function isOllamaModel(model) {
|
|
91
|
-
return model.startsWith("ollama/");
|
|
92
|
-
}
|
|
93
|
-
function getModelsByProvider(provider) {
|
|
94
|
-
return Object.keys(MODELS).filter(
|
|
95
|
-
(id) => id.startsWith(`${provider}/`)
|
|
96
|
-
);
|
|
97
|
-
}
|
|
98
|
-
|
|
99
|
-
export {
|
|
100
|
-
getModelsForUI,
|
|
101
|
-
MODELS,
|
|
102
|
-
DEFAULT_MODEL_ID,
|
|
103
|
-
DEFAULT_MODEL,
|
|
104
|
-
THINKING_MODELS,
|
|
105
|
-
supportsThinking,
|
|
106
|
-
isOllamaModel,
|
|
107
|
-
getModelsByProvider
|
|
108
|
-
};
|
|
109
|
-
//# sourceMappingURL=chunk-ECRY7XDR.js.map
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
{"version":3,"sources":["../src/models.ts"],"sourcesContent":["/**\n * Origen model configuration.\n *\n * Delegates to pi-ai's model registry for known providers (OpenRouter, Anthropic, Google, etc.)\n * Plus custom entries for Ollama and free-tier aliases.\n */\n\nimport { getModel } from \"@mariozechner/pi-ai\";\nimport type { Model, Api } from \"@mariozechner/pi-ai\";\nexport type { Model as ProviderModel, Api } from \"@mariozechner/pi-ai\";\n\n// ── Model registry ────────────────────────────────────────────────────\n\nexport interface ModelConfig {\n name: string;\n description: string;\n free: boolean;\n}\n\n/** UI-facing model config — safe to send to the client. Strips internal fields. */\nexport type UIModelConfig = ModelConfig;\n\n/** Get models as a simple UI map (name, description, free). No internal fields. */\nexport function getModelsForUI(): Record<string, UIModelConfig> {\n const uiModels: Record<string, UIModelConfig> = {};\n for (const [id, config] of Object.entries(MODELS)) {\n uiModels[id] = { name: config.name, description: config.description, free: config.free };\n }\n return uiModels;\n}\n\n// Build MODELS map from pi-ai registry + custom entries\nfunction buildModels(): Record<string, ModelConfig> {\n const models: Record<string, ModelConfig> = {};\n\n // ── OpenRouter (free tier) ───────────────────────────\n models[\"openrouter/free\"] = {\n name: \"Free (Auto)\",\n description: \"Free — auto-selects best free model for your request\",\n free: true,\n };\n models[\"google/gemma-4-31b-it:free\"] = {\n name: \"Gemma 4 31B\",\n description: \"Free — great quality for Bible study\",\n free: true,\n };\n models[\"nvidia/nemotron-3-super-120b-a12b:free\"] = {\n name: \"Nemotron 3 Super\",\n description: \"Free — large model, strong reasoning\",\n free: true,\n };\n models[\"deepseek/deepseek-r1:free\"] = {\n name: \"DeepSeek R1 (Free)\",\n description: \"Free — reasoning with thinking support\",\n free: true,\n };\n\n 
models[\"qwen/qwen3-coder:free\"] = {\n name: \"Qwen3 Coder\",\n description: \"Free — 480B parameters, excellent tool use\",\n free: true,\n };\n\n // ── OpenRouter (premium) ─────────────────────────────\n models[\"openrouter/auto\"] = {\n name: \"Auto (All)\",\n description: \"Auto-selects best model (requires credits)\",\n free: false,\n };\n models[\"anthropic/claude-sonnet-4\"] = {\n name: \"Claude Sonnet 4\",\n description: \"Premium — excellent quality + reasoning (requires credits)\",\n free: false,\n };\n models[\"google/gemini-2.5-flash-preview\"] = {\n name: \"Gemini 2.5 Flash\",\n description: \"Premium — fast with thinking (requires credits)\",\n free: false,\n };\n\n // ── Ollama (local, always free) ──────────────────────\n models[\"ollama/llama3\"] = {\n name: \"Llama 3 (Ollama)\",\n description: \"Local — Meta's Llama 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/gemma3\"] = {\n name: \"Gemma 3 (Ollama)\",\n description: \"Local — Google's Gemma 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/mistral\"] = {\n name: \"Mistral (Ollama)\",\n description: \"Local — Mistral's 7B model, requires Ollama\",\n free: true,\n };\n models[\"ollama/qwen3\"] = {\n name: \"Qwen 3 (Ollama)\",\n description: \"Local — Alibaba's Qwen 3, requires Ollama\",\n free: true,\n };\n models[\"ollama/deepseek-r1\"] = {\n name: \"DeepSeek R1 (Ollama)\",\n description: \"Local — reasoning model, requires Ollama\",\n free: true,\n };\n\n return models;\n}\n\nexport const MODELS: Record<string, ModelConfig> = buildModels();\nexport type ModelId = keyof typeof MODELS;\n\n/** Default model — free router, works with $0 credits */\nexport const DEFAULT_MODEL_ID: ModelId = \"openrouter/free\";\n\n/** Backward compat alias */\nexport const DEFAULT_MODEL: ModelId = DEFAULT_MODEL_ID;\n\n/** Models that support extended thinking */\nexport const THINKING_MODELS: ReadonlySet<ModelId> = new Set<ModelId>([\n \"anthropic/claude-sonnet-4\",\n 
\"deepseek/deepseek-r1:free\",\n \"google/gemini-2.5-flash-preview\",\n \"ollama/deepseek-r1\",\n]);\n\n/** Check if a model supports extended thinking */\nexport function supportsThinking(model: ModelId): boolean {\n return THINKING_MODELS.has(model);\n}\n\n/** Check if a model is an Ollama model */\nexport function isOllamaModel(model: ModelId): boolean {\n return (model as string).startsWith(\"ollama/\");\n}\n\n/** Get all model IDs for a specific provider prefix */\nexport function getModelsByProvider(provider: string): ModelId[] {\n return (Object.keys(MODELS) as ModelId[]).filter((id) =>\n (id as string).startsWith(`${provider}/`)\n );\n}"],"mappings":";AAuBO,SAAS,iBAAgD;AAC9D,QAAM,WAA0C,CAAC;AACjD,aAAW,CAAC,IAAI,MAAM,KAAK,OAAO,QAAQ,MAAM,GAAG;AACjD,aAAS,EAAE,IAAI,EAAE,MAAM,OAAO,MAAM,aAAa,OAAO,aAAa,MAAM,OAAO,KAAK;AAAA,EACzF;AACA,SAAO;AACT;AAGA,SAAS,cAA2C;AAClD,QAAM,SAAsC,CAAC;AAG7C,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,4BAA4B,IAAI;AAAA,IACrC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,wCAAwC,IAAI;AAAA,IACjD,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAEA,SAAO,uBAAuB,IAAI;AAAA,IAChC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,2BAA2B,IAAI;AAAA,IACpC,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,iCAAiC,IAAI;AAAA,IAC1C,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAGA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,eAAe,IAAI;AAAA,IACxB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,gBAAgB,IAAI;AAAA,IACzB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,cAAc,IAAI;AAAA,IACvB,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AACA,SAAO,oBAAoB,IAAI;AAAA,IAC7B,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,EACR;AAEA,SAAO;AACT;AAEO,IAAM,SAAsC,YAAY;AAIxD,IAAM,mBAA4B;AAGlC,IAAM,gBAAyB;AAG/B,IAAM,kBAAwC,oBAAI,I
AAa;AAAA,EACpE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAGM,SAAS,iBAAiB,OAAyB;AACxD,SAAO,gBAAgB,IAAI,KAAK;AAClC;AAGO,SAAS,cAAc,OAAyB;AACrD,SAAQ,MAAiB,WAAW,SAAS;AAC/C;AAGO,SAAS,oBAAoB,UAA6B;AAC/D,SAAQ,OAAO,KAAK,MAAM,EAAgB;AAAA,IAAO,CAAC,OAC/C,GAAc,WAAW,GAAG,QAAQ,GAAG;AAAA,EAC1C;AACF;","names":[]}
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
{"version":3,"sources":["../src/adapter.ts"],"sourcesContent":["/**\n * Adapter: bridges Origen's simple types to pi-agent-core/pi-ai types.\n *\n * - OrigenTool → AgentTool (injects D1Provider)\n * - pi-ai Model resolution (OpenRouter, Ollama, Anthropic, Google)\n * - StreamEvent translation (AgentEvent → Origen's StreamEvent)\n */\n\nimport { getModel } from \"@mariozechner/pi-ai\";\nimport type { Model, Api, Message, Context, Tool } from \"@mariozechner/pi-ai\";\nimport type { AgentTool, AgentEvent, AgentMessage } from \"@mariozechner/pi-agent-core\";\nimport type { OrigenTool, StreamEvent } from \"./agent\";\nimport type { D1Provider, Citation, UsageInfo } from \"./types\";\n\n// ── Tool adapter ─────────────────────────────────────────────────────\n\n/**\n * Convert an OrigenTool into a pi-agent-core AgentTool.\n * The D1Provider is captured in closure so the tool's execute gets it.\n */\nexport function adaptTool(tool: OrigenTool, getD1: D1Provider): AgentTool {\n return {\n name: tool.name,\n description: tool.description,\n // Convert JSON schema to TypeBox format — pi-agent-core uses TypeBox\n // but accepts plain JSON schemas for the tool definition sent to the LLM.\n // We provide parameters as a TypeBox-like schema.\n parameters: {\n type: \"object\",\n ...tool.parameters,\n } as any,\n label: tool.name,\n execute: async (_toolCallId, params, _signal) => {\n const result = await tool.execute(params as Record<string, unknown>, getD1);\n return {\n content: [{ type: \"text\" as const, text: result }],\n details: {},\n };\n },\n };\n}\n\n/** Adapt all OrigenTools for an Agent instance. */\nexport function adaptTools(tools: OrigenTool[], getD1: D1Provider): AgentTool[] {\n return tools.map((t) => adaptTool(t, getD1));\n}\n\n// ── Model resolution ──────────────────────────────────────────────────\n\nexport interface ModelResolutionOptions {\n /** Ollama base URL, e.g. 
\"http://localhost:11434/v1\" */\n ollamaBaseUrl?: string;\n}\n\n/** Known Ollama models that don't exist in pi-ai's generated registry. */\nconst OLLAMA_MODELS: Record<string, Partial<Model<Api>>> = {\n \"ollama/llama3\": {\n id: \"llama3\",\n name: \"Llama 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 8192,\n maxTokens: 4096,\n },\n \"ollama/gemma3\": {\n id: \"gemma3\",\n name: \"Gemma 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 8192,\n maxTokens: 4096,\n },\n \"ollama/mistral\": {\n id: \"mistral\",\n name: \"Mistral (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 32768,\n maxTokens: 4096,\n },\n \"ollama/qwen3\": {\n id: \"qwen3\",\n name: \"Qwen 3 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 32768,\n maxTokens: 4096,\n },\n \"ollama/deepseek-r1\": {\n id: \"deepseek-r1\",\n name: \"DeepSeek R1 (Ollama)\",\n api: \"openai-completions\",\n provider: \"ollama\",\n baseUrl: \"http://localhost:11434/v1\",\n reasoning: true,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 65536,\n maxTokens: 8192,\n },\n};\n\nconst DEFAULT_MODEL: Model<Api> = {\n id: \"openrouter/free\",\n name: \"Free (Auto)\",\n api: \"openai-completions\",\n provider: \"openrouter\",\n baseUrl: 
\"https://openrouter.ai/api/v1\",\n reasoning: false,\n input: [\"text\"],\n cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },\n contextWindow: 128000,\n maxTokens: 4096,\n};\n\n/**\n * Resolve a model ID string to a pi-ai Model object.\n * Tries pi-ai's registry first, then falls back to built-in Ollama definitions.\n */\nexport function resolveModel(modelId: string, options?: ModelResolutionOptions): Model<Api> {\n // Try Ollama models first\n if (modelId.startsWith(\"ollama/\")) {\n const ollamaDef = OLLAMA_MODELS[modelId];\n if (ollamaDef) {\n const baseUrl = options?.ollamaBaseUrl ?? ollamaDef.baseUrl ?? \"http://localhost:11434/v1\";\n return {\n ...DEFAULT_MODEL,\n ...ollamaDef,\n baseUrl,\n compat: {\n supportsStore: false,\n supportsDeveloperRole: false,\n supportsReasoningEffort: false,\n supportsUsageInStreaming: false,\n maxTokensField: \"max_tokens\",\n requiresToolResultName: false,\n requiresAssistantAfterToolResult: false,\n requiresThinkingAsText: true,\n requiresReasoningContentOnAssistantMessages: false,\n thinkingFormat: \"openai\",\n supportsStrictMode: false,\n supportsLongCacheRetention: false,\n },\n } as Model<Api>;\n }\n // Generic Ollama model: user typed a custom model name\n const customId = modelId.replace(\"ollama/\", \"\");\n return {\n ...DEFAULT_MODEL,\n id: customId,\n name: `${customId} (Ollama)`,\n provider: \"ollama\",\n baseUrl: options?.ollamaBaseUrl ?? 
\"http://localhost:11434/v1\",\n compat: {\n supportsStore: false,\n supportsDeveloperRole: false,\n supportsReasoningEffort: false,\n supportsUsageInStreaming: false,\n maxTokensField: \"max_tokens\",\n requiresToolResultName: false,\n requiresAssistantAfterToolResult: false,\n requiresThinkingAsText: true,\n requiresReasoningContentOnAssistantMessages: false,\n thinkingFormat: \"openai\",\n supportsStrictMode: false,\n supportsLongCacheRetention: false,\n },\n } as Model<Api>;\n }\n\n // Try pi-ai's model registry (OpenRouter, Anthropic, Google, etc.)\n // pi-ai groups by provider, so we try known providers\n const providers = [\"openrouter\", \"anthropic\", \"google\", \"openai\", \"deepseek\", \"groq\", \"xai\"];\n for (const provider of providers) {\n try {\n const model = getModel(provider as any, modelId as any);\n if (model) return model as Model<Api>;\n } catch {\n // Not found in this provider, try next\n }\n }\n\n // Fallback: create a generic OpenRouter-compatible model\n return {\n ...DEFAULT_MODEL,\n id: modelId,\n name: modelId,\n };\n}\n\n// ── Message conversion ────────────────────────────────────────────────\n\n/** Convert Origen's simple messages to pi-ai Message format. */\nexport function convertMessages(\n messages: Array<{ role: \"user\" | \"assistant\"; content: string }>\n): Message[] {\n return messages.map((m) => ({\n role: m.role,\n content: m.content as any,\n timestamp: Date.now(),\n })) as Message[];\n}\n\n// ── Context builder ───────────────────────────────────────────────────\n\n/** Build a pi-ai Context from Origen's config. 
*/\nexport function buildContext(\n systemPrompt: string,\n messages: Message[],\n adaptedTools: AgentTool[]\n): Context {\n return {\n systemPrompt,\n messages,\n tools: adaptedTools.map((t) => ({\n name: t.name,\n description: t.description,\n parameters: t.parameters,\n })),\n };\n}\n\n// ── Event translation ─────────────────────────────────────────────────\n\n/** Default citation extractor — [BOOK CHAPTER:VERSE] patterns. */\nfunction defaultCitationExtractor(text: string): Citation[] {\n const citations: Citation[] = [];\n const regex = /\\[([A-Z]{3})\\s+(\\d+):(\\d+)\\]/g;\n let match;\n while ((match = regex.exec(text)) !== null) {\n citations.push({ book: match[1], chapter: parseInt(match[2]), verse: parseInt(match[3]) });\n }\n return citations;\n}\n\n/** Translate a pi-agent-core AgentEvent into an Origen StreamEvent. */\nexport function translateEvent(\n event: AgentEvent,\n extractCitations?: (text: string) => Citation[]\n): StreamEvent | null {\n switch (event.type) {\n case \"message_update\": {\n const assistantEvent = event.assistantMessageEvent;\n if (assistantEvent.type === \"text_delta\") {\n return { type: \"text\" as const, content: assistantEvent.delta };\n }\n if (assistantEvent.type === \"thinking_delta\") {\n return { type: \"reasoning\" as const, content: assistantEvent.delta };\n }\n return null;\n }\n case \"tool_execution_start\": {\n return {\n type: \"tool_call\" as const,\n name: event.toolName,\n args: event.args as Record<string, unknown>,\n };\n }\n case \"tool_execution_end\": {\n const resultText = event.result?.content\n ?.filter((c: any) => c.type === \"text\")\n .map((c: any) => c.text)\n .join(\"\\n\") ?? 
\"\";\n return {\n type: \"tool_result\" as const,\n name: event.toolName,\n result: resultText,\n };\n }\n case \"agent_end\": {\n // Find the final assistant message\n const assistantMsg = event.messages\n .filter((m): m is any => m.role === \"assistant\")\n .pop();\n const text = assistantMsg?.content\n ?.filter((c: any) => c.type === \"text\")\n .map((c: any) => c.text)\n .join(\"\") ?? \"\";\n const usage: UsageInfo | undefined = assistantMsg?.usage\n ? {\n promptTokens: assistantMsg.usage.input,\n completionTokens: assistantMsg.usage.output,\n totalCost: assistantMsg.usage.cost?.total,\n }\n : undefined;\n const citFn = extractCitations ?? defaultCitationExtractor;\n // Check for error\n if (assistantMsg?.stopReason === \"error\" || assistantMsg?.stopReason === \"aborted\") {\n return {\n type: \"error\" as const,\n message: assistantMsg.errorMessage ?? \"Agent encountered an error\",\n };\n }\n return {\n type: \"done\" as const,\n message: text,\n citations: citFn(text),\n usage,\n };\n }\n default:\n return null;\n }\n}\n\n/**\n * Eagerly subscribe to an Agent and return an async iterable of Origen StreamEvents.\n *\n * CRITICAL: The subscription is created synchronously when this function is called,\n * BEFORE agent.prompt() starts. This avoids the race condition where events\n * emitted during prompt() are missed if subscription happens after.\n *\n * Usage:\n * const { stream, unsubscribe } = createEventStream(agent, extractCitations);\n * agent.prompt(messages); // events flow into stream via active subscription\n * for await (const event of stream) { ... 
}\n */\nexport function createEventStream(\n agent: any, // Agent from pi-agent-core\n extractCitations?: (text: string) => Citation[]\n): {\n stream: AsyncGenerator<StreamEvent>;\n unsubscribe: () => void;\n} {\n const queue: StreamEvent[] = [];\n let resolve: (() => void) | null = null;\n let done = false;\n\n // Subscribe IMMEDIATELY (before prompt is called)\n const unsubscribe = agent.subscribe((event: AgentEvent) => {\n const translated = translateEvent(event, extractCitations);\n if (translated) {\n queue.push(translated);\n if (resolve) {\n resolve();\n resolve = null;\n }\n }\n if (event.type === \"agent_end\") {\n done = true;\n if (resolve) {\n resolve();\n resolve = null;\n }\n }\n });\n\n async function* stream(): AsyncGenerator<StreamEvent> {\n try {\n while (!done || queue.length > 0) {\n if (queue.length > 0) {\n yield queue.shift()!;\n continue;\n }\n if (done) break;\n await new Promise<void>((r) => { resolve = r; });\n }\n } finally {\n unsubscribe();\n }\n }\n\n return { stream: stream(), unsubscribe };\n}\n\n/**\n * Subscribe to an Agent and yield Origen StreamEvents.\n * Handles the full lifecycle from agent_start to agent_end.\n *\n * @deprecated Use createEventStream() instead to avoid race conditions.\n * This function subscribes lazily (on first iteration) which can miss events\n * if the agent has already started emitting.\n */\nexport async function* agentToStreamEvents(\n agent: any,\n extractCitations?: (text: string) => Citation[]\n): AsyncGenerator<StreamEvent> {\n yield* createEventStream(agent, 
extractCitations).stream;\n}"],"mappings":";AAQA,SAAS,gBAAgB;AAYlB,SAAS,UAAU,MAAkB,OAA8B;AACxE,SAAO;AAAA,IACL,MAAM,KAAK;AAAA,IACX,aAAa,KAAK;AAAA;AAAA;AAAA;AAAA,IAIlB,YAAY;AAAA,MACV,MAAM;AAAA,MACN,GAAG,KAAK;AAAA,IACV;AAAA,IACA,OAAO,KAAK;AAAA,IACZ,SAAS,OAAO,aAAa,QAAQ,YAAY;AAC/C,YAAM,SAAS,MAAM,KAAK,QAAQ,QAAmC,KAAK;AAC1E,aAAO;AAAA,QACL,SAAS,CAAC,EAAE,MAAM,QAAiB,MAAM,OAAO,CAAC;AAAA,QACjD,SAAS,CAAC;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AACF;AAGO,SAAS,WAAW,OAAqB,OAAgC;AAC9E,SAAO,MAAM,IAAI,CAAC,MAAM,UAAU,GAAG,KAAK,CAAC;AAC7C;AAUA,IAAM,gBAAqD;AAAA,EACzD,iBAAiB;AAAA,IACf,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,iBAAiB;AAAA,IACf,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,kBAAkB;AAAA,IAChB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,gBAAgB;AAAA,IACd,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AAAA,EACA,sBAAsB;AAAA,IACpB,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,KAAK;AAAA,IACL,UAAU;AAAA,IACV,SAAS;AAAA,IACT,WAAW;AAAA,IACX,OAAO,CAAC,MAAM;AAAA,IACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,IACzD,eAAe;AAAA,IACf,WAAW;AAAA,EACb;AACF;AAEA,IAAM,gBAA4B;AAAA,EAChC,IAAI;AAAA,EACJ,MAAM;AAAA,EACN,KAAK;AAAA,EACL,UAAU;AAAA,EACV,SAAS;AAAA,EACT,WAAW;AAAA,EACX,OAAO,CAAC,MAAM;AAAA,EACd,MAAM,EAAE,OAAO,GAAG,QAAQ,GAAG,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,eAAe;AAAA,EACf,WAAW;AACb;AAMO,SAAS,aAAa,SAAiB,SAA8C;AAE1F,MAAI,QAAQ,WAAW,SAAS,GAAG;AACjC,UA
AM,YAAY,cAAc,OAAO;AACvC,QAAI,WAAW;AACb,YAAM,UAAU,SAAS,iBAAiB,UAAU,WAAW;AAC/D,aAAO;AAAA,QACL,GAAG;AAAA,QACH,GAAG;AAAA,QACH;AAAA,QACA,QAAQ;AAAA,UACN,eAAe;AAAA,UACf,uBAAuB;AAAA,UACvB,yBAAyB;AAAA,UACzB,0BAA0B;AAAA,UAC1B,gBAAgB;AAAA,UAChB,wBAAwB;AAAA,UACxB,kCAAkC;AAAA,UAClC,wBAAwB;AAAA,UACxB,6CAA6C;AAAA,UAC7C,gBAAgB;AAAA,UAChB,oBAAoB;AAAA,UACpB,4BAA4B;AAAA,QAC9B;AAAA,MACF;AAAA,IACF;AAEA,UAAM,WAAW,QAAQ,QAAQ,WAAW,EAAE;AAC9C,WAAO;AAAA,MACL,GAAG;AAAA,MACH,IAAI;AAAA,MACJ,MAAM,GAAG,QAAQ;AAAA,MACjB,UAAU;AAAA,MACV,SAAS,SAAS,iBAAiB;AAAA,MACnC,QAAQ;AAAA,QACN,eAAe;AAAA,QACf,uBAAuB;AAAA,QACvB,yBAAyB;AAAA,QACzB,0BAA0B;AAAA,QAC1B,gBAAgB;AAAA,QAChB,wBAAwB;AAAA,QACxB,kCAAkC;AAAA,QAClC,wBAAwB;AAAA,QACxB,6CAA6C;AAAA,QAC7C,gBAAgB;AAAA,QAChB,oBAAoB;AAAA,QACpB,4BAA4B;AAAA,MAC9B;AAAA,IACF;AAAA,EACF;AAIA,QAAM,YAAY,CAAC,cAAc,aAAa,UAAU,UAAU,YAAY,QAAQ,KAAK;AAC3F,aAAW,YAAY,WAAW;AAChC,QAAI;AACF,YAAM,QAAQ,SAAS,UAAiB,OAAc;AACtD,UAAI,MAAO,QAAO;AAAA,IACpB,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,SAAO;AAAA,IACL,GAAG;AAAA,IACH,IAAI;AAAA,IACJ,MAAM;AAAA,EACR;AACF;AAKO,SAAS,gBACd,UACW;AACX,SAAO,SAAS,IAAI,CAAC,OAAO;AAAA,IAC1B,MAAM,EAAE;AAAA,IACR,SAAS,EAAE;AAAA,IACX,WAAW,KAAK,IAAI;AAAA,EACtB,EAAE;AACJ;AAKO,SAAS,aACd,cACA,UACA,cACS;AACT,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,OAAO,aAAa,IAAI,CAAC,OAAO;AAAA,MAC9B,MAAM,EAAE;AAAA,MACR,aAAa,EAAE;AAAA,MACf,YAAY,EAAE;AAAA,IAChB,EAAE;AAAA,EACJ;AACF;AAKA,SAAS,yBAAyB,MAA0B;AAC1D,QAAM,YAAwB,CAAC;AAC/B,QAAM,QAAQ;AACd,MAAI;AACJ,UAAQ,QAAQ,MAAM,KAAK,IAAI,OAAO,MAAM;AAC1C,cAAU,KAAK,EAAE,MAAM,MAAM,CAAC,GAAG,SAAS,SAAS,MAAM,CAAC,CAAC,GAAG,OAAO,SAAS,MAAM,CAAC,CAAC,EAAE,CAAC;AAAA,EAC3F;AACA,SAAO;AACT;AAGO,SAAS,eACd,OACA,kBACoB;AACpB,UAAQ,MAAM,MAAM;AAAA,IAClB,KAAK,kBAAkB;AACrB,YAAM,iBAAiB,MAAM;AAC7B,UAAI,eAAe,SAAS,cAAc;AACxC,eAAO,EAAE,MAAM,QAAiB,SAAS,eAAe,MAAM;AAAA,MAChE;AACA,UAAI,eAAe,SAAS,kBAAkB;AAC5C,eAAO,EAAE,MAAM,aAAsB,SAAS,eAAe,MAAM;AAAA,MACrE;AACA,aAAO;AAAA,IACT;AAAA,IACA,KAAK,wBAAwB;AAC3B,aAAO;AAAA,QACL,MAAM;AAAA,QACN,MAAM,MAAM;AAAA,QACZ,MAAM,MAAM;AAAA,MACd;AAAA,IACF;AAAA,IACA,KAAK,sBAAsB;A
ACzB,YAAM,aAAa,MAAM,QAAQ,SAC7B,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACrC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,IAAI,KAAK;AACjB,aAAO;AAAA,QACL,MAAM;AAAA,QACN,MAAM,MAAM;AAAA,QACZ,QAAQ;AAAA,MACV;AAAA,IACF;AAAA,IACA,KAAK,aAAa;AAEhB,YAAM,eAAe,MAAM,SACxB,OAAO,CAAC,MAAgB,EAAE,SAAS,WAAW,EAC9C,IAAI;AACP,YAAM,OAAO,cAAc,SACvB,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACrC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,EAAE,KAAK;AACf,YAAM,QAA+B,cAAc,QAC/C;AAAA,QACE,cAAc,aAAa,MAAM;AAAA,QACjC,kBAAkB,aAAa,MAAM;AAAA,QACrC,WAAW,aAAa,MAAM,MAAM;AAAA,MACtC,IACA;AACJ,YAAM,QAAQ,oBAAoB;AAElC,UAAI,cAAc,eAAe,WAAW,cAAc,eAAe,WAAW;AAClF,eAAO;AAAA,UACL,MAAM;AAAA,UACN,SAAS,aAAa,gBAAgB;AAAA,QACxC;AAAA,MACF;AACA,aAAO;AAAA,QACL,MAAM;AAAA,QACN,SAAS;AAAA,QACT,WAAW,MAAM,IAAI;AAAA,QACrB;AAAA,MACF;AAAA,IACF;AAAA,IACA;AACE,aAAO;AAAA,EACX;AACF;AAcO,SAAS,kBACd,OACA,kBAIA;AACA,QAAM,QAAuB,CAAC;AAC9B,MAAI,UAA+B;AACnC,MAAI,OAAO;AAGX,QAAM,cAAc,MAAM,UAAU,CAAC,UAAsB;AACzD,UAAM,aAAa,eAAe,OAAO,gBAAgB;AACzD,QAAI,YAAY;AACd,YAAM,KAAK,UAAU;AACrB,UAAI,SAAS;AACX,gBAAQ;AACR,kBAAU;AAAA,MACZ;AAAA,IACF;AACA,QAAI,MAAM,SAAS,aAAa;AAC9B,aAAO;AACP,UAAI,SAAS;AACX,gBAAQ;AACR,kBAAU;AAAA,MACZ;AAAA,IACF;AAAA,EACF,CAAC;AAED,kBAAgB,SAAsC;AACpD,QAAI;AACF,aAAO,CAAC,QAAQ,MAAM,SAAS,GAAG;AAChC,YAAI,MAAM,SAAS,GAAG;AACpB,gBAAM,MAAM,MAAM;AAClB;AAAA,QACF;AACA,YAAI,KAAM;AACV,cAAM,IAAI,QAAc,CAAC,MAAM;AAAE,oBAAU;AAAA,QAAG,CAAC;AAAA,MACjD;AAAA,IACF,UAAE;AACA,kBAAY;AAAA,IACd;AAAA,EACF;AAEA,SAAO,EAAE,QAAQ,OAAO,GAAG,YAAY;AACzC;AAUA,gBAAuB,oBACrB,OACA,kBAC6B;AAC7B,SAAO,kBAAkB,OAAO,gBAAgB,EAAE;AACpD;","names":[]}
|