@moikapy/origen 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,27 @@
1
+ {
2
+ "name": "@moikapy/origen",
3
+ "version": "0.3.0",
4
+ "type": "module",
5
+ "main": "./src/index.ts",
6
+ "types": "./src/index.ts",
7
+ "exports": {
8
+ ".": "./src/index.ts",
9
+ "./models": "./src/models.ts",
10
+ "./soul": "./src/soul.ts",
11
+ "./adapter": "./src/adapter.ts"
12
+ },
13
+ "scripts": {
14
+ "test": "vitest run",
15
+ "typecheck": "tsc --noEmit"
16
+ },
17
+ "dependencies": {
18
+ "@mariozechner/pi-agent-core": "^0.73.0",
19
+ "@mariozechner/pi-ai": "^0.73.0",
20
+ "zod": "^4.4.2"
21
+ },
22
+ "devDependencies": {
23
+ "typescript": "^5",
24
+ "vitest": "^3"
25
+ },
26
+ "license": "MIT"
27
+ }
package/src/adapter.ts ADDED
@@ -0,0 +1,364 @@
1
+ /**
2
+ * Adapter: bridges Origen's simple types to pi-agent-core/pi-ai types.
3
+ *
4
+ * - OrigenTool → AgentTool (injects D1Provider)
5
+ * - pi-ai Model resolution (OpenRouter, Ollama, Anthropic, Google)
6
+ * - StreamEvent translation (AgentEvent → Origen's StreamEvent)
7
+ */
8
+
9
+ import { getModel } from "@mariozechner/pi-ai";
10
+ import type { Model, Api, Message, Context, Tool } from "@mariozechner/pi-ai";
11
+ import type { AgentTool, AgentEvent, AgentMessage } from "@mariozechner/pi-agent-core";
12
+ import type { OrigenTool, StreamEvent } from "./agent";
13
+ import type { D1Provider, Citation, UsageInfo } from "./types";
14
+
15
+ // ── Tool adapter ─────────────────────────────────────────────────────
16
+
17
+ /**
18
+ * Convert an OrigenTool into a pi-agent-core AgentTool.
19
+ * The D1Provider is captured in closure so the tool's execute gets it.
20
+ */
21
+ export function adaptTool(tool: OrigenTool, getD1: D1Provider): AgentTool {
22
+ return {
23
+ name: tool.name,
24
+ description: tool.description,
25
+ // Convert JSON schema to TypeBox format — pi-agent-core uses TypeBox
26
+ // but accepts plain JSON schemas for the tool definition sent to the LLM.
27
+ // We provide parameters as a TypeBox-like schema.
28
+ parameters: {
29
+ type: "object",
30
+ ...tool.parameters,
31
+ } as any,
32
+ label: tool.name,
33
+ execute: async (_toolCallId, params, _signal) => {
34
+ const result = await tool.execute(params as Record<string, unknown>, getD1);
35
+ return {
36
+ content: [{ type: "text" as const, text: result }],
37
+ details: {},
38
+ };
39
+ },
40
+ };
41
+ }
42
+
43
+ /** Adapt all OrigenTools for an Agent instance. */
44
+ export function adaptTools(tools: OrigenTool[], getD1: D1Provider): AgentTool[] {
45
+ return tools.map((t) => adaptTool(t, getD1));
46
+ }
47
+
48
+ // ── Model resolution ──────────────────────────────────────────────────
49
+
50
/** Options influencing how a model ID string is resolved to a pi-ai Model. */
export interface ModelResolutionOptions {
  /** Ollama base URL override, e.g. "http://localhost:11434/v1" (defaults to localhost). */
  ollamaBaseUrl?: string;
}
54
+
55
+ /** Known Ollama models that don't exist in pi-ai's generated registry. */
56
+ const OLLAMA_MODELS: Record<string, Partial<Model<Api>>> = {
57
+ "ollama/llama3": {
58
+ id: "llama3",
59
+ name: "Llama 3 (Ollama)",
60
+ api: "openai-completions",
61
+ provider: "ollama",
62
+ baseUrl: "http://localhost:11434/v1",
63
+ reasoning: false,
64
+ input: ["text"],
65
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
66
+ contextWindow: 8192,
67
+ maxTokens: 4096,
68
+ },
69
+ "ollama/gemma3": {
70
+ id: "gemma3",
71
+ name: "Gemma 3 (Ollama)",
72
+ api: "openai-completions",
73
+ provider: "ollama",
74
+ baseUrl: "http://localhost:11434/v1",
75
+ reasoning: false,
76
+ input: ["text"],
77
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
78
+ contextWindow: 8192,
79
+ maxTokens: 4096,
80
+ },
81
+ "ollama/mistral": {
82
+ id: "mistral",
83
+ name: "Mistral (Ollama)",
84
+ api: "openai-completions",
85
+ provider: "ollama",
86
+ baseUrl: "http://localhost:11434/v1",
87
+ reasoning: false,
88
+ input: ["text"],
89
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
90
+ contextWindow: 32768,
91
+ maxTokens: 4096,
92
+ },
93
+ "ollama/qwen3": {
94
+ id: "qwen3",
95
+ name: "Qwen 3 (Ollama)",
96
+ api: "openai-completions",
97
+ provider: "ollama",
98
+ baseUrl: "http://localhost:11434/v1",
99
+ reasoning: false,
100
+ input: ["text"],
101
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
102
+ contextWindow: 32768,
103
+ maxTokens: 4096,
104
+ },
105
+ "ollama/deepseek-r1": {
106
+ id: "deepseek-r1",
107
+ name: "DeepSeek R1 (Ollama)",
108
+ api: "openai-completions",
109
+ provider: "ollama",
110
+ baseUrl: "http://localhost:11434/v1",
111
+ reasoning: true,
112
+ input: ["text"],
113
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
114
+ contextWindow: 65536,
115
+ maxTokens: 8192,
116
+ },
117
+ };
118
+
119
+ const DEFAULT_MODEL: Model<Api> = {
120
+ id: "openrouter/free",
121
+ name: "Free (Auto)",
122
+ api: "openai-completions",
123
+ provider: "openrouter",
124
+ baseUrl: "https://openrouter.ai/api/v1",
125
+ reasoning: false,
126
+ input: ["text"],
127
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
128
+ contextWindow: 128000,
129
+ maxTokens: 4096,
130
+ };
131
+
132
+ /**
133
+ * Resolve a model ID string to a pi-ai Model object.
134
+ * Tries pi-ai's registry first, then falls back to built-in Ollama definitions.
135
+ */
136
+ export function resolveModel(modelId: string, options?: ModelResolutionOptions): Model<Api> {
137
+ // Try Ollama models first
138
+ if (modelId.startsWith("ollama/")) {
139
+ const ollamaDef = OLLAMA_MODELS[modelId];
140
+ if (ollamaDef) {
141
+ const baseUrl = options?.ollamaBaseUrl ?? ollamaDef.baseUrl ?? "http://localhost:11434/v1";
142
+ return {
143
+ ...DEFAULT_MODEL,
144
+ ...ollamaDef,
145
+ baseUrl,
146
+ compat: {
147
+ supportsStore: false,
148
+ supportsDeveloperRole: false,
149
+ supportsReasoningEffort: false,
150
+ supportsUsageInStreaming: false,
151
+ maxTokensField: "max_tokens",
152
+ requiresToolResultName: false,
153
+ requiresAssistantAfterToolResult: false,
154
+ requiresThinkingAsText: true,
155
+ requiresReasoningContentOnAssistantMessages: false,
156
+ thinkingFormat: "openai",
157
+ supportsStrictMode: false,
158
+ supportsLongCacheRetention: false,
159
+ },
160
+ } as Model<Api>;
161
+ }
162
+ // Generic Ollama model: user typed a custom model name
163
+ const customId = modelId.replace("ollama/", "");
164
+ return {
165
+ ...DEFAULT_MODEL,
166
+ id: customId,
167
+ name: `${customId} (Ollama)`,
168
+ provider: "ollama",
169
+ baseUrl: options?.ollamaBaseUrl ?? "http://localhost:11434/v1",
170
+ compat: {
171
+ supportsStore: false,
172
+ supportsDeveloperRole: false,
173
+ supportsReasoningEffort: false,
174
+ supportsUsageInStreaming: false,
175
+ maxTokensField: "max_tokens",
176
+ requiresToolResultName: false,
177
+ requiresAssistantAfterToolResult: false,
178
+ requiresThinkingAsText: true,
179
+ requiresReasoningContentOnAssistantMessages: false,
180
+ thinkingFormat: "openai",
181
+ supportsStrictMode: false,
182
+ supportsLongCacheRetention: false,
183
+ },
184
+ } as Model<Api>;
185
+ }
186
+
187
+ // Try pi-ai's model registry (OpenRouter, Anthropic, Google, etc.)
188
+ // pi-ai groups by provider, so we try known providers
189
+ const providers = ["openrouter", "anthropic", "google", "openai", "deepseek", "groq", "xai"];
190
+ for (const provider of providers) {
191
+ try {
192
+ const model = getModel(provider as any, modelId as any);
193
+ if (model) return model as Model<Api>;
194
+ } catch {
195
+ // Not found in this provider, try next
196
+ }
197
+ }
198
+
199
+ // Fallback: create a generic OpenRouter-compatible model
200
+ return {
201
+ ...DEFAULT_MODEL,
202
+ id: modelId,
203
+ name: modelId,
204
+ };
205
+ }
206
+
207
+ // ── Message conversion ────────────────────────────────────────────────
208
+
209
+ /** Convert Origen's simple messages to pi-ai Message format. */
210
+ export function convertMessages(
211
+ messages: Array<{ role: "user" | "assistant"; content: string }>
212
+ ): Message[] {
213
+ return messages.map((m) => ({
214
+ role: m.role,
215
+ content: m.content as any,
216
+ timestamp: Date.now(),
217
+ })) as Message[];
218
+ }
219
+
220
+ // ── Context builder ───────────────────────────────────────────────────
221
+
222
+ /** Build a pi-ai Context from Origen's config. */
223
+ export function buildContext(
224
+ systemPrompt: string,
225
+ messages: Message[],
226
+ adaptedTools: AgentTool[]
227
+ ): Context {
228
+ return {
229
+ systemPrompt,
230
+ messages,
231
+ tools: adaptedTools.map((t) => ({
232
+ name: t.name,
233
+ description: t.description,
234
+ parameters: t.parameters,
235
+ })),
236
+ };
237
+ }
238
+
239
+ // ── Event translation ─────────────────────────────────────────────────
240
+
241
+ /** Default citation extractor — [BOOK CHAPTER:VERSE] patterns. */
242
+ function defaultCitationExtractor(text: string): Citation[] {
243
+ const citations: Citation[] = [];
244
+ const regex = /\[([A-Z]{3})\s+(\d+):(\d+)\]/g;
245
+ let match;
246
+ while ((match = regex.exec(text)) !== null) {
247
+ citations.push({ book: match[1], chapter: parseInt(match[2]), verse: parseInt(match[3]) });
248
+ }
249
+ return citations;
250
+ }
251
+
252
+ /** Translate a pi-agent-core AgentEvent into an Origen StreamEvent. */
253
+ export function translateEvent(
254
+ event: AgentEvent,
255
+ extractCitations?: (text: string) => Citation[]
256
+ ): StreamEvent | null {
257
+ switch (event.type) {
258
+ case "message_update": {
259
+ const assistantEvent = event.assistantMessageEvent;
260
+ if (assistantEvent.type === "text_delta") {
261
+ return { type: "text" as const, content: assistantEvent.delta };
262
+ }
263
+ if (assistantEvent.type === "thinking_delta") {
264
+ return { type: "reasoning" as const, content: assistantEvent.delta };
265
+ }
266
+ return null;
267
+ }
268
+ case "tool_execution_start": {
269
+ return {
270
+ type: "tool_call" as const,
271
+ name: event.toolName,
272
+ args: event.args as Record<string, unknown>,
273
+ };
274
+ }
275
+ case "tool_execution_end": {
276
+ const resultText = event.result?.content
277
+ ?.filter((c: any) => c.type === "text")
278
+ .map((c: any) => c.text)
279
+ .join("\n") ?? "";
280
+ return {
281
+ type: "tool_result" as const,
282
+ name: event.toolName,
283
+ result: resultText,
284
+ };
285
+ }
286
+ case "agent_end": {
287
+ // Find the final assistant message
288
+ const assistantMsg = event.messages
289
+ .filter((m): m is any => m.role === "assistant")
290
+ .pop();
291
+ const text = assistantMsg?.content
292
+ ?.filter((c: any) => c.type === "text")
293
+ .map((c: any) => c.text)
294
+ .join("") ?? "";
295
+ const usage: UsageInfo | undefined = assistantMsg?.usage
296
+ ? {
297
+ promptTokens: assistantMsg.usage.input,
298
+ completionTokens: assistantMsg.usage.output,
299
+ totalCost: assistantMsg.usage.cost?.total,
300
+ }
301
+ : undefined;
302
+ const citFn = extractCitations ?? defaultCitationExtractor;
303
+ // Check for error
304
+ if (assistantMsg?.stopReason === "error" || assistantMsg?.stopReason === "aborted") {
305
+ return {
306
+ type: "error" as const,
307
+ message: assistantMsg.errorMessage ?? "Agent encountered an error",
308
+ };
309
+ }
310
+ return {
311
+ type: "done" as const,
312
+ message: text,
313
+ citations: citFn(text),
314
+ usage,
315
+ };
316
+ }
317
+ default:
318
+ return null;
319
+ }
320
+ }
321
+
322
/**
 * Subscribe to an Agent and yield Origen StreamEvents.
 * Handles the full lifecycle from agent_start to agent_end.
 *
 * Implementation: a push/pull bridge. The subscription callback pushes
 * translated events onto `queue`; the generator loop drains the queue,
 * parking on a one-shot promise whenever it is empty.
 *
 * NOTE(review): the subscription is only installed once iteration begins
 * (async generator bodies run lazily on the first next() call), so events
 * the agent emits before the first pull are silently dropped — callers must
 * start consuming before, or immediately after, kicking off the run.
 */
export async function* agentToStreamEvents(
  agent: any, // Agent from pi-agent-core
  extractCitations?: (text: string) => Citation[]
): AsyncGenerator<StreamEvent> {
  // Events pushed by the subscription, pulled by the yield loop below.
  const queue: StreamEvent[] = [];
  // One-shot wakeup for the consumer; non-null only while the loop is parked.
  let resolve: (() => void) | null = null;
  let done = false;

  const unsubscribe = agent.subscribe((event: AgentEvent) => {
    const translated = translateEvent(event, extractCitations);
    if (translated) {
      queue.push(translated);
      // Wake the consumer if it is parked waiting for data.
      if (resolve) {
        resolve();
        resolve = null;
      }
    }
    if (event.type === "agent_end") {
      // Mark completion and wake the consumer so it can drain and exit.
      done = true;
      if (resolve) {
        resolve();
        resolve = null;
      }
    }
  });

  try {
    // Keep draining until the agent has ended AND the queue is empty.
    while (!done || queue.length > 0) {
      if (queue.length > 0) {
        yield queue.shift()!;
        continue;
      }
      if (done) break;
      // Park until the subscription callback pushes or signals completion.
      await new Promise<void>((r) => { resolve = r; });
    }
  } finally {
    // Always detach from the agent, even if the consumer aborts early.
    unsubscribe();
  }
}
package/src/agent.ts ADDED
@@ -0,0 +1,219 @@
1
+ /**
2
+ * Origen — Agent Engine (v0.3)
3
+ *
4
+ * Multi-provider agent harness built on pi-ai + pi-agent-core.
5
+ * Supports OpenRouter, Ollama, Anthropic, Google, and any OpenAI-compatible API.
6
+ * Soul.md personas, streaming, parallel tool execution, abort support.
7
+ */
8
+
9
+ import { Agent } from "@mariozechner/pi-agent-core";
10
+ import { streamSimple } from "@mariozechner/pi-ai";
11
+ import type { AgentEvent } from "@mariozechner/pi-agent-core";
12
+ import { z } from "zod";
13
+ import {
14
+ adaptTools,
15
+ convertMessages,
16
+ buildContext,
17
+ agentToStreamEvents,
18
+ resolveModel,
19
+ } from "./adapter";
20
+ import { DEFAULT_MODEL_ID, THINKING_MODELS, type ModelId } from "./models";
21
+ import type { D1Provider, Citation, UsageInfo } from "./types";
22
+
23
+ // ── Tool definition ───────────────────────────────────────────────────
24
+
25
/**
 * A tool that the host app registers with Origen.
 *
 * Simple interface: name, description, JSON schema, and an execute function
 * that receives (args, getD1). The adapter (adapter.ts) wraps this into
 * pi-agent-core's AgentTool.
 */
export interface OrigenTool {
  /** Unique tool name, surfaced to the LLM as the function name. */
  name: string;
  /** Human-readable description the LLM uses to decide when to call the tool. */
  description: string;
  /** OpenAI function-calling parameter schema (JSON) */
  parameters: Record<string, unknown>;
  /** Zod schema for runtime validation (optional) */
  inputSchema?: z.ZodType;
  /** Runs the tool; the returned string is fed back to the model verbatim. */
  execute: (args: Record<string, unknown>, getD1: D1Provider) => Promise<string>;
}
39
+
40
+ // ── Agent configuration ───────────────────────────────────────────────
41
+
42
/** Configuration for a single Origen agent run. */
export interface AgentConfig {
  /** Display name used in the default system prompt (default: "Origen"). */
  appName?: string;
  /** Full system prompt; when set, overrides the default built from appName. */
  systemPrompt?: string;
  /** Tools exposed to the model (adapted to pi-agent-core internally). */
  tools: OrigenTool[];
  /** Provides the D1 database handle to tool executors. */
  getD1: D1Provider;
  /** Model ID; falls back to DEFAULT_MODEL_ID when omitted. */
  model?: ModelId;
  // NOTE(review): maxSteps is accepted but not visibly forwarded to the
  // Agent in streamOrigen — confirm whether pi-agent-core enforces it.
  maxSteps?: number;
  /** Custom citation extractor */
  extractCitations?: (text: string) => Citation[];
  /** Dynamic API key resolution per provider (e.g., for expiring OAuth tokens) */
  getApiKey?: (provider: string) => Promise<string | undefined>;
  /** Ollama base URL override (default: http://localhost:11434/v1) */
  ollamaBaseUrl?: string;
  /** Tool execution mode: "parallel" (default) or "sequential" */
  toolExecution?: "sequential" | "parallel";
  // NOTE(review): signal is accepted but not visibly passed to the Agent or
  // its prompt call in streamOrigen — confirm cancellation actually works.
  signal?: AbortSignal;
  /** Reasoning/thinking level for models that support it */
  thinkingLevel?: "off" | "minimal" | "low" | "medium" | "high";
}
62
+
63
+ // ── Auth check ────────────────────────────────────────
64
+
65
/** Result of an API-key availability check. */
export interface AuthCheckResult {
  /** True when a usable API key was found for some provider. */
  authenticated: boolean;
  /** The resolved key, or null when unauthenticated. */
  apiKey: string | null;
  /** Provider the key belongs to (e.g. "openrouter", "ollama", "anthropic"). */
  provider?: string;
  /** User-facing guidance, set only when unauthenticated. */
  error?: string;
}
71
+
72
+ /**
73
+ * Provider-aware auth check. Tests key availability for each provider.
74
+ * If no provider argument, checks OpenRouter + Ollama availability.
75
+ */
76
+ export async function checkAuth(
77
+ getApiKey: ((provider: string) => Promise<string | undefined>) | (() => Promise<string | null>),
78
+ ): Promise<AuthCheckResult> {
79
+ // Normalize to per-provider signature
80
+ const getProviderKey = getApiKey.length >= 1
81
+ ? getApiKey as (provider: string) => Promise<string | undefined>
82
+ : async (provider: string) => {
83
+ const key = await (getApiKey as () => Promise<string | null>)();
84
+ return key ?? undefined;
85
+ };
86
+
87
+ // Try OpenRouter first
88
+ const orKey = await getProviderKey("openrouter");
89
+ if (orKey) return { authenticated: true, apiKey: orKey, provider: "openrouter" };
90
+
91
+ // Try Ollama
92
+ const ollamaKey = await getProviderKey("ollama");
93
+ if (ollamaKey) return { authenticated: true, apiKey: ollamaKey, provider: "ollama" };
94
+
95
+ // Try Anthropic
96
+ const anthropicKey = await getProviderKey("anthropic");
97
+ if (anthropicKey) return { authenticated: true, apiKey: anthropicKey, provider: "anthropic" };
98
+
99
+ return {
100
+ authenticated: false,
101
+ apiKey: null,
102
+ error: "Connect your OpenRouter account or configure Ollama to enable AI-powered study.",
103
+ };
104
+ }
105
+
106
+ /** Convenience: check OpenRouter auth only (backward compat). */
107
+ export async function checkOpenRouterAuth(
108
+ getApiKey: () => Promise<string | null>
109
+ ): Promise<AuthCheckResult> {
110
+ const apiKey = await getApiKey();
111
+ if (!apiKey) {
112
+ return { authenticated: false, apiKey: null, error: "Connect your OpenRouter account to enable AI-powered study." };
113
+ }
114
+ return { authenticated: true, apiKey, provider: "openrouter" };
115
+ }
116
+
117
+ // ── Stream event types ─────────────────────────────────────────────────
118
+
119
/** Events yielded while an agent run streams (terminal: "done" or "error"). */
export type StreamEvent =
  /** Incremental model "thinking" text (reasoning models only). */
  | { type: "reasoning"; content: string }
  /** The model requested a tool invocation. */
  | { type: "tool_call"; name: string; args: Record<string, unknown> }
  /** A tool finished; result is its joined text output. */
  | { type: "tool_result"; name: string; result: string }
  /** Incremental assistant answer text. */
  | { type: "text"; content: string }
  /** Terminal: full message, extracted citations, optional token usage. */
  | { type: "done"; message: string; citations: Citation[]; usage?: UsageInfo }
  /** Terminal: the run failed. */
  | { type: "error"; message: string };
126
+
127
+ // ── Streaming agent call ───────────────────────────────────────────────
128
+
129
+ export async function* streamOrigen(
130
+ messages: Array<{ role: "user" | "assistant"; content: string }>,
131
+ context: Record<string, unknown> | undefined,
132
+ config: AgentConfig,
133
+ apiKey?: string,
134
+ ): AsyncGenerator<StreamEvent> {
135
+ const systemPrompt = config.systemPrompt ?? `You are ${config.appName ?? "Origen"}, an AI assistant. Use your tools to help the user.`;
136
+ const modelId = config.model ?? DEFAULT_MODEL_ID;
137
+ const maxSteps = config.maxSteps ?? 5;
138
+ const extractCitations = config.extractCitations;
139
+
140
+ // Resolve model to pi-ai Model object
141
+ const model = resolveModel(modelId, { ollamaBaseUrl: config.ollamaBaseUrl });
142
+
143
+ // Adapt tools to AgentTool format
144
+ const adaptedTools = adaptTools(config.tools, config.getD1);
145
+
146
+ // Convert messages
147
+ let piMessages = convertMessages(messages);
148
+
149
+ // Inject context into last user message
150
+ if (context && piMessages.length > 0) {
151
+ const lastIdx = piMessages.length - 1;
152
+ const lastMsg = piMessages[lastIdx];
153
+ if (lastMsg.role === "user") {
154
+ piMessages[lastIdx] = {
155
+ ...lastMsg,
156
+ content: `[Context: ${JSON.stringify(context)}] ${typeof lastMsg.content === "string" ? lastMsg.content : ""}`,
157
+ };
158
+ }
159
+ }
160
+
161
+ // Resolve API key per provider
162
+ const resolveApiKey = async (provider: string): Promise<string | undefined> => {
163
+ if (config.getApiKey) return config.getApiKey(provider);
164
+ if (apiKey) return apiKey;
165
+ return undefined;
166
+ };
167
+
168
+ // Create Agent
169
+ const agent = new Agent({
170
+ initialState: {
171
+ systemPrompt,
172
+ model,
173
+ thinkingLevel: config.thinkingLevel ?? (THINKING_MODELS.has(modelId) ? "medium" : "off"),
174
+ tools: adaptedTools,
175
+ messages: piMessages as any,
176
+ },
177
+ getApiKey: resolveApiKey,
178
+ toolExecution: config.toolExecution ?? "parallel",
179
+ });
180
+
181
+ // Stream events
182
+ try {
183
+ await agent.prompt(piMessages as any);
184
+
185
+ yield* agentToStreamEvents(agent, extractCitations);
186
+ } catch (error) {
187
+ const msg = error instanceof Error ? error.message : String(error);
188
+ yield { type: "error", message: `Agent error: ${msg}` };
189
+ }
190
+ }
191
+
192
+ // ── Non-streaming agent call ──────────────────────────────────────────
193
+
194
/** Aggregated result of a non-streaming agent call (see callOrigen). */
export interface AgentResponse {
  /** Concatenated assistant text from the run. */
  message: string;
  /** Citations extracted from the final message. */
  citations: Citation[];
  /** Token usage/cost, when the provider reports it. */
  usage?: UsageInfo;
}
199
+
200
+ export async function callOrigen(
201
+ messages: Array<{ role: "user" | "assistant"; content: string }>,
202
+ context: Record<string, unknown> | undefined,
203
+ config: AgentConfig,
204
+ apiKey?: string,
205
+ ): Promise<AgentResponse> {
206
+ let message = "";
207
+ const citations: Citation[] = [];
208
+ let usage: UsageInfo | undefined;
209
+
210
+ for await (const event of streamOrigen(messages, context, config, apiKey)) {
211
+ switch (event.type) {
212
+ case "text": message += event.content; break;
213
+ case "done": citations.push(...event.citations); usage = event.usage; break;
214
+ case "error": throw new Error(event.message);
215
+ }
216
+ }
217
+
218
+ return { message, citations, usage };
219
+ }
package/src/index.ts ADDED
@@ -0,0 +1,58 @@
1
/**
 * @moikapy/origen — Multi-Provider Agent Engine
 *
 * Generic agent harness with Soul.md personas, streaming, tool calling.
 * Supports OpenRouter, Ollama, Anthropic, Google, and any OpenAI-compatible API.
 *
 * Domain-specific tools live in their own packages (e.g., @moikapy/scholar-tools).
 *
 * Usage:
 *   import { streamOrigen } from "@moikapy/origen";
 *   import { allBibleTools, buildScholarPrompt } from "@moikapy/scholar-tools";
 *
 *   const config = {
 *     systemPrompt: buildScholarPrompt(),
 *     tools: allBibleTools(),
 *     getD1: async () => myD1,
 *     model: "openrouter/free",
 *     getApiKey: async (provider) => resolveKey(provider),
 *   };
 */

// Core shared types (D1 access, citations, usage accounting).
export type {
  D1Like,
  D1Provider,
  ReadingContext,
  Citation,
  UsageInfo,
  ModelConfig as OrigenModelConfig,
} from "./types";

// Model registry and capability helpers.
export {
  MODELS,
  DEFAULT_MODEL_ID,
  DEFAULT_MODEL,
  THINKING_MODELS,
  supportsThinking,
  isOllamaModel,
  getModelsByProvider,
  getModelsForUI,
  type ModelId,
  type ModelConfig,
  type UIModelConfig,
} from "./models";

// Agent entry points (streaming + non-streaming) and auth helpers.
export {
  streamOrigen,
  callOrigen,
  checkAuth,
  checkOpenRouterAuth,
  type AgentConfig,
  type OrigenTool,
  type AuthCheckResult,
  type AgentResponse,
  type StreamEvent,
} from "./agent";

// Low-level model resolution, exposed for hosts that build custom agents.
export { resolveModel } from "./adapter";
export type { ModelResolutionOptions } from "./adapter";