@cuylabs/agent-core 0.3.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/README.md +216 -41
  2. package/dist/builder-RcTZuYnO.d.ts +34 -0
  3. package/dist/capabilities/index.d.ts +97 -0
  4. package/dist/capabilities/index.js +46 -0
  5. package/dist/chunk-6TDTQJ4P.js +116 -0
  6. package/dist/chunk-7MUFEN4K.js +559 -0
  7. package/dist/chunk-BDBZ3SLK.js +745 -0
  8. package/dist/chunk-DWYX7ASF.js +26 -0
  9. package/dist/chunk-FG4MD5MU.js +54 -0
  10. package/dist/chunk-IMGQOTU2.js +2019 -0
  11. package/dist/chunk-IVUJDISU.js +556 -0
  12. package/dist/chunk-LRHOS4ZN.js +584 -0
  13. package/dist/chunk-OTUGSCED.js +691 -0
  14. package/dist/chunk-P6YF7USR.js +182 -0
  15. package/dist/chunk-QAQADS4X.js +258 -0
  16. package/dist/chunk-QWFMX226.js +879 -0
  17. package/dist/{chunk-6VKLWNRE.js → chunk-SDSBEQXG.js} +1 -132
  18. package/dist/chunk-VBWWUHWI.js +724 -0
  19. package/dist/chunk-VEKUXUVF.js +41 -0
  20. package/dist/chunk-X635CM2F.js +305 -0
  21. package/dist/chunk-YUUJK53A.js +91 -0
  22. package/dist/chunk-ZXAKHMWH.js +283 -0
  23. package/dist/config-D2xeGEHK.d.ts +52 -0
  24. package/dist/context/index.d.ts +259 -0
  25. package/dist/context/index.js +26 -0
  26. package/dist/identifiers-BLUxFqV_.d.ts +12 -0
  27. package/dist/index-p0kOsVsE.d.ts +1067 -0
  28. package/dist/index-tmhaADz5.d.ts +198 -0
  29. package/dist/index.d.ts +185 -4316
  30. package/dist/index.js +1238 -5368
  31. package/dist/mcp/index.d.ts +26 -0
  32. package/dist/mcp/index.js +14 -0
  33. package/dist/messages-BYWGn8TY.d.ts +110 -0
  34. package/dist/middleware/index.d.ts +7 -0
  35. package/dist/middleware/index.js +12 -0
  36. package/dist/models/index.d.ts +33 -0
  37. package/dist/models/index.js +12 -0
  38. package/dist/network-D76DS5ot.d.ts +5 -0
  39. package/dist/prompt/index.d.ts +224 -0
  40. package/dist/prompt/index.js +45 -0
  41. package/dist/reasoning/index.d.ts +71 -0
  42. package/dist/reasoning/index.js +47 -0
  43. package/dist/registry-CuRWWtcT.d.ts +164 -0
  44. package/dist/resolver-DOfZ-xuk.d.ts +254 -0
  45. package/dist/runner-C7aMP_x3.d.ts +596 -0
  46. package/dist/runtime/index.d.ts +357 -0
  47. package/dist/runtime/index.js +64 -0
  48. package/dist/session-manager-Uawm2Le7.d.ts +274 -0
  49. package/dist/skill/index.d.ts +103 -0
  50. package/dist/skill/index.js +39 -0
  51. package/dist/storage/index.d.ts +167 -0
  52. package/dist/storage/index.js +50 -0
  53. package/dist/sub-agent/index.d.ts +14 -0
  54. package/dist/sub-agent/index.js +15 -0
  55. package/dist/tool/index.d.ts +173 -1
  56. package/dist/tool/index.js +12 -3
  57. package/dist/tool-DYp6-cC3.d.ts +239 -0
  58. package/dist/tool-pFAnJc5Y.d.ts +419 -0
  59. package/dist/tracker-DClqYqTj.d.ts +96 -0
  60. package/dist/tracking/index.d.ts +109 -0
  61. package/dist/tracking/index.js +20 -0
  62. package/dist/types-CQaXbRsS.d.ts +47 -0
  63. package/dist/types-MM1JoX5T.d.ts +810 -0
  64. package/dist/types-VQgymC1N.d.ts +156 -0
  65. package/package.json +89 -5
  66. package/dist/index-QR704uRr.d.ts +0 -472
package/README.md CHANGED
@@ -1,19 +1,26 @@
1
1
  # @cuylabs/agent-core
2
2
 
3
- Embeddable AI agent infrastructure using Vercel AI SDK. Core building blocks for streaming AI agents with session management, resilience, and model capabilities.
3
+ Embeddable AI agent infrastructure using Vercel AI SDK. Core building blocks for AI agents with session management, tools, skills, sub-agents, and tracing.
4
4
 
5
5
  ## Features
6
6
 
7
- - **Agent Framework** - Create AI agents with tool support and streaming responses
8
- - **Session Management** - Persistent conversation state with branching support
9
- - **LLM Streaming** - Real-time streaming with proper backpressure handling
10
- - **Error Resilience** - Automatic retry with exponential backoff
11
- - **Context Management** - Token counting and automatic context pruning
12
- - **Tool Framework** - Type-safe tool definitions with Zod schemas
13
- - **Approval System** - Configurable tool approval workflows
14
- - **Checkpoint System** - Undo/restore capabilities for file operations
15
- - **Model Capabilities** - Runtime model capability detection
16
- - **MCP Support** - Extend agents with Model Context Protocol servers
7
+ - **Named Agents** — `name` field for identity in spans, runtime traces, and logging
8
+ - **Agent Framework** — Create AI agents with tool support and streaming responses
9
+ - **OpenTelemetry Tracing** — Built-in tracing with a single `tracing` config — Zipkin, Phoenix, any OTLP backend
10
+ - **Session Management** — Persistent conversation state with branching support
11
+ - **Execution Streaming** — Real-time event streaming with proper backpressure handling
12
+ - **Error Resilience** — Automatic retry with exponential backoff
13
+ - **Context Management** — Token counting and automatic context pruning
14
+ - **Tool Framework** — Type-safe tool definitions with Zod schemas
15
+ - **Middleware** — Composable lifecycle hooks for tool interception, prompt injection, logging, and guardrails
16
+ - **Skills** — Modular knowledge packs with progressive disclosure (L1 summary → L2 content → L3 resources)
17
+ - **Approval System** — Configurable tool approval via middleware (rules + interactive prompts)
18
+ - **Checkpoint System** — Undo/restore capabilities for file operations
19
+ - **Model Capabilities** — Runtime model capability detection
20
+ - **MCP Support** — Extend agents with Model Context Protocol servers
21
+ - **Sub-Agents** — Fork agents with inherited config, run tasks in parallel, or let the LLM delegate via `invoke_agent`
22
+ - **Presets** — Reusable agent configurations with tool filtering
23
+ - **Runtime Integration** — Extracted turn-runner phases for durable workflow integration
17
24
 
18
25
  ## Installation
19
26
 
@@ -38,28 +45,47 @@ npm install @ai-sdk/openai-compatible
38
45
 
39
46
  ## Quick Start
40
47
 
48
+ Everything is available from the root import, plus focused subpath imports for clearer boundaries:
49
+
50
+ ```typescript
51
+ import { createAgent, Tool } from "@cuylabs/agent-core"; // full API
52
+ import { defineTool } from "@cuylabs/agent-core/tool"; // tools only
53
+ import { createSkillRegistry } from "@cuylabs/agent-core/skill"; // skill system
54
+ import { createSubAgentTools } from "@cuylabs/agent-core/sub-agent"; // sub-agents
55
+ import { createAgentTaskRunner } from "@cuylabs/agent-core/runtime"; // runtime
56
+ import type { AgentMiddleware } from "@cuylabs/agent-core/middleware"; // middleware
57
+ import { createPromptBuilder } from "@cuylabs/agent-core/prompt"; // prompts
58
+ import { createMCPManager } from "@cuylabs/agent-core/mcp"; // MCP integration
59
+ ```
60
+
61
+ Additional focused entrypoints are available for `storage`, `context`, `reasoning`,
62
+ `capabilities`, `models`, and `tracking` when you want imports to mirror the
63
+ module layout.
64
+
41
65
  ```typescript
42
66
  import { createAgent, Tool } from "@cuylabs/agent-core";
43
67
  import { openai } from "@ai-sdk/openai";
44
68
  import { z } from "zod";
45
69
 
46
70
  // Define tools
47
- const tools: Tool[] = [
48
- {
49
- name: "greet",
50
- description: "Greet a user by name",
51
- parameters: z.object({
52
- name: z.string().describe("Name to greet"),
53
- }),
54
- execute: async ({ name }) => `Hello, ${name}!`,
55
- },
56
- ];
71
+ const greet = Tool.define("greet", {
72
+ description: "Greet a user by name",
73
+ parameters: z.object({
74
+ name: z.string().describe("Name to greet"),
75
+ }),
76
+ execute: async ({ name }) => ({
77
+ title: "Greeting",
78
+ output: `Hello, ${name}!`,
79
+ metadata: {},
80
+ }),
81
+ });
57
82
 
58
83
  // Create agent
59
84
  const agent = createAgent({
85
+ name: "my-assistant",
60
86
  model: openai("gpt-4o"),
61
- cwd: process.cwd(),
62
- tools,
87
+ tools: [greet],
88
+ systemPrompt: "You are a helpful assistant.",
63
89
  });
64
90
 
65
91
  // Stream responses
@@ -68,7 +94,7 @@ for await (const event of agent.chat("session-1", "Hello!")) {
68
94
  case "text-delta":
69
95
  process.stdout.write(event.text);
70
96
  break;
71
- case "tool-call":
97
+ case "tool-start":
72
98
  console.log(`Calling tool: ${event.toolName}`);
73
99
  break;
74
100
  case "tool-result":
@@ -86,9 +112,9 @@ for await (const event of agent.chat("session-1", "Hello!")) {
86
112
  ### Local Models (Ollama Example)
87
113
 
88
114
  ```typescript
89
- import { createAgent, createModelResolver } from "@cuylabs/agent-core";
115
+ import { createAgent, createResolver } from "@cuylabs/agent-core";
90
116
 
91
- const resolveModel = createModelResolver({
117
+ const resolveModel = createResolver({
92
118
  engines: {
93
119
  ollama: {
94
120
  adapter: "openai-compatible",
@@ -140,36 +166,170 @@ const memoryStorage = new MemoryStorage();
140
166
 
141
167
  // File-based persistent storage
142
168
  const fileStorage = new FileStorage({
143
- dataDir: "/path/to/data",
169
+ directory: "/path/to/data",
144
170
  });
145
171
 
146
172
  const sessions = new SessionManager(fileStorage);
147
- await sessions.createSession({ id: "my-session" });
173
+ await sessions.create({ id: "my-session", cwd: process.cwd() });
148
174
  ```
149
175
 
150
176
  ### Tool Framework
151
177
 
152
178
  ```typescript
153
- import { defineTool, Tool, ToolRegistry } from "@cuylabs/agent-core";
179
+ import { Tool } from "@cuylabs/agent-core";
154
180
  import { z } from "zod";
155
181
 
156
- const myTool = defineTool({
157
- name: "calculate",
158
- description: "Perform a calculation",
182
+ const calculator = Tool.define("calculator", {
183
+ description: "Evaluate a math expression",
159
184
  parameters: z.object({
160
185
  expression: z.string(),
161
186
  }),
162
187
  execute: async ({ expression }, ctx) => {
163
- // ctx provides sessionId, cwd, abort signal, etc.
164
- return eval(expression); // Don't actually do this!
188
+ // ctx provides sessionID, cwd, abort signal, host, etc.
189
+ return {
190
+ title: "Calculator",
191
+ output: String(eval(expression)), // Don't actually do this!
192
+ metadata: {},
193
+ };
194
+ },
195
+ });
196
+ ```
197
+
198
+ ### Middleware
199
+
200
+ Composable lifecycle hooks for tool interception, prompt injection, logging, and guardrails:
201
+
202
+ ```typescript
203
+ import { createAgent, type AgentMiddleware, approvalMiddleware } from "@cuylabs/agent-core";
204
+
205
+ // Simple logging middleware
206
+ const logger: AgentMiddleware = {
207
+ name: "logger",
208
+ async beforeToolCall(tool, args) {
209
+ console.log(`→ ${tool}`, args);
210
+ return { action: "allow" };
211
+ },
212
+ async afterToolCall(tool, _args, result) {
213
+ console.log(`← ${tool}`, result.title);
214
+ return result;
215
+ },
216
+ async onChatStart(sessionId, message) {
217
+ console.log(`Chat started: ${sessionId}`);
218
+ },
219
+ async onChatEnd(sessionId, { usage, error }) {
220
+ console.log(`Chat ended: ${usage?.totalTokens} tokens`);
221
+ },
222
+ };
223
+
224
+ // Guardrail middleware — block dangerous tools
225
+ const guardrail: AgentMiddleware = {
226
+ name: "guardrail",
227
+ async beforeToolCall(tool) {
228
+ if (tool === "bash") {
229
+ return { action: "deny", reason: "Shell commands are disabled." };
230
+ }
231
+ return { action: "allow" };
232
+ },
233
+ };
234
+
235
+ const agent = createAgent({
236
+ model: openai("gpt-4o"),
237
+ tools: myTools,
238
+ middleware: [
239
+ logger,
240
+ guardrail,
241
+ approvalMiddleware({
242
+ rules: [{ pattern: "*", tool: "read_file", action: "allow" }],
243
+ onRequest: async (req) => {
244
+ // Prompt user for approval
245
+ return await askUser(req);
246
+ },
247
+ }),
248
+ ],
249
+ });
250
+ ```
251
+
252
+ Middleware hooks: `beforeToolCall`, `afterToolCall`, `promptSections`, `onEvent`, `onChatStart`, `onChatEnd`. Sub-agents inherit middleware via `fork()`.
253
+
254
+ ### Tracing (OpenTelemetry)
255
+
256
+ Enable full tracing with a single `tracing` field — no manual provider setup:
257
+
258
+ ```typescript
259
+ import { createAgent } from "@cuylabs/agent-core";
260
+ import { openai } from "@ai-sdk/openai";
261
+ import { SimpleSpanProcessor } from "@opentelemetry/sdk-trace-node";
262
+ import { ZipkinExporter } from "@opentelemetry/exporter-zipkin";
263
+
264
+ const agent = createAgent({
265
+ name: "my-agent",
266
+ model: openai("gpt-4o"),
267
+ tools: [myTool],
268
+ tracing: {
269
+ spanProcessor: new SimpleSpanProcessor(
270
+ new ZipkinExporter({ url: "http://localhost:9411/api/v2/spans" }),
271
+ ),
165
272
  },
166
273
  });
167
274
 
168
- // Register tools
169
- const registry = new ToolRegistry();
170
- registry.register(myTool);
275
+ // ... use agent ...
276
+
277
+ // Flush spans before exit
278
+ await agent.close();
279
+ ```
280
+
281
+ This auto-creates agent-level, tool-level, and AI SDK LLM spans following the [GenAI Semantic Conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/). Works with Zipkin, Arize Phoenix, Jaeger, or any OTLP backend. See [docs/tracing.md](docs/tracing.md) for the full reference.
282
+
283
+ ### Skills
284
+
285
+ Modular knowledge packs with three-level progressive disclosure:
286
+
287
+ ```typescript
288
+ import { createAgent, createSkillRegistry, createSkillTools } from "@cuylabs/agent-core";
289
+
290
+ // Discover skills from SKILL.md files
291
+ const registry = await createSkillRegistry(process.cwd(), {
292
+ externalDirs: [".agents", ".claude"],
293
+ });
294
+
295
+ // Create skill tools (skill + skill_resource)
296
+ const skillTools = createSkillTools(registry);
297
+
298
+ // Inject L1 summary into system prompt, give agent skill tools
299
+ const agent = createAgent({
300
+ model: openai("gpt-4o"),
301
+ tools: [...myTools, ...skillTools],
302
+ systemPrompt: `You are a coding assistant.\n\n${registry.formatSummary()}`,
303
+ });
304
+ ```
305
+
306
+ Skills use the `SKILL.md` format with YAML frontmatter. See [examples/skills/](examples/skills/) for samples.
307
+
308
+ ### Built-in Sub-Agent Tools
309
+
310
+ Let the LLM delegate work to specialized sub-agents via tool calls:
311
+
312
+ ```typescript
313
+ import { createAgent, createSubAgentTools, Presets } from "@cuylabs/agent-core";
314
+ import type { AgentProfile } from "@cuylabs/agent-core";
315
+
316
+ const profiles: AgentProfile[] = [
317
+ { name: "explorer", description: "Fast code search", preset: Presets.explore },
318
+ { name: "reviewer", description: "Thorough code review", preset: Presets.review },
319
+ ];
320
+
321
+ const parent = createAgent({ model, tools: codeTools });
322
+ const subTools = createSubAgentTools(parent, { profiles, async: true });
323
+
324
+ const agent = createAgent({
325
+ model,
326
+ tools: [...codeTools, ...subTools],
327
+ });
328
+ // The LLM can now call invoke_agent, wait_agent, and close_agent
171
329
  ```
172
330
 
331
+ Tools: `invoke_agent` (spawn), `wait_agent` (collect results), `close_agent` (cancel). Supports sync and async modes, depth limiting, and concurrency control. See [docs/subagents.md](docs/subagents.md#built-in-sub-agent-tools-llm-callable).
332
+
173
333
  ### Error Handling & Retry
174
334
 
175
335
  ```typescript
@@ -262,14 +422,29 @@ The `chat()` method yields these event types:
262
422
  | Event Type | Description |
263
423
  |------------|-------------|
264
424
  | `text-delta` | Streaming text chunk |
265
- | `tool-call` | Tool invocation started |
425
+ | `text-start` / `text-end` | Text generation boundaries |
426
+ | `tool-start` | Tool invocation started |
266
427
  | `tool-result` | Tool execution completed |
267
- | `reasoning` | Model reasoning (if supported) |
268
- | `message` | Complete assistant message |
428
+ | `tool-error` | Tool execution failed |
429
+ | `step-start` / `step-finish` | LLM step boundaries with usage stats |
430
+ | `reasoning-delta` | Model reasoning chunk (if supported) |
431
+ | `status` | Agent state changes (thinking, calling-tool, etc.) |
432
+ | `doom-loop` | Repeated tool call pattern detected |
433
+ | `context-overflow` | Context window exceeded |
434
+ | `message` | Complete user/assistant message |
269
435
  | `complete` | Stream finished with usage stats |
270
- | `error` | Error occurred |
436
+ | `error` | Unrecoverable error |
437
+
438
+ See [examples/03-streaming.ts](examples/03-streaming.ts) for a complete event handling example.
271
439
 
272
440
  ## Concurrency Note
273
441
 
274
442
  When using `runConcurrent()` to run multiple tasks in parallel, be aware that all tasks share the same `SessionManager` instance. For independent conversations, create separate `Agent` instances or use distinct session IDs.
275
443
 
444
+ ## Examples
445
+
446
+ See [examples/](examples/) for 20 runnable examples covering every feature — from basic chat to middleware, skills, tracing, and Docker execution. The [examples README](examples/README.md) has setup instructions and run commands.
447
+
448
+ ## Documentation
449
+
450
+ See [docs/](docs/) for in-depth guides on each subsystem.
@@ -0,0 +1,34 @@
1
+ import { P as PromptConfig, a as PromptBuildContext, M as MiddlewareRunner, b as PromptSection, c as ModelFamily } from './runner-C7aMP_x3.js';
2
+ import { S as SkillRegistry } from './registry-CuRWWtcT.js';
3
+
4
+ interface PromptSectionPreview {
5
+ id: string;
6
+ label: string;
7
+ priority: number;
8
+ size: number;
9
+ }
10
+
11
+ declare class PromptBuilder {
12
+ private readonly config;
13
+ private customSections;
14
+ private envCache?;
15
+ private instructionCache?;
16
+ private skillRegistryCache?;
17
+ constructor(config?: PromptConfig);
18
+ build(context: PromptBuildContext, middleware?: MiddlewareRunner): Promise<string>;
19
+ addSection(section: PromptSection): void;
20
+ removeSection(id: string): boolean;
21
+ toggleSection(id: string, enabled: boolean): void;
22
+ getSections(): PromptSection[];
23
+ hasSection(id: string): boolean;
24
+ clearCache(): void;
25
+ getSkillRegistry(cwd: string): Promise<SkillRegistry>;
26
+ getModelFamily(model: PromptBuildContext["model"]): ModelFamily;
27
+ getBaseTemplate(model: PromptBuildContext["model"]): string;
28
+ preview(context: PromptBuildContext, middleware?: MiddlewareRunner): Promise<PromptSectionPreview[]>;
29
+ private getEnvironment;
30
+ private getInstructions;
31
+ }
32
+ declare function createPromptBuilder(config?: PromptConfig): PromptBuilder;
33
+
34
+ export { PromptBuilder as P, createPromptBuilder as c };
@@ -0,0 +1,97 @@
1
+ import { C as CapabilitySource, S as SourcePriority, b as SourceResult, P as ProviderCompatibility, R as ResolverOptions, M as ModelEntry, c as ModelCapabilities } from '../resolver-DOfZ-xuk.js';
2
+ export { D as DEFAULT_RESOLVER_OPTIONS, I as InputModality, d as ModelCapabilityResolver, N as NetworkStatus, O as OutputModality, f as ResolutionResult, g as configureResolver, e as extractModelId, a as extractProvider, h as getDefaultResolver } from '../resolver-DOfZ-xuk.js';
3
+ export { g as getModelId, a as getProviderId } from '../identifiers-BLUxFqV_.js';
4
+ export { g as getNetworkStatus } from '../network-D76DS5ot.js';
5
+ import 'ai';
6
+
7
+ /**
8
+ * Pattern-Based Capability Detection for @cuylabs/agent-core
9
+ *
10
+ * Static pattern matching for model capabilities when no external data is available.
11
+ * This is the fallback layer - always works offline.
12
+ */
13
+
14
+ /**
15
+ * Infer provider from model ID
16
+ */
17
+ declare function inferProvider(modelId: string): string | undefined;
18
+ /**
19
+ * Pattern-based capability source
20
+ * Always available, uses heuristics to detect capabilities
21
+ */
22
+ declare class PatternCapabilitySource implements CapabilitySource {
23
+ readonly priority = SourcePriority.PatternMatch;
24
+ readonly name = "Pattern Matching";
25
+ lookup(modelId: string, providerHint?: string): Promise<SourceResult>;
26
+ isAvailable(): Promise<boolean>;
27
+ }
28
+ /**
29
+ * Quick check if a model ID likely supports reasoning
30
+ */
31
+ declare function likelySupportsReasoning(modelId: string): boolean;
32
+ /**
33
+ * Get provider compatibility for a model
34
+ */
35
+ declare function getProviderCompatibility(modelId: string, provider?: string): ProviderCompatibility | undefined;
36
+
37
+ type CapabilityOverrides = ResolverOptions["modelOverrides"];
38
+ type OverrideLookup = {
39
+ override?: Partial<ModelCapabilities>;
40
+ matchedKey?: string;
41
+ };
42
+ declare function findCapabilityOverride(overrides: CapabilityOverrides, modelId: string, provider?: string): OverrideLookup;
43
+ declare function applyCapabilityOverride(entry: ModelEntry, override?: Partial<ModelCapabilities>): ModelEntry;
44
+
45
+ declare class CapabilityCache {
46
+ private adapter;
47
+ private memoryCache;
48
+ private ttlMs;
49
+ private loaded;
50
+ constructor(options?: Partial<ResolverOptions>);
51
+ private load;
52
+ get(modelId: string, provider?: string): Promise<ModelEntry | undefined>;
53
+ set(entry: ModelEntry): Promise<void>;
54
+ setMany(entries: ModelEntry[]): Promise<void>;
55
+ persist(): Promise<void>;
56
+ clear(): Promise<void>;
57
+ stats(): {
58
+ size: number;
59
+ loaded: boolean;
60
+ };
61
+ getAll(): Promise<ModelEntry[]>;
62
+ getAllByProvider(): Promise<Record<string, ModelEntry[]>>;
63
+ }
64
+ declare class CacheCapabilitySource implements CapabilitySource {
65
+ private cache;
66
+ readonly priority = SourcePriority.LocalCache;
67
+ readonly name = "Local Cache";
68
+ constructor(cache: CapabilityCache);
69
+ lookup(modelId: string, provider?: string): Promise<SourceResult>;
70
+ isAvailable(): Promise<boolean>;
71
+ }
72
+
73
+ declare class RemoteCapabilityFetcher {
74
+ private apiUrl;
75
+ private timeoutMs;
76
+ private cache;
77
+ private lastFetchTime;
78
+ private minFetchInterval;
79
+ constructor(cache: CapabilityCache, options?: Partial<ResolverOptions>);
80
+ fetchAll(): Promise<ModelEntry[]>;
81
+ ping(): Promise<boolean>;
82
+ }
83
+
84
+ declare class RemoteCapabilitySource implements CapabilitySource {
85
+ readonly priority = SourcePriority.RemoteAPI;
86
+ readonly name = "Remote API (models.dev)";
87
+ private fetcher;
88
+ private cache;
89
+ private fetchPromise;
90
+ private enabled;
91
+ constructor(cache: CapabilityCache, options?: Partial<ResolverOptions>);
92
+ lookup(modelId: string, provider?: string): Promise<SourceResult>;
93
+ isAvailable(): Promise<boolean>;
94
+ refresh(): Promise<void>;
95
+ }
96
+
97
+ export { CacheCapabilitySource, CapabilityCache, type CapabilityOverrides, CapabilitySource, ModelCapabilities, ModelEntry, PatternCapabilitySource, ProviderCompatibility, RemoteCapabilityFetcher, RemoteCapabilitySource, ResolverOptions, SourcePriority, SourceResult, applyCapabilityOverride, findCapabilityOverride, getProviderCompatibility, inferProvider, likelySupportsReasoning };
@@ -0,0 +1,46 @@
1
+ import {
2
+ CacheCapabilitySource,
3
+ CapabilityCache,
4
+ DEFAULT_RESOLVER_OPTIONS,
5
+ ModelCapabilityResolver,
6
+ PatternCapabilitySource,
7
+ RemoteCapabilityFetcher,
8
+ RemoteCapabilitySource,
9
+ SourcePriority,
10
+ applyCapabilityOverride,
11
+ configureResolver,
12
+ extractModelId,
13
+ extractProvider,
14
+ findCapabilityOverride,
15
+ getDefaultResolver,
16
+ getNetworkStatus,
17
+ getProviderCompatibility,
18
+ inferProvider,
19
+ likelySupportsReasoning
20
+ } from "../chunk-QWFMX226.js";
21
+ import {
22
+ getModelId,
23
+ getProviderId
24
+ } from "../chunk-DWYX7ASF.js";
25
+ export {
26
+ CacheCapabilitySource,
27
+ CapabilityCache,
28
+ DEFAULT_RESOLVER_OPTIONS,
29
+ ModelCapabilityResolver,
30
+ PatternCapabilitySource,
31
+ RemoteCapabilityFetcher,
32
+ RemoteCapabilitySource,
33
+ SourcePriority,
34
+ applyCapabilityOverride,
35
+ configureResolver,
36
+ extractModelId,
37
+ extractProvider,
38
+ findCapabilityOverride,
39
+ getDefaultResolver,
40
+ getModelId,
41
+ getNetworkStatus,
42
+ getProviderCompatibility,
43
+ getProviderId,
44
+ inferProvider,
45
+ likelySupportsReasoning
46
+ };
@@ -0,0 +1,116 @@
1
+ // src/models/resolver.ts
2
+ function parseKey(input) {
3
+ const [engineId, ...rest] = input.split("/");
4
+ if (!engineId || rest.length === 0) return null;
5
+ return { engineId, modelId: rest.join("/") };
6
+ }
7
+ function mergeSettings(base, override) {
8
+ return {
9
+ apiKey: override?.apiKey ?? base?.apiKey,
10
+ baseUrl: override?.baseUrl ?? base?.baseUrl,
11
+ headers: {
12
+ ...base?.headers ?? {},
13
+ ...override?.headers ?? {}
14
+ },
15
+ extra: {
16
+ ...base?.extra ?? {},
17
+ ...override?.extra ?? {}
18
+ }
19
+ };
20
+ }
21
+ function settingsKey(settings, adapter, engineId) {
22
+ return JSON.stringify({
23
+ engineId,
24
+ adapter,
25
+ apiKey: settings.apiKey ?? "",
26
+ baseUrl: settings.baseUrl ?? "",
27
+ headers: settings.headers ?? {},
28
+ extra: settings.extra ?? {}
29
+ });
30
+ }
31
+ function buildOptions(settings) {
32
+ const opts = { ...settings.extra ?? {} };
33
+ if (settings.apiKey) opts.apiKey = settings.apiKey;
34
+ if (settings.baseUrl) opts.baseURL = settings.baseUrl;
35
+ if (settings.headers && Object.keys(settings.headers).length > 0) opts.headers = settings.headers;
36
+ return opts;
37
+ }
38
+ async function createFactory(adapter, settings) {
39
+ const asModel = (m) => m;
40
+ const opts = buildOptions(settings);
41
+ switch (adapter) {
42
+ case "openai": {
43
+ const { createOpenAI } = await import("@ai-sdk/openai").catch(() => {
44
+ throw new Error(
45
+ `Provider "@ai-sdk/openai" is required for the "openai" adapter. Install it with: pnpm add @ai-sdk/openai`
46
+ );
47
+ });
48
+ const provider = createOpenAI(opts);
49
+ return (modelId) => provider.languageModel(modelId);
50
+ }
51
+ case "anthropic": {
52
+ const { createAnthropic } = await import("@ai-sdk/anthropic").catch(() => {
53
+ throw new Error(
54
+ `Provider "@ai-sdk/anthropic" is required for the "anthropic" adapter. Install it with: pnpm add @ai-sdk/anthropic`
55
+ );
56
+ });
57
+ const provider = createAnthropic(opts);
58
+ return (modelId) => provider.languageModel(modelId);
59
+ }
60
+ case "google": {
61
+ const { createGoogleGenerativeAI } = await import("@ai-sdk/google").catch(() => {
62
+ throw new Error(
63
+ `Provider "@ai-sdk/google" is required for the "google" adapter. Install it with: pnpm add @ai-sdk/google`
64
+ );
65
+ });
66
+ const provider = createGoogleGenerativeAI(opts);
67
+ return (modelId) => asModel(provider.languageModel(modelId));
68
+ }
69
+ case "openai-compatible": {
70
+ const { createOpenAICompatible } = await import("@ai-sdk/openai-compatible").catch(() => {
71
+ throw new Error(
72
+ `Provider "@ai-sdk/openai-compatible" is required for the "openai-compatible" adapter. Install it with: pnpm add @ai-sdk/openai-compatible`
73
+ );
74
+ });
75
+ const provider = createOpenAICompatible({
76
+ name: opts.name ?? "custom",
77
+ baseURL: opts.baseURL ?? "",
78
+ ...opts.apiKey ? { apiKey: opts.apiKey } : {},
79
+ ...opts.headers ? { headers: opts.headers } : {}
80
+ });
81
+ return (modelId) => provider.languageModel(modelId);
82
+ }
83
+ default:
84
+ throw new Error(`No factory registered for adapter: ${adapter}`);
85
+ }
86
+ }
87
+ function createResolver(directory) {
88
+ const factoryCache = /* @__PURE__ */ new Map();
89
+ return async (key) => {
90
+ const parsed = parseKey(key);
91
+ const entry = parsed ? void 0 : directory.entries?.[key];
92
+ const engineId = parsed?.engineId ?? entry?.engine;
93
+ const modelId = parsed?.modelId ?? entry?.id;
94
+ if (!engineId || !modelId) {
95
+ throw new Error(`Unknown model reference: ${key}`);
96
+ }
97
+ const engine = directory.engines[engineId];
98
+ if (!engine) {
99
+ throw new Error(`Unknown engine: ${engineId}`);
100
+ }
101
+ const settings = mergeSettings(engine.settings, entry?.settings);
102
+ if (engine.build) {
103
+ return engine.build(modelId, settings);
104
+ }
105
+ const cacheKey = settingsKey(settings, engine.adapter, engineId);
106
+ const cached = factoryCache.get(cacheKey);
107
+ if (cached) return cached(modelId);
108
+ const factory = await createFactory(engine.adapter, settings);
109
+ factoryCache.set(cacheKey, factory);
110
+ return factory(modelId);
111
+ };
112
+ }
113
+
114
+ export {
115
+ createResolver
116
+ };