@cuylabs/agent-core 0.8.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/dist/{builder-UpOWQMW3.d.ts → builder-BgZ_j4Vs.d.ts} +2 -1
  2. package/dist/{chunk-RZITT45F.js → chunk-4QFNWPIF.js} +4 -4
  3. package/dist/{chunk-BFM2YHNM.js → chunk-5ARZJWD2.js} +74 -37
  4. package/dist/{chunk-KUVSERLJ.js → chunk-DXFBQMXP.js} +5 -2
  5. package/dist/{chunk-CAA7FHIH.js → chunk-EKR6PKXU.js} +0 -100
  6. package/dist/{chunk-IVUJDISU.js → chunk-GFTW23FV.js} +5 -14
  7. package/dist/{chunk-7VKQ4WPB.js → chunk-H3FUYU52.js} +11 -3
  8. package/dist/chunk-I6PKJ7XQ.js +292 -0
  9. package/dist/chunk-IYWQOJMQ.js +102 -0
  10. package/dist/{chunk-4BDA7DQY.js → chunk-J4QDGZIA.js} +19 -3
  11. package/dist/{chunk-7MUFEN4K.js → chunk-JLXG2SH7.js} +349 -3
  12. package/dist/{chunk-YSLSEQ6B.js → chunk-MAZ5DY5B.js} +18 -30
  13. package/dist/{chunk-P6YF7USR.js → chunk-MHKK374K.js} +12 -11
  14. package/dist/{chunk-VBWWUHWI.js → chunk-OFDKHNCX.js} +4 -1
  15. package/dist/{chunk-YUUJK53A.js → chunk-RKEW5WXI.js} +1 -1
  16. package/dist/{chunk-LRHOS4ZN.js → chunk-SPILYYDF.js} +3 -2
  17. package/dist/{chunk-RFEKJKTO.js → chunk-UDCZ673N.js} +321 -275
  18. package/dist/{chunk-BDBZ3SLK.js → chunk-UHCJEM2E.js} +39 -2
  19. package/dist/chunk-WGZAPU6N.js +929 -0
  20. package/dist/{chunk-N6HWIEEA.js → chunk-WKHDSSXG.js} +140 -23
  21. package/dist/index-BCqEGzBj.d.ts +251 -0
  22. package/dist/{index-CWSchSql.d.ts → index-DQuTZ8xL.d.ts} +290 -13
  23. package/dist/index.d.ts +23 -29
  24. package/dist/index.js +776 -490
  25. package/dist/{errors → inference/errors}/index.d.ts +2 -2
  26. package/dist/{errors → inference/errors}/index.js +1 -1
  27. package/dist/inference/index.d.ts +10 -9
  28. package/dist/inference/index.js +34 -8
  29. package/dist/middleware/index.d.ts +5 -4
  30. package/dist/middleware/index.js +3 -3
  31. package/dist/models/index.d.ts +18 -16
  32. package/dist/models/index.js +47 -11
  33. package/dist/models/reasoning/index.d.ts +4 -0
  34. package/dist/{reasoning → models/reasoning}/index.js +2 -3
  35. package/dist/plugin/index.d.ts +414 -0
  36. package/dist/plugin/index.js +32 -0
  37. package/dist/presets/index.d.ts +3 -3
  38. package/dist/presets/index.js +7 -5
  39. package/dist/prompt/index.d.ts +6 -5
  40. package/dist/prompt/index.js +3 -2
  41. package/dist/runner-CI-XeR16.d.ts +91 -0
  42. package/dist/runtime/index.d.ts +7 -6
  43. package/dist/runtime/index.js +6 -7
  44. package/dist/safety/index.d.ts +1 -1
  45. package/dist/safety/index.js +1 -1
  46. package/dist/{session-manager-B_CWGTsl.d.ts → session-manager-KbYt2WUh.d.ts} +8 -0
  47. package/dist/signal/index.js +1 -1
  48. package/dist/skill/index.d.ts +2 -2
  49. package/dist/skill/index.js +3 -3
  50. package/dist/storage/index.d.ts +2 -2
  51. package/dist/storage/index.js +1 -1
  52. package/dist/sub-agent/index.d.ts +10 -9
  53. package/dist/sub-agent/index.js +21 -4
  54. package/dist/tool/index.d.ts +19 -5
  55. package/dist/tool/index.js +2 -2
  56. package/dist/{tool-BHbyUAy3.d.ts → tool-CZWN3KbO.d.ts} +1 -10
  57. package/dist/{tool-DLXAR9Ce.d.ts → tool-DkhSCV2Y.d.ts} +1 -1
  58. package/dist/tracking/index.d.ts +1 -1
  59. package/dist/tracking/index.js +1 -1
  60. package/dist/{types-KKDrdU9Y.d.ts → types-BlOKk-Bb.d.ts} +9 -4
  61. package/dist/{types-BnpEOYV-.d.ts → types-BlZwmnuW.d.ts} +1 -1
  62. package/dist/{runner-e2YRcUoX.d.ts → types-DTSkxakL.d.ts} +3 -138
  63. package/dist/{types-QKHHQLLq.d.ts → types-DmDwi2zI.d.ts} +7 -4
  64. package/package.json +15 -9
  65. package/dist/chunk-DWYX7ASF.js +0 -26
  66. package/dist/chunk-SQU2AJHO.js +0 -305
  67. package/dist/reasoning/index.d.ts +0 -116
  68. package/dist/types-QA4WhEfz.d.ts +0 -138
@@ -1,16 +1,16 @@
1
1
  import {
2
2
  executeAgentToolCall
3
- } from "./chunk-7VKQ4WPB.js";
3
+ } from "./chunk-H3FUYU52.js";
4
4
  import {
5
- buildReasoningOptionsSync
6
- } from "./chunk-SQU2AJHO.js";
5
+ LLMError,
6
+ isRetryable
7
+ } from "./chunk-4QFNWPIF.js";
7
8
  import {
8
9
  snapshotScope
9
10
  } from "./chunk-N7P4PN3O.js";
10
11
  import {
11
- LLMError,
12
- isRetryable
13
- } from "./chunk-RZITT45F.js";
12
+ buildReasoningOptionsSync
13
+ } from "./chunk-UDCZ673N.js";
14
14
 
15
15
  // src/inference/toolset.ts
16
16
  import { tool, zodSchema } from "ai";
@@ -25,6 +25,7 @@ async function buildToolSet(options) {
25
25
  execute: async (params) => (await executeAgentToolCall({
26
26
  toolName: id,
27
27
  tool: info,
28
+ initialized,
28
29
  params,
29
30
  cwd: options.cwd,
30
31
  abort: options.abort,
@@ -48,7 +49,114 @@ import {
48
49
  streamText
49
50
  } from "ai";
50
51
 
51
- // src/retry.ts
52
+ // src/inference/middleware-support.ts
53
+ function isPlainObject(value) {
54
+ return typeof value === "object" && value !== null && Object.getPrototypeOf(value) === Object.prototype;
55
+ }
56
+ function isAsyncIterable(value) {
57
+ return typeof value === "object" && value !== null && Symbol.asyncIterator in value;
58
+ }
59
+ function stringifyToolOutput(output) {
60
+ if (typeof output === "string") {
61
+ return output;
62
+ }
63
+ try {
64
+ const serialized = JSON.stringify(output);
65
+ return serialized ?? String(output);
66
+ } catch {
67
+ return String(output);
68
+ }
69
+ }
70
+ async function runAfterToolCall(options) {
71
+ const { middleware, toolName, params, output, ctx } = options;
72
+ const transformed = await middleware.runAfterToolCall(
73
+ toolName,
74
+ params,
75
+ {
76
+ title: toolName,
77
+ output: stringifyToolOutput(output),
78
+ metadata: {}
79
+ },
80
+ ctx
81
+ );
82
+ return typeof output === "string" ? transformed.output : output;
83
+ }
84
+ function mergeProviderOptions(base, override) {
85
+ if (!base) return override;
86
+ if (!override) return base;
87
+ const merged = { ...base };
88
+ for (const [key, value] of Object.entries(override)) {
89
+ const current = merged[key];
90
+ merged[key] = isPlainObject(current) && isPlainObject(value) ? mergeProviderOptions(
91
+ current,
92
+ value
93
+ ) : value;
94
+ }
95
+ return merged;
96
+ }
97
+ function wrapMcpToolsForMiddleware(options) {
98
+ const { tools, middleware, cwd, sessionID, abort, agent } = options;
99
+ if (!middleware?.hasMiddleware) {
100
+ return tools;
101
+ }
102
+ const wrapped = {};
103
+ for (const [toolName, tool2] of Object.entries(tools)) {
104
+ if (!tool2.execute) {
105
+ wrapped[toolName] = tool2;
106
+ continue;
107
+ }
108
+ wrapped[toolName] = {
109
+ ...tool2,
110
+ execute: async (params, execOptions) => {
111
+ const ctx = {
112
+ cwd,
113
+ abort: execOptions.abortSignal ?? abort,
114
+ sessionID,
115
+ messageID: execOptions.toolCallId,
116
+ agent: agent ?? "default",
117
+ scope: snapshotScope(),
118
+ extra: {
119
+ toolCallId: execOptions.toolCallId,
120
+ messages: execOptions.messages,
121
+ experimentalContext: execOptions.experimental_context
122
+ }
123
+ };
124
+ const decision = await middleware.runBeforeToolCall(
125
+ toolName,
126
+ params,
127
+ ctx
128
+ );
129
+ if (decision.action === "deny") {
130
+ return decision.reason ?? `Tool call denied: ${toolName}`;
131
+ }
132
+ const result = tool2.execute(params, execOptions);
133
+ if (isAsyncIterable(result)) {
134
+ let lastOutput;
135
+ for await (const chunk of result) {
136
+ lastOutput = chunk;
137
+ }
138
+ return await runAfterToolCall({
139
+ middleware,
140
+ toolName,
141
+ params,
142
+ output: lastOutput,
143
+ ctx
144
+ });
145
+ }
146
+ return await runAfterToolCall({
147
+ middleware,
148
+ toolName,
149
+ params,
150
+ output: await result,
151
+ ctx
152
+ });
153
+ }
154
+ };
155
+ }
156
+ return wrapped;
157
+ }
158
+
159
+ // src/inference/retry.ts
52
160
  var DEFAULT_RETRY_CONFIG = {
53
161
  maxAttempts: 3,
54
162
  initialDelayMs: 2e3,
@@ -187,11 +295,6 @@ function applyModelCallInput(target, modelCall) {
187
295
  target.toolExecutionMode = modelCall.toolExecutionMode;
188
296
  target.activeModelCall = modelCall;
189
297
  }
190
- function mergeProviderOptions(base, override) {
191
- if (!base) return override;
192
- if (!override) return base;
193
- return { ...base, ...override };
194
- }
195
298
  function isBlockedModelCall(value) {
196
299
  return "block" in value && value.block === true;
197
300
  }
@@ -281,6 +384,11 @@ async function callStreamTextWithOtelContext(options) {
281
384
  abortSignal: input.abort,
282
385
  providerOptions: mergedProviderOptions,
283
386
  experimental_telemetry: input.telemetry,
387
+ // The AI SDK defaults to console.error(error) for stream failures.
388
+ // We normalize and surface these errors through our own runtime events,
389
+ // so suppress the duplicate raw dump here.
390
+ onError: () => {
391
+ },
284
392
  prepareStep: input.intervention ? async ({ messages }) => {
285
393
  const pending = input.intervention.drainImmediate();
286
394
  if (pending.length === 0) {
@@ -342,11 +450,14 @@ async function stream(input) {
342
450
  if (!input.retry || input.retry.maxAttempts === 0) {
343
451
  return wrapModelStream(await runCustomStream(), input);
344
452
  }
345
- return wrapModelStream(await withRetry(
346
- async () => await runCustomStream(),
347
- input.retry,
348
- input.abort
349
- ), input);
453
+ return wrapModelStream(
454
+ await withRetry(
455
+ async () => await runCustomStream(),
456
+ input.retry,
457
+ input.abort
458
+ ),
459
+ input
460
+ );
350
461
  }
351
462
  const toolSet = await buildToolSet({
352
463
  tools: input.tools,
@@ -359,9 +470,16 @@ async function stream(input) {
359
470
  middleware: input.middleware,
360
471
  executionMode: input.toolExecutionMode
361
472
  });
473
+ const mcpToolSet = wrapMcpToolsForMiddleware({
474
+ tools: input.mcpTools ?? {},
475
+ middleware: input.middleware,
476
+ cwd: input.cwd,
477
+ sessionID: input.sessionID,
478
+ abort: input.abort
479
+ });
362
480
  const allTools = {
363
481
  ...toolSet,
364
- ...input.mcpTools ?? {}
482
+ ...mcpToolSet
365
483
  };
366
484
  const providerOptions = input.reasoningLevel ? buildReasoningOptionsSync(input.model, input.reasoningLevel) : void 0;
367
485
  const createStream = async () => {
@@ -379,11 +497,10 @@ async function stream(input) {
379
497
  if (!input.retry || input.retry.maxAttempts === 0) {
380
498
  return wrapModelStream(await createStream(), input);
381
499
  }
382
- return wrapModelStream(await withRetry(
383
- async () => await createStream(),
384
- input.retry,
385
- input.abort
386
- ), input);
500
+ return wrapModelStream(
501
+ await withRetry(async () => await createStream(), input.retry, input.abort),
502
+ input
503
+ );
387
504
  }
388
505
  async function streamOnce(input) {
389
506
  return await stream({ ...input, retry: void 0 });
@@ -0,0 +1,251 @@
1
+ import { R as ReasoningLevel, a as ReasoningConfig } from './types-CQaXbRsS.js';
2
+ import { LanguageModel } from 'ai';
3
+ import { ProviderOptions } from '@ai-sdk/provider-utils';
4
+
5
+ /**
6
+ * Model Capability Types for @cuylabs/agent-core
7
+ *
8
+ * Defines the structure for model capabilities that can be sourced from
9
+ * static patterns, local cache, or remote APIs.
10
+ */
11
+ /**
12
+ * Input modalities a model can accept
13
+ */
14
+ type InputModality = "text" | "image" | "audio" | "video" | "pdf";
15
+ /**
16
+ * Output modalities a model can produce
17
+ */
18
+ type OutputModality = "text" | "image" | "audio" | "video";
19
+ /**
20
+ * Comprehensive model capabilities
21
+ */
22
+ interface ModelCapabilities {
23
+ /** Model supports extended reasoning/thinking */
24
+ reasoning: boolean;
25
+ /** Model supports function/tool calling */
26
+ toolCalling: boolean;
27
+ /** Model supports temperature adjustment */
28
+ temperature: boolean;
29
+ /** Model supports file attachments */
30
+ attachments: boolean;
31
+ /** Model supports streaming responses */
32
+ streaming: boolean;
33
+ /** Supported input modalities */
34
+ inputModalities: InputModality[];
35
+ /** Supported output modalities */
36
+ outputModalities: OutputModality[];
37
+ /** Maximum context window in tokens */
38
+ contextWindow?: number;
39
+ /** Maximum output tokens */
40
+ maxOutput?: number;
41
+ }
42
+ /**
43
+ * Provider-specific compatibility flags
44
+ * These handle quirks in different provider implementations
45
+ */
46
+ interface ProviderCompatibility {
47
+ /** Supports OpenAI-style reasoning_effort parameter */
48
+ supportsReasoningEffort?: boolean;
49
+ /** Supports developer/system role distinction */
50
+ supportsDeveloperRole?: boolean;
51
+ /** Field name for max tokens (varies by provider) */
52
+ maxTokensField?: "max_tokens" | "max_completion_tokens";
53
+ /** Requires thinking as text tags vs structured */
54
+ requiresThinkingTags?: boolean;
55
+ /** Provider-specific thinking format */
56
+ thinkingFormat?: "openai" | "anthropic" | "google" | "zai";
57
+ }
58
+ /**
59
+ * Complete model entry with metadata
60
+ */
61
+ interface ModelEntry {
62
+ /** Model identifier (e.g., "gpt-4o", "claude-sonnet-4") */
63
+ id: string;
64
+ /** Human-readable model name */
65
+ name: string;
66
+ /** Provider identifier (e.g., "openai", "anthropic") */
67
+ provider: string;
68
+ /** Model capabilities */
69
+ capabilities: ModelCapabilities;
70
+ /** Provider-specific compatibility settings */
71
+ compatibility?: ProviderCompatibility;
72
+ /** Cost per million tokens (input) */
73
+ costInput?: number;
74
+ /** Cost per million tokens (output) */
75
+ costOutput?: number;
76
+ /** When this entry was last updated */
77
+ updatedAt?: string;
78
+ }
79
+ /**
80
+ * Priority levels for capability sources
81
+ */
82
+ declare enum SourcePriority {
83
+ /** User configuration overrides everything */
84
+ UserConfig = 0,
85
+ /** Local cache from previous fetch */
86
+ LocalCache = 1,
87
+ /** Bundled static data (build-time) */
88
+ BundledData = 2,
89
+ /** Pattern-based inference (fallback) */
90
+ PatternMatch = 3,
91
+ /** Remote API (if network available) */
92
+ RemoteAPI = 4
93
+ }
94
+ /**
95
+ * Result from a capability source lookup
96
+ */
97
+ interface SourceResult {
98
+ /** The model entry if found */
99
+ entry?: ModelEntry;
100
+ /** Which source provided this result */
101
+ source: SourcePriority;
102
+ /** Whether this is a confident match */
103
+ confident: boolean;
104
+ /** Error message if lookup failed */
105
+ error?: string;
106
+ }
107
+ /**
108
+ * Capability source interface
109
+ */
110
+ interface CapabilitySource {
111
+ /** Source priority (lower = higher priority) */
112
+ priority: SourcePriority;
113
+ /** Human-readable source name */
114
+ name: string;
115
+ /** Look up capabilities for a model */
116
+ lookup(modelId: string, provider?: string): Promise<SourceResult>;
117
+ /** Check if this source is available */
118
+ isAvailable(): Promise<boolean>;
119
+ }
120
+ /**
121
+ * Options for the capability resolver
122
+ */
123
+ interface ResolverOptions {
124
+ /** Enable remote API fetching (default: false) */
125
+ enableRemoteFetch?: boolean;
126
+ /** Remote API URL (default: https://models.dev) */
127
+ remoteApiUrl?: string;
128
+ /** Cache directory path */
129
+ cachePath?: string;
130
+ /** Cache TTL in milliseconds (default: 1 hour) */
131
+ cacheTtlMs?: number;
132
+ /** Network timeout in milliseconds (default: 10 seconds) */
133
+ networkTimeoutMs?: number;
134
+ /** Custom user overrides for specific models */
135
+ modelOverrides?: Record<string, Partial<ModelCapabilities>>;
136
+ }
137
+ /**
138
+ * Default resolver options
139
+ */
140
+ declare const DEFAULT_RESOLVER_OPTIONS: Required<ResolverOptions>;
141
+
142
+ /**
143
+ * Provider-Specific Reasoning Option Builders
144
+ *
145
+ * Each function takes a {@link ReasoningLevel} and returns the
146
+ * provider-options object expected by the Vercel AI SDK's
147
+ * `providerOptions` parameter.
148
+ *
149
+ * Provider key mapping (`getProviderOptionsKey`) is also defined
150
+ * here so that callers can wrap the raw options correctly.
151
+ */
152
+
153
+ /**
154
+ * Build OpenAI reasoning options.
155
+ *
156
+ * OpenAI reasoning models use:
157
+ * - `reasoningEffort` — controls depth
158
+ * - `reasoningSummary` — controls whether reasoning is streamed
159
+ * (`"auto"` = condensed summary, `"detailed"` = full)
160
+ *
161
+ * `reasoningSummary` requires a verified OpenAI organisation.
162
+ * Set `CODE_AGENT_DISABLE_REASONING_SUMMARY=true` to skip it.
163
+ */
164
+ declare function buildOpenAIOptions(level: ReasoningLevel, compat?: ProviderCompatibility): Record<string, unknown> | undefined;
165
+ /**
166
+ * Build Anthropic thinking options.
167
+ *
168
+ * Uses `thinking.type: "enabled"` with `budgetTokens`.
169
+ */
170
+ declare function buildAnthropicOptions(level: ReasoningLevel): Record<string, unknown> | undefined;
171
+ /**
172
+ * Build Google / Gemini reasoning options.
173
+ *
174
+ * - **Gemini 3+** uses `thinkingLevel` (`"low"` / `"high"`)
175
+ * - **Gemini 2.5 and earlier** uses `thinkingBudget` (token count)
176
+ */
177
+ declare function buildGoogleOptions(level: ReasoningLevel, modelId?: string): Record<string, unknown> | undefined;
178
+ /**
179
+ * Build xAI / Grok reasoning options.
180
+ */
181
+ declare function buildXAIOptions(level: ReasoningLevel): Record<string, unknown> | undefined;
182
+ /**
183
+ * Build Groq reasoning options.
184
+ */
185
+ declare function buildGroqOptions(level: ReasoningLevel): Record<string, unknown> | undefined;
186
+ /**
187
+ * Build Amazon Bedrock reasoning options.
188
+ *
189
+ * - Anthropic / Claude models use `budgetTokens`
190
+ * - Amazon Nova models use `maxReasoningEffort`
191
+ */
192
+ declare function buildBedrockOptions(level: ReasoningLevel, modelId?: string): Record<string, unknown> | undefined;
193
+ /**
194
+ * Build OpenRouter reasoning options.
195
+ */
196
+ declare function buildOpenRouterOptions(level: ReasoningLevel): Record<string, unknown> | undefined;
197
+ /**
198
+ * Resolve the `providerOptions` key for a given SDK provider string.
199
+ *
200
+ * Falls back to the first dot-segment (e.g. `"foo.bar"` → `"foo"`).
201
+ */
202
+ declare function getProviderOptionsKey(provider: string): string;
203
+
204
+ /**
205
+ * Reasoning Configuration & Option Builders
206
+ *
207
+ * Orchestrates capability detection and provider-specific option
208
+ * building to produce ready-to-use `providerOptions` for the
209
+ * Vercel AI SDK.
210
+ *
211
+ * Two flavours of every function are provided:
212
+ * - **Async** (`getReasoningConfig`, `buildReasoningOptions`) —
213
+ * uses the full capability resolver (network + cache).
214
+ * - **Sync** (`getReasoningConfigSync`, `buildReasoningOptionsSync`) —
215
+ * uses fast pattern-matching only (no network).
216
+ */
217
+
218
+ /**
219
+ * Get the reasoning configuration for a model.
220
+ *
221
+ * Uses the full capability resolver (including network lookups)
222
+ * for the most accurate result.
223
+ */
224
+ declare function getReasoningConfig(model: LanguageModel): Promise<ReasoningConfig>;
225
+ /**
226
+ * Synchronous reasoning config using pattern-matching only.
227
+ *
228
+ * Faster but less accurate than {@link getReasoningConfig}.
229
+ * Good for hot paths where async is impractical.
230
+ */
231
+ declare function getReasoningConfigSync(model: LanguageModel): ReasoningConfig;
232
+ /**
233
+ * Build `providerOptions` for a reasoning level (async).
234
+ *
235
+ * Returns `undefined` when reasoning is off or unsupported.
236
+ */
237
+ declare function buildReasoningOptions(model: LanguageModel, level: ReasoningLevel): Promise<ProviderOptions | undefined>;
238
+ /**
239
+ * Build `providerOptions` for a reasoning level (sync / pattern-only).
240
+ */
241
+ declare function buildReasoningOptionsSync(model: LanguageModel, level: ReasoningLevel): ProviderOptions | undefined;
242
+ /**
243
+ * Check whether a model supports reasoning (async, full resolver).
244
+ */
245
+ declare function supportsReasoning(model: LanguageModel): Promise<boolean>;
246
+ /**
247
+ * Synchronous check using pattern-matching only.
248
+ */
249
+ declare function supportsReasoningSync(model: LanguageModel): boolean;
250
+
251
+ export { type CapabilitySource as C, DEFAULT_RESOLVER_OPTIONS as D, type InputModality as I, type ModelCapabilities as M, type OutputModality as O, type ProviderCompatibility as P, type ResolverOptions as R, SourcePriority as S, type ModelEntry as a, type SourceResult as b, buildAnthropicOptions as c, buildBedrockOptions as d, buildGoogleOptions as e, buildGroqOptions as f, buildOpenAIOptions as g, buildOpenRouterOptions as h, buildReasoningOptions as i, buildReasoningOptionsSync as j, buildXAIOptions as k, getProviderOptionsKey as l, getReasoningConfig as m, getReasoningConfigSync as n, supportsReasoningSync as o, supportsReasoning as s };