@cuylabs/agent-core 0.7.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/dist/{builder-BRvqCcIk.d.ts → builder-BgZ_j4Vs.d.ts} +3 -2
  2. package/dist/chunk-4QFNWPIF.js +202 -0
  3. package/dist/chunk-5ARZJWD2.js +259 -0
  4. package/dist/chunk-DXFBQMXP.js +53 -0
  5. package/dist/chunk-EKR6PKXU.js +180 -0
  6. package/dist/{chunk-IVUJDISU.js → chunk-GFTW23FV.js} +5 -14
  7. package/dist/{chunk-IEFIQENH.js → chunk-H3FUYU52.js} +15 -7
  8. package/dist/chunk-I6PKJ7XQ.js +292 -0
  9. package/dist/chunk-IYWQOJMQ.js +102 -0
  10. package/dist/{chunk-3HNO5SVI.js → chunk-J4QDGZIA.js} +20 -4
  11. package/dist/{chunk-7MUFEN4K.js → chunk-JLXG2SH7.js} +349 -3
  12. package/dist/{chunk-CDTV2UYU.js → chunk-MAZ5DY5B.js} +64 -276
  13. package/dist/{chunk-P6YF7USR.js → chunk-MHKK374K.js} +12 -11
  14. package/dist/{chunk-VBWWUHWI.js → chunk-OFDKHNCX.js} +4 -1
  15. package/dist/{chunk-YUUJK53A.js → chunk-RKEW5WXI.js} +1 -1
  16. package/dist/{chunk-LRHOS4ZN.js → chunk-SPILYYDF.js} +3 -2
  17. package/dist/{chunk-QGOGIP7T.js → chunk-UDCZ673N.js} +385 -233
  18. package/dist/{chunk-BDBZ3SLK.js → chunk-UHCJEM2E.js} +39 -2
  19. package/dist/chunk-WGZAPU6N.js +929 -0
  20. package/dist/{chunk-5K7AQVOU.js → chunk-WKHDSSXG.js} +130 -209
  21. package/dist/{chunk-BNSHUWCV.js → chunk-WWYYNWEW.js} +1 -1
  22. package/dist/context/index.js +1 -1
  23. package/dist/events-CE72w8W4.d.ts +149 -0
  24. package/dist/index-BCqEGzBj.d.ts +251 -0
  25. package/dist/{index-C33hlD6H.d.ts → index-DQuTZ8xL.d.ts} +319 -56
  26. package/dist/index.d.ts +42 -121
  27. package/dist/index.js +951 -848
  28. package/dist/inference/errors/index.d.ts +11 -0
  29. package/dist/inference/errors/index.js +16 -0
  30. package/dist/inference/index.d.ts +12 -8
  31. package/dist/inference/index.js +35 -7
  32. package/dist/llm-error-D93FNNLY.d.ts +32 -0
  33. package/dist/middleware/index.d.ts +246 -7
  34. package/dist/middleware/index.js +3 -1
  35. package/dist/models/index.d.ts +132 -9
  36. package/dist/models/index.js +48 -8
  37. package/dist/models/reasoning/index.d.ts +4 -0
  38. package/dist/{reasoning → models/reasoning}/index.js +2 -7
  39. package/dist/plugin/index.d.ts +414 -0
  40. package/dist/plugin/index.js +32 -0
  41. package/dist/presets/index.d.ts +53 -0
  42. package/dist/presets/index.js +30 -0
  43. package/dist/prompt/index.d.ts +11 -8
  44. package/dist/prompt/index.js +3 -2
  45. package/dist/{registry-BDLIHOQB.d.ts → registry-DwYqsQkX.d.ts} +1 -1
  46. package/dist/runner-CI-XeR16.d.ts +91 -0
  47. package/dist/runtime/index.d.ts +12 -8
  48. package/dist/runtime/index.js +8 -7
  49. package/dist/safety/index.d.ts +38 -0
  50. package/dist/safety/index.js +12 -0
  51. package/dist/scope/index.d.ts +2 -2
  52. package/dist/{session-manager-B_CWGTsl.d.ts → session-manager-KbYt2WUh.d.ts} +8 -0
  53. package/dist/signal/index.d.ts +28 -0
  54. package/dist/signal/index.js +6 -0
  55. package/dist/skill/index.d.ts +7 -6
  56. package/dist/skill/index.js +3 -3
  57. package/dist/storage/index.d.ts +2 -2
  58. package/dist/storage/index.js +1 -1
  59. package/dist/sub-agent/index.d.ts +16 -10
  60. package/dist/sub-agent/index.js +21 -4
  61. package/dist/tool/index.d.ts +22 -6
  62. package/dist/tool/index.js +3 -3
  63. package/dist/tool-CZWN3KbO.d.ts +141 -0
  64. package/dist/{tool-HUtkiVBx.d.ts → tool-DkhSCV2Y.d.ts} +2 -2
  65. package/dist/tracking/index.d.ts +2 -2
  66. package/dist/tracking/index.js +1 -1
  67. package/dist/{tool-Db1Ue-1U.d.ts → types-BfNpU8NS.d.ts} +1 -150
  68. package/dist/{types-FRpzzg_9.d.ts → types-BlOKk-Bb.d.ts} +10 -35
  69. package/dist/types-BlZwmnuW.d.ts +50 -0
  70. package/dist/{types-9jGQUjqW.d.ts → types-CQL-SvTn.d.ts} +1 -1
  71. package/dist/types-CWm-7rvB.d.ts +55 -0
  72. package/dist/{runner-DSKaEz3z.d.ts → types-DTSkxakL.d.ts} +7 -235
  73. package/dist/{types-CqDZTh4d.d.ts → types-DmDwi2zI.d.ts} +8 -4
  74. package/dist/types-YuWV4ag7.d.ts +72 -0
  75. package/package.json +67 -6
  76. package/dist/capability-resolver-CgRGsWVX.d.ts +0 -254
  77. package/dist/chunk-ZPMACVZK.js +0 -305
  78. package/dist/index-CfBGYrpd.d.ts +0 -317
  79. package/dist/reasoning/index.d.ts +0 -117
@@ -1,22 +1,13 @@
1
1
  import {
2
2
  createSkillRegistry,
3
3
  emptySkillRegistry
4
- } from "./chunk-LRHOS4ZN.js";
4
+ } from "./chunk-SPILYYDF.js";
5
+ import {
6
+ extractModelId,
7
+ extractProvider
8
+ } from "./chunk-I6PKJ7XQ.js";
5
9
 
6
10
  // src/prompt/templates.ts
7
- function extractModelId(model) {
8
- if (typeof model === "string") return model;
9
- if (typeof model === "object" && model !== null && "modelId" in model) {
10
- return String(model.modelId);
11
- }
12
- return void 0;
13
- }
14
- function extractProvider(model) {
15
- if (typeof model === "object" && model !== null && "provider" in model) {
16
- return String(model.provider);
17
- }
18
- return void 0;
19
- }
20
11
  function detectModelFamily(model) {
21
12
  const provider = extractProvider(model);
22
13
  const modelId = extractModelId(model);
@@ -1,11 +1,11 @@
1
- import {
2
- snapshotScope,
3
- withinScope
4
- } from "./chunk-N7P4PN3O.js";
5
1
  import {
6
2
  extractFilePathsFromArgs,
7
3
  shouldCaptureBaseline
8
4
  } from "./chunk-VEKUXUVF.js";
5
+ import {
6
+ snapshotScope,
7
+ withinScope
8
+ } from "./chunk-N7P4PN3O.js";
9
9
 
10
10
  // src/tool/executor.ts
11
11
  async function executeAgentToolCall(options) {
@@ -21,7 +21,7 @@ async function executeAgentToolCall(options) {
21
21
  }
22
22
  },
23
23
  async () => {
24
- const initialized = await options.tool.init({ cwd: options.cwd });
24
+ const initialized = options.initialized ?? await options.tool.init({ cwd: options.cwd });
25
25
  const ctx = {
26
26
  cwd: options.cwd,
27
27
  abort: options.abort,
@@ -61,9 +61,17 @@ async function executeAgentToolCall(options) {
61
61
  result,
62
62
  ctx
63
63
  );
64
- return { output: transformed.output };
64
+ return {
65
+ output: transformed.output,
66
+ title: transformed.title,
67
+ metadata: transformed.metadata
68
+ };
65
69
  }
66
- return { output: result.output };
70
+ return {
71
+ output: result.output,
72
+ title: result.title,
73
+ metadata: result.metadata
74
+ };
67
75
  }
68
76
  );
69
77
  }
@@ -0,0 +1,292 @@
1
// src/models/types.ts

// Numeric enum (TypeScript-style) ranking where a capability entry came from;
// lower values take precedence. Both the forward ("UserConfig" -> 0) and
// reverse (0 -> "UserConfig") mappings are populated, matching the object
// shape tsc emits for a numeric enum.
var SourcePriority = /* @__PURE__ */ ((target) => {
  const members = [
    ["UserConfig", 0],
    ["LocalCache", 1],
    ["BundledData", 2],
    ["PatternMatch", 3],
    ["RemoteAPI", 4]
  ];
  for (const [label, value] of members) {
    target[label] = value;
    target[value] = label;
  }
  return target;
})(SourcePriority || {});

// Baseline resolver configuration; remote lookups are opt-in.
var DEFAULT_RESOLVER_OPTIONS = {
  enableRemoteFetch: false,
  remoteApiUrl: "https://models.dev",
  cachePath: ".agent-core/cache",
  cacheTtlMs: 60 * 60 * 1e3, // 1 hour
  networkTimeoutMs: 10 * 1e3, // 10 seconds
  modelOverrides: {}
};
20
+
21
// src/models/profiles.ts

// Heuristic rules mapping model-id shapes to reasoning capabilities.
// Scanned in order by matchPatterns(); the FIRST matching rule wins, so
// specific rules must stay above the generic /reasoning|thinking/ catch-all.
// `confidence` feeds PatternCapabilitySource's `confident` flag (> 0.8).
// `provider`, when present, lets matchPatterns() skip the rule if the caller
// supplies a conflicting provider hint.
var REASONING_PATTERNS = [
  // OpenAI o-series (o1/o3/o4, optional -mini/-pro/-preview suffix)
  {
    pattern: /^o[134]-?(mini|pro|preview)?$/i,
    provider: "openai",
    capabilities: { reasoning: true, toolCalling: true },
    compatibility: { supportsReasoningEffort: true, thinkingFormat: "openai" },
    confidence: 0.95
  },
  // OpenAI GPT-5.x
  {
    pattern: /gpt-?5(\.\d)?/i,
    provider: "openai",
    capabilities: { reasoning: true, toolCalling: true },
    compatibility: { supportsReasoningEffort: true, thinkingFormat: "openai" },
    confidence: 0.9
  },
  // DeepSeek R1 variants (flagged as NOT tool-calling capable)
  {
    pattern: /deepseek[_-]?r1|r1[_-]distill/i,
    capabilities: { reasoning: true, toolCalling: false },
    confidence: 0.95
  },
  // Anthropic Claude with explicit "thinking" in the id
  {
    pattern: /claude.*thinking|thinking.*claude/i,
    provider: "anthropic",
    capabilities: { reasoning: true, toolCalling: true },
    compatibility: { thinkingFormat: "anthropic" },
    confidence: 0.9
  },
  // Claude 4.x series (reasoning capable)
  {
    pattern: /claude[_-]?(opus|sonnet)[_-]?4/i,
    provider: "anthropic",
    capabilities: { reasoning: true, toolCalling: true },
    compatibility: { thinkingFormat: "anthropic" },
    confidence: 0.85
  },
  // Gemini thinking models / Gemini 2.5 Pro
  {
    pattern: /gemini.*thinking|gemini[_-]?2\.5[_-]?pro/i,
    provider: "google",
    capabilities: { reasoning: true, toolCalling: true },
    compatibility: { thinkingFormat: "google" },
    confidence: 0.85
  },
  // Gemini 3.x (future-proofing)
  {
    pattern: /gemini[_-]?3/i,
    provider: "google",
    capabilities: { reasoning: true, toolCalling: true },
    compatibility: { thinkingFormat: "google" },
    confidence: 0.8
  },
  // Grok reasoning models
  {
    pattern: /grok[_-]?\d[_-]?(mini|reasoning)/i,
    provider: "xai",
    capabilities: { reasoning: true, toolCalling: true },
    confidence: 0.85
  },
  // Qwen thinking models / Qwen 3
  {
    pattern: /qwen.*thinking|qwen3/i,
    capabilities: { reasoning: true, toolCalling: true },
    confidence: 0.8
  },
  // Generic fallback: "reasoning"/"thinking" anywhere in the id.
  // Must remain last — every rule above is more specific.
  {
    pattern: /reasoning|thinking/i,
    capabilities: { reasoning: true },
    confidence: 0.7
  }
];
97
// Prefix-based rules inferring a provider from a bare model id.
// Scanned in order by inferProvider() against the lowercased id; first
// match wins. `codestral` intentionally has no `^` anchor.
var PROVIDER_PATTERNS = [
  { pattern: /^(gpt|o[134]|chatgpt|davinci)/i, provider: "openai" },
  { pattern: /^claude/i, provider: "anthropic" },
  { pattern: /^gemini|^palm/i, provider: "google" },
  { pattern: /^grok/i, provider: "xai" },
  { pattern: /^deepseek/i, provider: "deepseek" },
  { pattern: /^mistral|^mixtral|codestral/i, provider: "mistral" },
  { pattern: /^llama/i, provider: "meta" },
  { pattern: /^qwen/i, provider: "alibaba" },
  { pattern: /^command/i, provider: "cohere" }
];
108
// Context-window sizes (in tokens) keyed by model-id pattern.
// Scanned in order by inferContextWindow(); first match wins, so more
// specific ids must precede generic ones (e.g. claude-4 before claude-3,
// gpt-4o / gpt-4-turbo before the bare gpt-4 rule, which uses a negative
// lookahead to avoid swallowing them).
var CONTEXT_WINDOW_PROFILES = [
  // Anthropic Claude 4.x — 200k
  { pattern: /claude[_-]?(opus|sonnet)[_-]?4/i, tokens: 2e5 },
  // Anthropic Claude 3.5 — 200k
  { pattern: /claude[_-]?3[._-]?5/i, tokens: 2e5 },
  // Anthropic Claude 3 Opus/Sonnet/Haiku — 200k
  { pattern: /claude[_-]?3/i, tokens: 2e5 },
  // Anthropic Claude 2 — 100k
  { pattern: /claude[_-]?2/i, tokens: 1e5 },
  // OpenAI o-series (o1, o3, o4) — 200k
  { pattern: /^o[134]-?(mini|pro|preview)?$/i, tokens: 2e5 },
  // OpenAI GPT-5.x — 1M
  { pattern: /gpt-?5/i, tokens: 1e6 },
  // OpenAI GPT-4o — 128k
  { pattern: /gpt-?4o/i, tokens: 128e3 },
  // OpenAI GPT-4 turbo — 128k
  { pattern: /gpt-?4[_-]?turbo/i, tokens: 128e3 },
  // OpenAI GPT-4 — 8k (original); lookahead excludes 4o and 4-turbo
  { pattern: /gpt-?4(?!o|[_-]?turbo)/i, tokens: 8192 },
  // OpenAI GPT-3.5 turbo — 16k
  { pattern: /gpt-?3[._-]?5/i, tokens: 16384 },
  // Google Gemini 2.5 Pro — 1M
  { pattern: /gemini[_-]?2[._-]?5[_-]?pro/i, tokens: 1e6 },
  // Google Gemini 2.x Flash — 1M
  { pattern: /gemini[_-]?2.*flash/i, tokens: 1e6 },
  // Google Gemini 2.x Pro — 1M
  { pattern: /gemini[_-]?2[._-]?\d?[_-]?pro/i, tokens: 1e6 },
  // Google Gemini 1.5 Pro/Flash — 1M
  { pattern: /gemini[_-]?1[._-]?5/i, tokens: 1e6 },
  // xAI Grok 3+ — 128k
  { pattern: /grok[_-]?\d/i, tokens: 128e3 },
  // Mistral Large — 128k
  { pattern: /mistral[_-]?large/i, tokens: 128e3 },
  // Mistral Medium — 32k
  { pattern: /mistral[_-]?medium/i, tokens: 32768 },
  // Codestral — 256k
  { pattern: /codestral/i, tokens: 256e3 },
  // Mistral generic — 32k
  { pattern: /mistral|mixtral/i, tokens: 32768 },
  // DeepSeek R1/V3 — 128k
  { pattern: /deepseek/i, tokens: 128e3 },
  // Qwen 3 — 128k
  { pattern: /qwen/i, tokens: 128e3 },
  // Meta Llama 3 — 128k
  { pattern: /llama[_-]?3/i, tokens: 128e3 },
  // Cohere Command R+ — 128k
  { pattern: /command[_-]?r/i, tokens: 128e3 }
];
156
/**
 * Best-effort context-window lookup for a model id.
 *
 * @param {string} modelId - Raw model identifier; matched case-insensitively.
 * @returns {number | undefined} Token budget of the first matching profile,
 *   or `undefined` when no pattern recognizes the id.
 */
function inferContextWindow(modelId) {
  const id = modelId.toLowerCase();
  const hit = CONTEXT_WINDOW_PROFILES.find(({ pattern }) => pattern.test(id));
  return hit?.tokens;
}
165
/**
 * Guess the provider behind a bare model id.
 *
 * Tries the prefix rules in PROVIDER_PATTERNS first; failing that, treats a
 * "provider/model" id as carrying its provider in the path prefix.
 *
 * @param {string} modelId - Raw model identifier.
 * @returns {string | undefined} Provider slug, or `undefined` when unknown.
 */
function inferProvider(modelId) {
  const id = modelId.toLowerCase();
  for (const rule of PROVIDER_PATTERNS) {
    if (rule.pattern.test(id)) {
      return rule.provider;
    }
  }
  // Note: the prefix is taken from the original (non-lowercased) id.
  return modelId.includes("/") ? modelId.split("/")[0] : void 0;
}
177
/**
 * Find the first REASONING_PATTERNS rule matching a model id.
 *
 * Rules carrying a `provider` are skipped when the caller's `providerHint`
 * disagrees. String patterns are matched by lowercase substring; RegExp
 * patterns are tested against the lowercased id.
 *
 * @param {string} modelId - Raw model identifier.
 * @param {string} [providerHint] - Optional provider to constrain matching.
 * @returns {{ rule: object, confidence: number } | undefined}
 */
function matchPatterns(modelId, providerHint) {
  const id = modelId.toLowerCase();
  for (const rule of REASONING_PATTERNS) {
    const providerConflicts = rule.provider && providerHint && rule.provider !== providerHint;
    if (providerConflicts) {
      continue;
    }
    const hit = typeof rule.pattern === "string"
      ? id.includes(rule.pattern.toLowerCase())
      : rule.pattern.test(id);
    if (hit) {
      return { rule, confidence: rule.confidence };
    }
  }
  return void 0;
}
190
/**
 * Build the conservative baseline capability set assumed for any model
 * before pattern rules refine it: text-only, streaming, tool-calling and
 * temperature supported, no reasoning, no attachments.
 *
 * Returns a fresh object (with fresh arrays) on every call so callers can
 * spread or mutate it safely.
 *
 * @returns {object} Default capability record.
 */
function createDefaultCapabilities() {
  const defaults = {
    reasoning: false,
    toolCalling: true,
    temperature: true,
    attachments: false,
    streaming: true,
    inputModalities: ["text"],
    outputModalities: ["text"]
  };
  return defaults;
}
201
/**
 * Capability source backed purely by name-pattern heuristics — always
 * available, lowest-effort fallback (priority 3 = SourcePriority.PatternMatch).
 */
var PatternCapabilitySource = class {
  priority = 3 /* PatternMatch */;
  name = "Pattern Matching";
  /**
   * Resolve capabilities for `modelId` from REASONING_PATTERNS plus the
   * default capability set, with the context window inferred by id.
   * When no reasoning rule matches, a defaults-only entry is returned with
   * `confident: false` (and no `compatibility` key).
   *
   * @param {string} modelId - Raw model identifier.
   * @param {string} [providerHint] - Optional provider to constrain matching.
   * @returns {Promise<object>} Lookup result with entry/source/confident.
   */
  async lookup(modelId, providerHint) {
    const provider = providerHint || inferProvider(modelId);
    const defaults = createDefaultCapabilities();
    const match = matchPatterns(modelId, provider);
    if (!match) {
      // No rule fired: ship the baseline, explicitly unconfident.
      return {
        entry: {
          id: modelId,
          name: modelId,
          provider: provider || "unknown",
          capabilities: {
            ...defaults,
            contextWindow: inferContextWindow(modelId)
          }
        },
        source: this.priority,
        confident: false
      };
    }
    // Rule capabilities override the defaults; contextWindow is always the
    // id-inferred value. A rule's own provider wins over the hint/inference.
    const entry = {
      id: modelId,
      name: modelId,
      provider: match.rule.provider || provider || "unknown",
      capabilities: {
        ...defaults,
        ...match.rule.capabilities,
        contextWindow: inferContextWindow(modelId)
      },
      compatibility: match.rule.compatibility
    };
    return {
      entry,
      source: this.priority,
      confident: match.confidence > 0.8
    };
  }
  /** Pattern matching needs no I/O, so this source is always available. */
  async isAvailable() {
    return true;
  }
};
244
/**
 * True when a REASONING_PATTERNS rule matches the id AND that rule marks
 * the model as reasoning-capable.
 *
 * @param {string} modelId - Raw model identifier.
 * @returns {boolean}
 */
function likelySupportsReasoning(modelId) {
  const hit = matchPatterns(modelId);
  return hit?.rule.capabilities.reasoning === true;
}
248
/**
 * Compatibility hints (thinking format, reasoning-effort support) for a
 * model, if a pattern rule declares any.
 *
 * @param {string} modelId - Raw model identifier.
 * @param {string} [provider] - Optional provider hint for matching.
 * @returns {object | undefined} The matched rule's compatibility block.
 */
function getProviderCompatibility(modelId, provider) {
  const hit = matchPatterns(modelId, provider);
  if (hit === void 0) {
    return void 0;
  }
  return hit.rule.compatibility;
}
252
+
253
// src/models/identifiers.ts

/**
 * Normalize a model reference — a plain id string or an object carrying a
 * `modelId` property — to its model-id string.
 *
 * @param {string | object} model - Model reference.
 * @returns {string} The id, or String(model) for anything else.
 */
function getModelId(model) {
  if (typeof model === "string") {
    return model;
  }
  const carriesId = typeof model === "object" && model !== null && "modelId" in model;
  return carriesId ? String(model.modelId) : String(model);
}
261
/**
 * Extract the provider from a model reference.
 *
 * Strings yield the "provider/model" prefix when present; objects with a
 * `provider` property yield the segment before the first "." (so
 * "anthropic.messages" -> "anthropic").
 *
 * @param {string | object} model - Model reference.
 * @returns {string | undefined} Provider slug, or `undefined` when absent.
 */
function getProviderId(model) {
  if (typeof model === "string") {
    return model.includes("/") ? model.split("/")[0] : void 0;
  }
  const carriesProvider = typeof model === "object" && model !== null && "provider" in model;
  if (!carriesProvider) {
    return void 0;
  }
  return String(model.provider).split(".")[0];
}
274
// Back-compat alias: sibling chunks (e.g. src/prompt/templates.ts) import
// the extract* names; keep them pointing at the canonical helpers.
var extractModelId = getModelId;

// Like getProviderId, but falls back to pattern-based inference from the
// model id when the reference itself carries no provider.
function extractProvider(model) {
  const provider = getProviderId(model);
  return provider ?? inferProvider(getModelId(model));
}
279
+
280
// Public surface of the models chunk: capability/provider inference helpers
// plus the identifier utilities re-used by other chunks.
export {
  SourcePriority,
  DEFAULT_RESOLVER_OPTIONS,
  inferContextWindow,
  inferProvider,
  PatternCapabilitySource,
  likelySupportsReasoning,
  getProviderCompatibility,
  getModelId,
  getProviderId,
  extractModelId,
  extractProvider
};
@@ -0,0 +1,102 @@
1
// src/presets/patterns.ts

/**
 * Convert a glob pattern ("fs_*", "web?") into an anchored, case-insensitive
 * RegExp. Only `*` (any run of characters) and `?` (any single character)
 * are glob-special; every other regex metacharacter is escaped literally.
 *
 * @param {string} pattern - Glob pattern.
 * @returns {RegExp} Anchored case-insensitive matcher.
 */
function globToRegex(pattern) {
  const escaped = pattern
    .replace(/[.+^${}()|[\]\\]/g, "\\$&")
    .replace(/\*/g, ".*")
    .replace(/\?/g, ".");
  return new RegExp(`^${escaped}$`, "i");
}

/**
 * True when `id` matches at least one glob in `patterns`.
 *
 * @param {string} id - Identifier to test.
 * @param {string[]} patterns - Glob patterns.
 * @returns {boolean}
 */
function matchesPatterns(id, patterns) {
  return patterns.some((pattern) => globToRegex(pattern).test(id));
}

/**
 * Filter tools by allow/deny glob lists over `tool.id`.
 *
 * An empty/absent allow list admits every tool; deny patterns always take
 * precedence, removing a tool even when it also matches the allow list.
 * (Fix: previously a non-empty allow list caused deny patterns to be
 * ignored entirely, making `deny` dead whenever `allow` was set.)
 *
 * @param {Array<{id: string}>} tools - Candidate tools.
 * @param {{allow?: string[], deny?: string[]}} options - Glob lists.
 * @returns {Array<{id: string}>} Tools passing both filters.
 */
function filterTools(tools, options) {
  const allowPatterns = options.allow ?? [];
  const denyPatterns = options.deny ?? [];
  // Compile each glob once instead of once per tool.
  const allowRegexes = allowPatterns.map(globToRegex);
  const denyRegexes = denyPatterns.map(globToRegex);
  return tools.filter((tool) => {
    const allowed = allowRegexes.length === 0 || allowRegexes.some((re) => re.test(tool.id));
    if (!allowed) {
      return false;
    }
    return !denyRegexes.some((re) => re.test(tool.id));
  });
}
24
+
25
// src/presets/apply.ts

/**
 * Resolve a preset against the tools actually available.
 *
 * Tools are filtered through the preset's allow/deny globs; a preset
 * systemPrompt may embed the host prompt via a "{basePrompt}" placeholder
 * (first occurrence only), substituted when both prompts are present.
 *
 * @param {object} preset - Preset definition.
 * @param {Array<{id: string}>} availableTools - Tools to filter.
 * @param {string} [baseSystemPrompt] - Host prompt for "{basePrompt}".
 * @returns {object} Resolved run configuration; `tools` is undefined when
 *   the filter leaves nothing.
 */
function applyPreset(preset, availableTools, baseSystemPrompt) {
  const selectedTools = filterTools(availableTools, {
    allow: preset.allowTools,
    deny: preset.denyTools
  });
  let systemPrompt = preset.systemPrompt;
  const shouldSubstitute = Boolean(systemPrompt) && Boolean(baseSystemPrompt);
  if (shouldSubstitute) {
    systemPrompt = systemPrompt.replace("{basePrompt}", baseSystemPrompt);
  }
  const resolved = {
    name: preset.name,
    systemPrompt,
    tools: selectedTools.length > 0 ? selectedTools : void 0,
    temperature: preset.temperature,
    maxSteps: preset.maxSteps,
    reasoningLevel: preset.reasoningLevel,
    model: preset.model
  };
  return resolved;
}
45
/**
 * Merge presets left-to-right into a single preset.
 *
 * - name/description: joined with "+" / " | " across all presets
 * - allowTools: the shortest allow list among presets that define one
 *   (earliest wins ties); undefined when none do
 * - denyTools: de-duplicated union of all deny lists, in first-seen order
 * - scalar fields (systemPrompt, temperature, maxSteps, reasoningLevel,
 *   model): the LAST preset that defines the field (non-nullish) wins
 *
 * A single preset is returned as-is (same reference).
 *
 * @param {...object} presets - Presets to merge.
 * @returns {object} Merged preset.
 * @throws {Error} When called with no presets.
 */
function mergePresets(...presets) {
  if (presets.length === 0) {
    throw new Error("mergePresets requires at least one preset");
  }
  if (presets.length === 1) {
    return presets[0];
  }
  const allowLists = presets
    .filter((preset) => preset.allowTools?.length)
    .map((preset) => preset.allowTools);
  const denySet = new Set();
  for (const preset of presets) {
    for (const id of preset.denyTools ?? []) {
      denySet.add(id);
    }
  }
  // Last non-nullish value wins, scanning left to right.
  const lastDefined = (pick) =>
    presets.reduce((value, preset) => pick(preset) ?? value, void 0);
  const combinedAllow = allowLists.length > 0
    ? allowLists.reduce((shortest, candidate) =>
        candidate.length < shortest.length ? candidate : shortest
      )
    : void 0;
  return {
    name: presets.map((preset) => preset.name).join("+"),
    description: presets.map((preset) => preset.description).join(" | "),
    allowTools: combinedAllow,
    denyTools: denySet.size > 0 ? [...denySet] : void 0,
    systemPrompt: lastDefined((preset) => preset.systemPrompt),
    temperature: lastDefined((preset) => preset.temperature),
    maxSteps: lastDefined((preset) => preset.maxSteps),
    reasoningLevel: lastDefined((preset) => preset.reasoningLevel),
    model: lastDefined((preset) => preset.model)
  };
}
90
/**
 * Create a preset, defaulting `description` to "Custom preset: <name>"
 * when the caller omits it or passes a nullish value.
 *
 * (Fix: the options spread previously came AFTER the defaulted description,
 * so an explicitly-undefined `description` key clobbered the default.)
 *
 * @param {object} options - Preset fields; `name` is required.
 * @returns {object} Preset with a guaranteed description.
 */
function createPreset(options) {
  return {
    ...options,
    description: options.description ?? `Custom preset: ${options.name}`
  };
}
96
+
97
// Public surface of the presets chunk.
export {
  filterTools,
  applyPreset,
  mergePresets,
  createPreset
};
@@ -1,18 +1,25 @@
1
1
  import {
2
2
  DEFAULT_CONTEXT_LIMITS
3
- } from "./chunk-BNSHUWCV.js";
3
+ } from "./chunk-WWYYNWEW.js";
4
4
  import {
5
5
  Inference
6
- } from "./chunk-5K7AQVOU.js";
6
+ } from "./chunk-WKHDSSXG.js";
7
7
  import {
8
8
  executeAgentToolCall
9
- } from "./chunk-IEFIQENH.js";
9
+ } from "./chunk-H3FUYU52.js";
10
+ import {
11
+ LLMError
12
+ } from "./chunk-4QFNWPIF.js";
10
13
  import {
11
14
  currentScope,
12
15
  snapshotScope,
13
16
  streamWithinScope,
14
17
  withinScope
15
18
  } from "./chunk-N7P4PN3O.js";
19
+ import {
20
+ extractModelId,
21
+ extractProvider
22
+ } from "./chunk-I6PKJ7XQ.js";
16
23
 
17
24
  // src/runtime/task/observer.ts
18
25
  function defaultAgentTaskCheckpointStrategy(input) {
@@ -763,6 +770,7 @@ async function handleContextOverflow(options) {
763
770
  // src/runtime/step-processing/process.ts
764
771
  async function processStepStream(stream, options) {
765
772
  const { abort, onEvent } = options;
773
+ const normalizeError = options.normalizeError ?? ((error2) => error2 instanceof Error ? error2 : new Error(String(error2)));
766
774
  const doomLoopThreshold = options.doomLoopThreshold ?? DEFAULT_DOOM_LOOP_THRESHOLD;
767
775
  const maxSteps = options.maxSteps ?? 50;
768
776
  let stepCount = options.currentStep ?? 1;
@@ -914,7 +922,7 @@ async function processStepStream(stream, options) {
914
922
  }
915
923
  }
916
924
  } catch (caught) {
917
- error = caught instanceof Error ? caught : new Error(String(caught));
925
+ error = normalizeError(caught);
918
926
  await onEvent({ type: "status", status: "error" });
919
927
  await onEvent({ type: "error", error });
920
928
  }
@@ -950,6 +958,13 @@ function buildModelCallContext(options) {
950
958
  scope: snapshotScope()
951
959
  };
952
960
  }
961
// Wrap a raw model-step failure in an LLMError, tagging it with the
// provider and model id extracted from the prepared step's inference input
// (via the extractProvider/extractModelId helpers imported above).
function normalizeModelStepError(options, error) {
  const model = options.preparedStep.inferenceInput.model;
  return LLMError.from(error, {
    provider: extractProvider(model),
    model: extractModelId(model)
  });
}
953
968
  async function* runModelStep(options) {
954
969
  return yield* streamWithinScope(
955
970
  {
@@ -983,6 +998,7 @@ async function* runModelStep(options) {
983
998
  const processPromise = processStepStream(stream, {
984
999
  sessionID: preparedStep.inferenceInput.sessionID,
985
1000
  abort: preparedStep.inferenceInput.abort,
1001
+ normalizeError: (error) => normalizeModelStepError(options, error),
986
1002
  currentStep: preparedStep.step,
987
1003
  maxSteps: preparedStep.stepProcessing.maxSteps,
988
1004
  doomLoopThreshold: preparedStep.stepProcessing.doomLoopThreshold ?? 3,