@perstack/runtime 0.0.70 → 0.0.71

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,8 @@
1
+ import { getSkillManagersFromLockfile, getSkillManagers, closeSkillManagers, getSkillManagerByToolName, getToolSet } from './chunk-RG4QHAGG.js';
2
+ import { readFileSync } from 'fs';
3
+ import path from 'path';
4
+ import { parseWithFriendlyError, lockfileSchema, runParamsSchema, createRuntimeEvent, knownModels, stopRunByExceededMaxSteps, continueToNextStep, stopRunByDelegate, stopRunByInteractiveTool, stopRunByError, retry, completeRun, finishToolCall, resolveToolResults, attemptCompletion, callDelegate, callInteractiveTool, callTools, resumeToolCalls, finishAllToolCalls, startGeneration, startRun } from '@perstack/core';
5
+ import TOML from 'smol-toml';
1
6
  import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
2
7
  import { createAnthropic } from '@ai-sdk/anthropic';
3
8
  import { createAzure } from '@ai-sdk/azure';
@@ -5,24 +10,19 @@ import { createDeepSeek } from '@ai-sdk/deepseek';
5
10
  import { createGoogleGenerativeAI } from '@ai-sdk/google';
6
11
  import { createVertex } from '@ai-sdk/google-vertex';
7
12
  import { createOpenAI } from '@ai-sdk/openai';
8
- import { runParamsSchema, createRuntimeEvent, knownModels, getFilteredEnv, stopRunByExceededMaxSteps, continueToNextStep, stopRunByDelegate, stopRunByInteractiveTool, retry, completeRun, finishToolCall, resolveToolResults, attemptCompletion, callDelegate, callInteractiveTool, callTools, resumeToolCalls, finishAllToolCalls, startGeneration, startRun } from '@perstack/core';
9
13
  import { createOllama } from 'ollama-ai-provider-v2';
10
14
  import { ProxyAgent, fetch } from 'undici';
15
+ import { ApiV1Client } from '@perstack/api-client/v1';
11
16
  import { setup, assign, createActor } from 'xstate';
12
- import { generateText, tool, jsonSchema } from 'ai';
13
- import { Client } from '@modelcontextprotocol/sdk/client/index.js';
14
- import { McpError } from '@modelcontextprotocol/sdk/types.js';
15
- import { createId } from '@paralleldrive/cuid2';
16
- import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js';
17
- import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
18
17
  import { readFile } from 'fs/promises';
18
+ import { createId } from '@paralleldrive/cuid2';
19
19
  import { dedent } from 'ts-dedent';
20
- import { ApiV1Client } from '@perstack/api-client/v1';
20
+ import { generateText, streamText } from 'ai';
21
21
 
22
22
  // package.json
23
23
  var package_default = {
24
24
  name: "@perstack/runtime",
25
- version: "0.0.70",
25
+ version: "0.0.71",
26
26
  description: "Perstack Runtime",
27
27
  author: "Wintermute Technologies, Inc.",
28
28
  license: "Apache-2.0",
@@ -54,21 +54,31 @@ var package_default = {
54
54
  typecheck: "tsc --noEmit"
55
55
  },
56
56
  dependencies: {
57
- "@ai-sdk/amazon-bedrock": "^3.0.71",
58
- "@ai-sdk/anthropic": "^2.0.56",
59
- "@ai-sdk/azure": "^2.0.90",
60
- "@ai-sdk/deepseek": "^1.0.32",
61
- "@ai-sdk/google": "^2.0.49",
62
- "@ai-sdk/google-vertex": "^3.0.94",
63
- "@ai-sdk/openai": "^2.0.88",
57
+ "@ai-sdk/amazon-bedrock": "^3.0.0",
58
+ "@ai-sdk/anthropic": "^2.0.0",
59
+ "@ai-sdk/azure": "^2.0.0",
60
+ "@ai-sdk/deepseek": "^1.0.0",
61
+ "@ai-sdk/google": "^2.0.0",
62
+ "@ai-sdk/google-vertex": "^3.0.0",
63
+ "@ai-sdk/openai": "^2.0.0",
64
64
  "@modelcontextprotocol/sdk": "^1.25.1",
65
65
  "@paralleldrive/cuid2": "^3.0.4",
66
+ "@perstack/anthropic-provider": "workspace:*",
66
67
  "@perstack/api-client": "workspace:*",
68
+ "@perstack/base": "workspace:*",
69
+ "@perstack/azure-openai-provider": "workspace:*",
70
+ "@perstack/bedrock-provider": "workspace:*",
67
71
  "@perstack/core": "workspace:*",
72
+ "@perstack/deepseek-provider": "workspace:*",
73
+ "@perstack/google-provider": "workspace:*",
74
+ "@perstack/ollama-provider": "workspace:*",
75
+ "@perstack/openai-provider": "workspace:*",
76
+ "@perstack/provider-core": "workspace:*",
77
+ "@perstack/vertex-provider": "workspace:*",
68
78
  ai: "^5.0.115",
79
+ "ollama-ai-provider-v2": "^1.5.5",
69
80
  commander: "^14.0.2",
70
81
  dotenv: "^17.2.3",
71
- "ollama-ai-provider-v2": "^1.5.5",
72
82
  "smol-toml": "^1.5.2",
73
83
  "ts-dedent": "^2.2.0",
74
84
  undici: "^7.16.0",
@@ -86,6 +96,56 @@ var package_default = {
86
96
  node: ">=22.0.0"
87
97
  }
88
98
  };
99
+ function loadLockfile(lockfilePath) {
100
+ try {
101
+ const content = readFileSync(lockfilePath, "utf-8");
102
+ const parsed = TOML.parse(content);
103
+ return parseWithFriendlyError(lockfileSchema, parsed, "perstack.lock");
104
+ } catch {
105
+ return null;
106
+ }
107
+ }
108
+ function isRemoteUrl(configPath) {
109
+ const lower = configPath.toLowerCase();
110
+ return lower.startsWith("https://") || lower.startsWith("http://");
111
+ }
112
+ function findLockfile(configPath) {
113
+ if (configPath) {
114
+ if (isRemoteUrl(configPath)) {
115
+ return null;
116
+ }
117
+ const configDir = path.dirname(path.resolve(process.cwd(), configPath));
118
+ return path.join(configDir, "perstack.lock");
119
+ }
120
+ return findLockfileRecursively(process.cwd());
121
+ }
122
+ function findLockfileRecursively(cwd) {
123
+ const lockfilePath = path.resolve(cwd, "perstack.lock");
124
+ try {
125
+ readFileSync(lockfilePath);
126
+ return lockfilePath;
127
+ } catch {
128
+ if (cwd === path.parse(cwd).root) {
129
+ return null;
130
+ }
131
+ return findLockfileRecursively(path.dirname(cwd));
132
+ }
133
+ }
134
+ function getLockfileExpertToolDefinitions(lockfileExpert) {
135
+ const result = {};
136
+ for (const toolDef of lockfileExpert.toolDefinitions) {
137
+ if (!result[toolDef.skillName]) {
138
+ result[toolDef.skillName] = [];
139
+ }
140
+ result[toolDef.skillName].push({
141
+ skillName: toolDef.skillName,
142
+ name: toolDef.name,
143
+ description: toolDef.description,
144
+ inputSchema: toolDef.inputSchema
145
+ });
146
+ }
147
+ return result;
148
+ }
89
149
  function createProxyFetch(proxyUrl) {
90
150
  const agent = new ProxyAgent(proxyUrl);
91
151
  return (input, init) => {
@@ -217,523 +277,133 @@ function sumUsage(a, b) {
217
277
  };
218
278
  }
219
279
 
220
- // src/skill-manager/base.ts
221
- var BaseSkillManager = class {
222
- _toolDefinitions = [];
223
- _initialized = false;
224
- _initializing;
225
- skill;
226
- interactiveSkill;
227
- expert;
228
- _jobId;
229
- _runId;
230
- _eventListener;
231
- constructor(jobId, runId, eventListener) {
232
- this._jobId = jobId;
233
- this._runId = runId;
234
- this._eventListener = eventListener;
235
- }
236
- async init() {
237
- if (this._initialized) {
238
- throw new Error(`Skill ${this.name} is already initialized`);
239
- }
240
- if (this._initializing) {
241
- throw new Error(`Skill ${this.name} is already initializing`);
242
- }
243
- const initPromise = this._performInit();
244
- this._initializing = initPromise;
245
- if (!this.lazyInit) {
246
- try {
247
- await initPromise;
248
- } catch (error) {
249
- this._initialized = false;
250
- this._initializing = void 0;
251
- throw error;
252
- }
253
- }
254
- }
255
- isInitialized() {
256
- return this._initialized;
257
- }
258
- async _performInit() {
259
- await this._doInit();
260
- this._initialized = true;
261
- this._initializing = void 0;
262
- }
263
- async getToolDefinitions() {
264
- if (!this.isInitialized() && this._initializing) {
265
- await this._initializing;
266
- }
267
- if (!this.isInitialized()) {
268
- throw new Error(`Skill ${this.name} is not initialized`);
269
- }
270
- return this._filterTools(this._toolDefinitions);
271
- }
272
- _filterTools(tools) {
273
- return tools;
274
- }
275
- };
276
-
277
- // src/skill-manager/command-args.ts
278
- function getCommandArgs(skill) {
279
- const { name, command, packageName, args } = skill;
280
- if (!packageName && (!args || args.length === 0)) {
281
- throw new Error(`Skill ${name} has no packageName or args. Please provide one of them.`);
282
- }
283
- if (packageName && args && args.length > 0) {
284
- throw new Error(`Skill ${name} has both packageName and args. Please provide only one of them.`);
285
- }
286
- let newArgs = args && args.length > 0 ? args : [packageName];
287
- if (command === "npx" && !newArgs.includes("-y")) {
288
- newArgs = ["-y", ...newArgs];
289
- }
290
- return { command, args: newArgs };
280
+ // src/helpers/checkpoint.ts
281
+ function createInitialCheckpoint(checkpointId, params) {
282
+ return {
283
+ id: checkpointId,
284
+ jobId: params.jobId,
285
+ runId: params.runId,
286
+ expert: {
287
+ key: params.expertKey,
288
+ name: params.expert.name,
289
+ version: params.expert.version
290
+ },
291
+ stepNumber: 1,
292
+ status: "init",
293
+ messages: [],
294
+ usage: createEmptyUsage(),
295
+ contextWindow: params.contextWindow,
296
+ contextWindowUsage: params.contextWindow ? 0 : void 0
297
+ };
291
298
  }
292
-
293
- // src/skill-manager/delegate.ts
294
- var DelegateSkillManager = class extends BaseSkillManager {
295
- name;
296
- type = "delegate";
297
- lazyInit = false;
298
- expert;
299
- constructor(expert, jobId, runId, eventListener) {
300
- super(jobId, runId, eventListener);
301
- this.name = expert.name;
302
- this.expert = expert;
303
- }
304
- async _doInit() {
305
- this._toolDefinitions = [
306
- {
307
- skillName: this.expert.name,
308
- name: this.expert.name.split("/").pop() ?? this.expert.name,
309
- description: this.expert.description,
310
- inputSchema: {
311
- type: "object",
312
- properties: {
313
- query: { type: "string" }
314
- },
315
- required: ["query"]
316
- },
317
- interactive: false
318
- }
319
- ];
320
- }
321
- async close() {
322
- }
323
- async callTool(_toolName, _input) {
324
- return [];
325
- }
326
- };
327
-
328
- // src/skill-manager/interactive.ts
329
- var InteractiveSkillManager = class extends BaseSkillManager {
330
- name;
331
- type = "interactive";
332
- lazyInit = false;
333
- interactiveSkill;
334
- constructor(interactiveSkill, jobId, runId, eventListener) {
335
- super(jobId, runId, eventListener);
336
- this.name = interactiveSkill.name;
337
- this.interactiveSkill = interactiveSkill;
338
- }
339
- async _doInit() {
340
- this._toolDefinitions = Object.values(this.interactiveSkill.tools).map((tool2) => ({
341
- skillName: this.interactiveSkill.name,
342
- name: tool2.name,
343
- description: tool2.description,
344
- inputSchema: JSON.parse(tool2.inputJsonSchema),
345
- interactive: true
346
- }));
347
- }
348
- async close() {
349
- }
350
- async callTool(_toolName, _input) {
351
- return [];
352
- }
353
- };
354
-
355
- // src/skill-manager/ip-validator.ts
356
- function isPrivateOrLocalIP(hostname) {
357
- if (hostname === "localhost" || hostname === "127.0.0.1" || hostname === "::1" || hostname === "0.0.0.0") {
358
- return true;
359
- }
360
- const ipv4Match = hostname.match(/^(\d+)\.(\d+)\.(\d+)\.(\d+)$/);
361
- if (ipv4Match) {
362
- const [, a, b] = ipv4Match.map(Number);
363
- if (a === 10) return true;
364
- if (a === 172 && b >= 16 && b <= 31) return true;
365
- if (a === 192 && b === 168) return true;
366
- if (a === 169 && b === 254) return true;
367
- if (a === 127) return true;
368
- }
369
- if (hostname.includes(":")) {
370
- if (hostname.startsWith("fe80:")) return true;
371
- if (hostname.startsWith("fc") || hostname.startsWith("fd")) return true;
372
- }
373
- if (hostname.startsWith("::ffff:")) {
374
- const ipv4Part = hostname.slice(7);
375
- if (isPrivateOrLocalIP(ipv4Part)) {
376
- return true;
377
- }
378
- }
379
- return false;
299
+ function createNextStepCheckpoint(checkpointId, checkpoint) {
300
+ return {
301
+ ...checkpoint,
302
+ id: checkpointId,
303
+ stepNumber: checkpoint.stepNumber + 1
304
+ };
380
305
  }
381
- function handleToolError(error, toolName, McpErrorClass) {
382
- if (error instanceof McpErrorClass) {
383
- return [
384
- {
385
- type: "textPart",
386
- text: `Error calling tool ${toolName}: ${error.message}`,
387
- id: createId()
388
- }
389
- ];
306
+ function buildDelegationReturnState(currentSetting, resultCheckpoint, parentCheckpoint) {
307
+ const { messages, delegatedBy } = resultCheckpoint;
308
+ if (!delegatedBy) {
309
+ throw new Error("delegatedBy is required for buildDelegationReturnState");
390
310
  }
391
- throw error;
392
- }
393
- function convertToolResult(result, toolName, input) {
394
- if (!result.content || result.content.length === 0) {
395
- return [
396
- {
397
- type: "textPart",
398
- text: `Tool ${toolName} returned nothing with arguments: ${JSON.stringify(input)}`,
399
- id: createId()
400
- }
401
- ];
311
+ const delegateResultMessage = messages[messages.length - 1];
312
+ if (!delegateResultMessage || delegateResultMessage.type !== "expertMessage") {
313
+ throw new Error("Delegation error: delegation result message is incorrect");
402
314
  }
403
- return result.content.filter((part) => part.type !== "audio" && part.type !== "resource_link").map((part) => convertPart(part));
404
- }
405
- function convertPart(part) {
406
- switch (part.type) {
407
- case "text":
408
- if (!part.text || part.text === "") {
409
- return { type: "textPart", text: "Error: No content", id: createId() };
410
- }
411
- return { type: "textPart", text: part.text, id: createId() };
412
- case "image":
413
- if (!part.data || !part.mimeType) {
414
- throw new Error("Image part must have both data and mimeType");
415
- }
416
- return {
417
- type: "imageInlinePart",
418
- encodedData: part.data,
419
- mimeType: part.mimeType,
420
- id: createId()
421
- };
422
- case "resource":
423
- if (!part.resource) {
424
- throw new Error("Resource part must have resource content");
425
- }
426
- return convertResource(part.resource);
315
+ const delegateText = delegateResultMessage.contents.find((content) => content.type === "textPart");
316
+ if (!delegateText) {
317
+ console.warn(
318
+ `Delegation result from ${resultCheckpoint.expert.key} has no text content. Parent expert ${delegatedBy.expert.key} will receive empty string.`
319
+ );
427
320
  }
321
+ const { expert, toolCallId, toolName } = delegatedBy;
322
+ return {
323
+ setting: {
324
+ ...currentSetting,
325
+ expertKey: expert.key,
326
+ input: {
327
+ interactiveToolCallResult: {
328
+ toolCallId,
329
+ toolName,
330
+ skillName: `delegate/${resultCheckpoint.expert.key}`,
331
+ text: delegateText?.text ?? ""
332
+ }
333
+ }
334
+ },
335
+ checkpoint: {
336
+ ...parentCheckpoint,
337
+ stepNumber: resultCheckpoint.stepNumber,
338
+ usage: resultCheckpoint.usage,
339
+ pendingToolCalls: parentCheckpoint.pendingToolCalls,
340
+ partialToolResults: parentCheckpoint.partialToolResults
341
+ }
342
+ };
428
343
  }
429
- function convertResource(resource) {
430
- if (!resource.mimeType) {
431
- throw new Error(`Resource ${JSON.stringify(resource)} has no mimeType`);
432
- }
433
- if (resource.text && typeof resource.text === "string") {
434
- return { type: "textPart", text: resource.text, id: createId() };
435
- }
436
- if (resource.blob && typeof resource.blob === "string") {
437
- return {
438
- type: "fileInlinePart",
439
- encodedData: resource.blob,
440
- mimeType: resource.mimeType,
441
- id: createId()
442
- };
344
+ async function resolveExpertToRun(expertKey, experts, clientOptions) {
345
+ if (experts[expertKey]) {
346
+ return experts[expertKey];
443
347
  }
444
- throw new Error(`Unsupported resource type: ${JSON.stringify(resource)}`);
348
+ const client = new ApiV1Client({
349
+ baseUrl: clientOptions.perstackApiBaseUrl,
350
+ apiKey: clientOptions.perstackApiKey
351
+ });
352
+ const { expert } = await client.registry.experts.get({ expertKey });
353
+ return toRuntimeExpert(expert);
445
354
  }
446
- var DefaultTransportFactory = class {
447
- createStdio(options) {
448
- return new StdioClientTransport({
449
- command: options.command,
450
- args: options.args,
451
- env: options.env,
452
- stderr: options.stderr
453
- });
454
- }
455
- createSse(options) {
456
- return new SSEClientTransport(options.url);
457
- }
458
- };
459
- var defaultTransportFactory = new DefaultTransportFactory();
460
-
461
- // src/skill-manager/mcp.ts
462
- var McpSkillManager = class extends BaseSkillManager {
463
- name;
464
- type = "mcp";
465
- lazyInit;
466
- skill;
467
- _mcpClient;
468
- _env;
469
- _transportFactory;
470
- constructor(skill, env, jobId, runId, eventListener, options) {
471
- super(jobId, runId, eventListener);
472
- this.name = skill.name;
473
- this.skill = skill;
474
- this._env = env;
475
- this._transportFactory = options?.transportFactory ?? defaultTransportFactory;
476
- this.lazyInit = skill.type === "mcpStdioSkill" && skill.lazyInit && skill.name !== "@perstack/base";
477
- }
478
- async _doInit() {
479
- this._mcpClient = new Client({
480
- name: `${this.skill.name}-mcp-client`,
481
- version: "1.0.0"
482
- });
483
- let timingInfo;
484
- if (this.skill.type === "mcpStdioSkill") {
485
- timingInfo = await this._initStdio(this.skill);
486
- } else {
487
- await this._initSse(this.skill);
488
- }
489
- const toolDiscoveryStartTime = Date.now();
490
- const { tools } = await this._mcpClient.listTools();
491
- const toolDiscoveryDurationMs = Date.now() - toolDiscoveryStartTime;
492
- this._toolDefinitions = tools.map((tool2) => ({
493
- skillName: this.skill.name,
494
- name: tool2.name,
495
- description: tool2.description,
496
- inputSchema: tool2.inputSchema,
497
- interactive: false
498
- }));
499
- if (this._eventListener && timingInfo) {
500
- const totalDurationMs = Date.now() - timingInfo.startTime;
501
- const event = createRuntimeEvent("skillConnected", this._jobId, this._runId, {
502
- skillName: this.skill.name,
503
- serverInfo: timingInfo.serverInfo,
504
- spawnDurationMs: timingInfo.spawnDurationMs,
505
- handshakeDurationMs: timingInfo.handshakeDurationMs,
506
- toolDiscoveryDurationMs,
507
- connectDurationMs: timingInfo.spawnDurationMs + timingInfo.handshakeDurationMs,
508
- totalDurationMs
509
- });
510
- this._eventListener(event);
511
- }
512
- }
513
- async _initStdio(skill) {
514
- if (!skill.command) {
515
- throw new Error(`Skill ${skill.name} has no command`);
516
- }
517
- const requiredEnv = {};
518
- for (const envName of skill.requiredEnv) {
519
- if (!this._env[envName]) {
520
- throw new Error(`Skill ${skill.name} requires environment variable ${envName}`);
521
- }
522
- requiredEnv[envName] = this._env[envName];
523
- }
524
- const env = getFilteredEnv(requiredEnv);
525
- const startTime = Date.now();
526
- const { command, args } = getCommandArgs(skill);
527
- if (this._eventListener) {
528
- const event = createRuntimeEvent("skillStarting", this._jobId, this._runId, {
529
- skillName: skill.name,
530
- command,
531
- args
532
- });
533
- this._eventListener(event);
534
- }
535
- const transport = this._transportFactory.createStdio({ command, args, env, stderr: "pipe" });
536
- const spawnDurationMs = Date.now() - startTime;
537
- if (transport.stderr) {
538
- transport.stderr.on("data", (chunk) => {
539
- if (this._eventListener) {
540
- const event = createRuntimeEvent("skillStderr", this._jobId, this._runId, {
541
- skillName: skill.name,
542
- message: chunk.toString().trim()
543
- });
544
- this._eventListener(event);
355
+ function toRuntimeExpert(expert) {
356
+ const skills = Object.fromEntries(
357
+ Object.entries(expert.skills).map(([name, skill]) => {
358
+ switch (skill.type) {
359
+ case "mcpStdioSkill":
360
+ return [name, { ...skill, name }];
361
+ case "mcpSseSkill":
362
+ return [name, { ...skill, name }];
363
+ case "interactiveSkill":
364
+ return [name, { ...skill, name }];
365
+ default: {
366
+ throw new Error(`Unknown skill type: ${skill.type}`);
545
367
  }
546
- });
547
- }
548
- const connectStartTime = Date.now();
549
- await this._mcpClient.connect(transport);
550
- const handshakeDurationMs = Date.now() - connectStartTime;
551
- const serverVersion = this._mcpClient.getServerVersion();
552
- return {
553
- startTime,
554
- spawnDurationMs,
555
- handshakeDurationMs,
556
- serverInfo: serverVersion ? { name: serverVersion.name, version: serverVersion.version } : void 0
557
- };
558
- }
559
- async _initSse(skill) {
560
- if (!skill.endpoint) {
561
- throw new Error(`Skill ${skill.name} has no endpoint`);
562
- }
563
- const url = new URL(skill.endpoint);
564
- if (url.protocol !== "https:") {
565
- throw new Error(`Skill ${skill.name} SSE endpoint must use HTTPS: ${skill.endpoint}`);
566
- }
567
- if (isPrivateOrLocalIP(url.hostname)) {
568
- throw new Error(
569
- `Skill ${skill.name} SSE endpoint cannot use private/local IP: ${skill.endpoint}`
570
- );
571
- }
572
- const transport = this._transportFactory.createSse({ url });
573
- await this._mcpClient.connect(transport);
574
- }
575
- async close() {
576
- if (this._mcpClient) {
577
- await this._mcpClient.close();
578
- if (this._eventListener && this.skill) {
579
- const event = createRuntimeEvent("skillDisconnected", this._jobId, this._runId, {
580
- skillName: this.skill.name
581
- });
582
- this._eventListener(event);
583
368
  }
584
- }
585
- }
586
- _filterTools(tools) {
587
- const omit = this.skill.omit ?? [];
588
- const pick = this.skill.pick ?? [];
589
- return tools.filter((tool2) => omit.length > 0 ? !omit.includes(tool2.name) : true).filter((tool2) => pick.length > 0 ? pick.includes(tool2.name) : true);
590
- }
591
- async callTool(toolName, input) {
592
- if (!this.isInitialized() || !this._mcpClient) {
593
- throw new Error(`${this.name} is not initialized`);
594
- }
595
- try {
596
- const result = await this._mcpClient.callTool({
597
- name: toolName,
598
- arguments: input
599
- });
600
- return convertToolResult(result, toolName, input);
601
- } catch (error) {
602
- return handleToolError(error, toolName, McpError);
603
- }
604
- }
605
- };
606
-
607
- // src/skill-manager/skill-manager-factory.ts
608
- var DefaultSkillManagerFactory = class {
609
- createMcp(skill, context) {
610
- return new McpSkillManager(
611
- skill,
612
- context.env,
613
- context.jobId,
614
- context.runId,
615
- context.eventListener,
616
- context.mcpOptions
617
- );
618
- }
619
- createInteractive(skill, context) {
620
- return new InteractiveSkillManager(skill, context.jobId, context.runId, context.eventListener);
621
- }
622
- createDelegate(expert, context) {
623
- return new DelegateSkillManager(expert, context.jobId, context.runId, context.eventListener);
624
- }
625
- };
626
- var defaultSkillManagerFactory = new DefaultSkillManagerFactory();
369
+ })
370
+ );
371
+ return { ...expert, skills };
372
+ }
627
373
 
628
- // src/skill-manager/helpers.ts
629
- async function initSkillManagersWithCleanup(managers, allManagers) {
630
- const results = await Promise.allSettled(managers.map((m) => m.init()));
631
- const firstRejected = results.find((r) => r.status === "rejected");
632
- if (firstRejected) {
633
- await Promise.all(allManagers.map((m) => m.close().catch(() => {
634
- })));
635
- throw firstRejected.reason;
636
- }
637
- }
638
- async function getSkillManagers(expert, experts, setting, eventListener, options) {
639
- const { perstackBaseSkillCommand, env, jobId, runId } = setting;
640
- const { skills } = expert;
641
- const factory = options?.factory ?? defaultSkillManagerFactory;
642
- if (!skills["@perstack/base"]) {
643
- throw new Error("Base skill is not defined");
644
- }
645
- const factoryContext = {
646
- env,
647
- jobId,
648
- runId,
649
- eventListener
374
+ // src/helpers/setup-experts.ts
375
+ async function setupExperts(setting, resolveExpertToRun2 = resolveExpertToRun) {
376
+ const { expertKey } = setting;
377
+ const experts = { ...setting.experts };
378
+ const clientOptions = {
379
+ perstackApiBaseUrl: setting.perstackApiBaseUrl,
380
+ perstackApiKey: setting.perstackApiKey
650
381
  };
651
- const allManagers = [];
652
- const mcpSkills = Object.values(skills).filter(
653
- (skill) => skill.type === "mcpStdioSkill" || skill.type === "mcpSseSkill"
654
- ).map((skill) => {
655
- if (perstackBaseSkillCommand && skill.type === "mcpStdioSkill") {
656
- const matchesBaseByPackage = skill.command === "npx" && skill.packageName === "@perstack/base";
657
- const matchesBaseByArgs = skill.command === "npx" && Array.isArray(skill.args) && skill.args.includes("@perstack/base");
658
- if (matchesBaseByPackage || matchesBaseByArgs) {
659
- const [overrideCommand, ...overrideArgs] = perstackBaseSkillCommand;
660
- if (!overrideCommand) {
661
- throw new Error("perstackBaseSkillCommand must have at least one element");
662
- }
663
- return {
664
- ...skill,
665
- command: overrideCommand,
666
- packageName: void 0,
667
- args: overrideArgs,
668
- lazyInit: false
669
- };
670
- }
671
- }
672
- return skill;
673
- });
674
- const mcpSkillManagers = mcpSkills.map((skill) => {
675
- const manager = factory.createMcp(skill, factoryContext);
676
- allManagers.push(manager);
677
- return manager;
678
- });
679
- await initSkillManagersWithCleanup(mcpSkillManagers, allManagers);
680
- if (!options?.isDelegatedRun) {
681
- const interactiveSkills = Object.values(skills).filter(
682
- (skill) => skill.type === "interactiveSkill"
683
- );
684
- const interactiveSkillManagers = interactiveSkills.map((interactiveSkill) => {
685
- const manager = factory.createInteractive(interactiveSkill, factoryContext);
686
- allManagers.push(manager);
687
- return manager;
688
- });
689
- await initSkillManagersWithCleanup(interactiveSkillManagers, allManagers);
690
- }
691
- const delegateSkillManagers = [];
692
- for (const delegateExpertName of expert.delegates) {
693
- const delegate = experts[delegateExpertName];
382
+ const expertToRun = await resolveExpertToRun2(expertKey, experts, clientOptions);
383
+ experts[expertKey] = expertToRun;
384
+ for (const delegateName of expertToRun.delegates) {
385
+ const delegate = await resolveExpertToRun2(delegateName, experts, clientOptions);
694
386
  if (!delegate) {
695
- await Promise.all(allManagers.map((m) => m.close().catch(() => {
696
- })));
697
- throw new Error(`Delegate expert "${delegateExpertName}" not found in experts`);
698
- }
699
- const manager = factory.createDelegate(delegate, factoryContext);
700
- allManagers.push(manager);
701
- delegateSkillManagers.push(manager);
702
- }
703
- await initSkillManagersWithCleanup(delegateSkillManagers, allManagers);
704
- const skillManagers = {};
705
- for (const manager of allManagers) {
706
- skillManagers[manager.name] = manager;
707
- }
708
- return skillManagers;
709
- }
710
- async function closeSkillManagers(skillManagers) {
711
- await Promise.all(Object.values(skillManagers).map((m) => m.close().catch(() => {
712
- })));
713
- }
714
- async function getSkillManagerByToolName(skillManagers, toolName) {
715
- for (const skillManager of Object.values(skillManagers)) {
716
- const toolDefinitions = await skillManager.getToolDefinitions();
717
- for (const toolDefinition of toolDefinitions) {
718
- if (toolDefinition.name === toolName) {
719
- return skillManager;
720
- }
387
+ throw new Error(`Delegate ${delegateName} not found`);
721
388
  }
389
+ experts[delegateName] = delegate;
722
390
  }
723
- throw new Error(`Tool ${toolName} not found`);
391
+ return { expertToRun, experts };
724
392
  }
725
- async function getToolSet(skillManagers) {
726
- const tools = {};
727
- for (const skillManager of Object.values(skillManagers)) {
728
- const toolDefinitions = await skillManager.getToolDefinitions();
729
- for (const toolDefinition of toolDefinitions) {
730
- tools[toolDefinition.name] = tool({
731
- description: toolDefinition.description,
732
- inputSchema: jsonSchema(toolDefinition.inputSchema)
733
- });
734
- }
735
- }
736
- return tools;
393
+
394
+ // src/helpers/thinking.ts
395
+ function extractThinkingParts(reasoning) {
396
+ if (!reasoning) return [];
397
+ return reasoning.map((r) => ({
398
+ type: "thinkingPart",
399
+ thinking: r.text,
400
+ // Signature is in providerMetadata for Anthropic (output from API)
401
+ signature: r.providerMetadata?.anthropic?.signature
402
+ }));
403
+ }
404
+ function extractThinkingText(reasoning) {
405
+ if (!reasoning) return "";
406
+ return reasoning.filter((r) => r.text).map((r) => r.text).join("\n");
737
407
  }
738
408
  function isFileInfo(value) {
739
409
  return typeof value === "object" && value !== null && "path" in value && "mimeType" in value && "size" in value && typeof value.path === "string" && typeof value.mimeType === "string" && typeof value.size === "number";
@@ -759,9 +429,9 @@ async function processFileToolResult(toolResult, toolName) {
759
429
  processedContents.push(part);
760
430
  continue;
761
431
  }
762
- const { path, mimeType } = fileInfo;
432
+ const { path: path2, mimeType } = fileInfo;
763
433
  try {
764
- const buffer = await readFile(path);
434
+ const buffer = await readFile(path2);
765
435
  if (toolName === "readImageFile") {
766
436
  processedContents.push({
767
437
  type: "imageInlinePart",
@@ -781,7 +451,7 @@ async function processFileToolResult(toolResult, toolName) {
781
451
  processedContents.push({
782
452
  type: "textPart",
783
453
  id: part.id,
784
- text: `Failed to read file "${path}": ${error instanceof Error ? error.message : String(error)}`
454
+ text: `Failed to read file "${path2}": ${error instanceof Error ? error.message : String(error)}`
785
455
  });
786
456
  }
787
457
  }
@@ -1157,6 +827,8 @@ function expertContentsToCoreContent(contents) {
1157
827
  return textPartToCoreTextPart(part);
1158
828
  case "toolCallPart":
1159
829
  return toolCallPartToCoreToolCallPart(part);
830
+ case "thinkingPart":
831
+ return thinkingPartToCoreThinkingPart(part);
1160
832
  default:
1161
833
  throw new Error(`Unknown expert content type: ${part.type}`);
1162
834
  }
@@ -1228,6 +900,13 @@ function toolCallPartToCoreToolCallPart(part) {
1228
900
  input: part.args
1229
901
  };
1230
902
  }
903
+ function thinkingPartToCoreThinkingPart(part) {
904
+ return {
905
+ type: "reasoning",
906
+ text: part.thinking,
907
+ providerOptions: part.signature ? { anthropic: { signature: part.signature } } : void 0
908
+ };
909
+ }
1231
910
  function toolResultPartToCoreToolResultPart(part) {
1232
911
  const { contents } = part;
1233
912
  if (contents.length === 1 && contents[0].type === "textPart") {
@@ -1256,7 +935,9 @@ function toolResultPartToCoreToolResultPart(part) {
1256
935
  async function generatingRunResultLogic({
1257
936
  setting,
1258
937
  checkpoint,
1259
- step
938
+ step,
939
+ eventListener,
940
+ llmExecutor
1260
941
  }) {
1261
942
  if (!step.toolCalls || !step.toolResults || step.toolResults.length === 0) {
1262
943
  throw new Error("No tool calls or tool results found");
@@ -1273,37 +954,90 @@ async function generatingRunResultLogic({
1273
954
  };
1274
955
  });
1275
956
  const toolMessage = createToolMessage(toolResultParts);
1276
- const model = getModel(setting.model, setting.providerConfig, { proxyUrl: setting.proxyUrl });
1277
957
  const { messages } = checkpoint;
1278
- let generationResult;
1279
- try {
1280
- generationResult = await generateText({
1281
- model,
1282
- messages: [...messages, toolMessage].map(messageToCoreMessage),
1283
- temperature: setting.temperature,
958
+ const coreMessages = [...messages, toolMessage].map(messageToCoreMessage);
959
+ let reasoningCompletedViaCallback = false;
960
+ const callbacks = {
961
+ onReasoningStart: () => {
962
+ eventListener(createRuntimeEvent("startReasoning", setting.jobId, setting.runId, {}));
963
+ },
964
+ onReasoningDelta: (delta) => {
965
+ eventListener(createRuntimeEvent("streamReasoning", setting.jobId, setting.runId, { delta }));
966
+ },
967
+ onReasoningComplete: (text2) => {
968
+ eventListener(createRuntimeEvent("completeReasoning", setting.jobId, setting.runId, { text: text2 }));
969
+ reasoningCompletedViaCallback = true;
970
+ },
971
+ onResultStart: () => {
972
+ eventListener(createRuntimeEvent("startRunResult", setting.jobId, setting.runId, {}));
973
+ },
974
+ onResultDelta: (delta) => {
975
+ eventListener(createRuntimeEvent("streamRunResult", setting.jobId, setting.runId, { delta }));
976
+ }
977
+ };
978
+ const executionResult = await llmExecutor.streamText(
979
+ {
980
+ messages: coreMessages,
1284
981
  maxRetries: setting.maxRetries,
1285
- abortSignal: AbortSignal.timeout(setting.timeout)
1286
- });
1287
- } catch (error) {
1288
- if (error instanceof Error) {
1289
- const reason = JSON.stringify({ error: error.name, message: error.message });
1290
- return retry(setting, checkpoint, {
1291
- reason,
1292
- newMessages: [toolMessage, createUserMessage([{ type: "textPart", text: reason }])],
1293
- usage: createEmptyUsage()
982
+ tools: {},
983
+ // No tools for run result generation
984
+ abortSignal: AbortSignal.timeout(setting.timeout),
985
+ reasoningBudget: setting.reasoningBudget
986
+ },
987
+ callbacks
988
+ );
989
+ if (!executionResult.success) {
990
+ const { error, isRetryable } = executionResult;
991
+ const currentRetryCount = checkpoint.retryCount ?? 0;
992
+ if (!isRetryable || currentRetryCount >= setting.maxRetries) {
993
+ return stopRunByError(setting, checkpoint, {
994
+ checkpoint: {
995
+ ...checkpoint,
996
+ status: "stoppedByError"
997
+ },
998
+ step: {
999
+ ...step,
1000
+ finishedAt: Date.now()
1001
+ },
1002
+ error: {
1003
+ name: error.name ?? "Error",
1004
+ message: currentRetryCount >= setting.maxRetries ? `Max retries (${setting.maxRetries}) exceeded: ${error.message}` : error.message,
1005
+ statusCode: error.statusCode,
1006
+ isRetryable: false
1007
+ }
1294
1008
  });
1295
1009
  }
1296
- throw error;
1010
+ const reason = JSON.stringify({ error: error.name ?? "Error", message: error.message });
1011
+ return retry(setting, checkpoint, {
1012
+ reason,
1013
+ newMessages: [toolMessage, createUserMessage([{ type: "textPart", text: reason }])],
1014
+ usage: createEmptyUsage()
1015
+ });
1297
1016
  }
1017
+ const generationResult = executionResult.result;
1298
1018
  const usage = usageFromGenerateTextResult(generationResult);
1299
- const { text } = generationResult;
1300
- const newMessages = [toolMessage, createExpertMessage(text ? [{ type: "textPart", text }] : [])];
1019
+ const { text, reasoning } = generationResult;
1020
+ const thinkingParts = extractThinkingParts(reasoning);
1021
+ const thinkingText = extractThinkingText(reasoning);
1022
+ const expertContents = [
1023
+ ...thinkingParts,
1024
+ { type: "textPart", text: text ?? "" }
1025
+ ];
1026
+ const newMessages = [toolMessage, createExpertMessage(expertContents)];
1027
+ if (thinkingText && !reasoningCompletedViaCallback) {
1028
+ await eventListener(
1029
+ createRuntimeEvent("completeReasoning", setting.jobId, setting.runId, {
1030
+ text: thinkingText
1031
+ })
1032
+ );
1033
+ }
1034
+ const newUsage = sumUsage(checkpoint.usage, usage);
1301
1035
  return completeRun(setting, checkpoint, {
1302
1036
  checkpoint: {
1303
1037
  ...checkpoint,
1304
1038
  messages: [...messages, ...newMessages],
1305
- usage: sumUsage(checkpoint.usage, usage),
1306
- contextWindowUsage: checkpoint.contextWindow ? calculateContextWindowUsage(usage, checkpoint.contextWindow) : void 0,
1039
+ usage: newUsage,
1040
+ contextWindowUsage: checkpoint.contextWindow ? calculateContextWindowUsage(newUsage, checkpoint.contextWindow) : void 0,
1307
1041
  status: "completed"
1308
1042
  },
1309
1043
  step: {
@@ -1354,38 +1088,121 @@ function buildToolCalls(toolCalls) {
1354
1088
  async function generatingToolCallLogic({
1355
1089
  setting,
1356
1090
  checkpoint,
1357
- skillManagers
1091
+ step,
1092
+ skillManagers,
1093
+ eventListener,
1094
+ llmExecutor
1358
1095
  }) {
1359
1096
  const { messages } = checkpoint;
1360
- const model = getModel(setting.model, setting.providerConfig, { proxyUrl: setting.proxyUrl });
1361
- let result;
1362
- try {
1363
- result = await generateText({
1364
- model,
1097
+ let reasoningCompletedViaCallback = false;
1098
+ const callbacks = {
1099
+ onReasoningStart: () => {
1100
+ eventListener(createRuntimeEvent("startReasoning", setting.jobId, setting.runId, {}));
1101
+ },
1102
+ onReasoningDelta: (delta) => {
1103
+ eventListener(createRuntimeEvent("streamReasoning", setting.jobId, setting.runId, { delta }));
1104
+ },
1105
+ onReasoningComplete: (text2) => {
1106
+ eventListener(createRuntimeEvent("completeReasoning", setting.jobId, setting.runId, { text: text2 }));
1107
+ reasoningCompletedViaCallback = true;
1108
+ }
1109
+ // onResultStart and onResultDelta intentionally not set - result streaming only in GeneratingRunResult
1110
+ };
1111
+ const executionResult = await llmExecutor.streamText(
1112
+ {
1365
1113
  messages: messages.map(messageToCoreMessage),
1366
- temperature: setting.temperature,
1367
1114
  maxRetries: setting.maxRetries,
1368
1115
  tools: await getToolSet(skillManagers),
1369
- toolChoice: "required",
1370
- abortSignal: AbortSignal.timeout(setting.timeout)
1116
+ toolChoice: "auto",
1117
+ abortSignal: AbortSignal.timeout(setting.timeout),
1118
+ reasoningBudget: setting.reasoningBudget
1119
+ },
1120
+ callbacks
1121
+ );
1122
+ if (!executionResult.success) {
1123
+ const { error, isRetryable } = executionResult;
1124
+ const currentRetryCount = checkpoint.retryCount ?? 0;
1125
+ if (!isRetryable || currentRetryCount >= setting.maxRetries) {
1126
+ return stopRunByError(setting, checkpoint, {
1127
+ checkpoint: {
1128
+ ...checkpoint,
1129
+ status: "stoppedByError"
1130
+ },
1131
+ step: {
1132
+ ...step,
1133
+ finishedAt: Date.now()
1134
+ },
1135
+ error: {
1136
+ name: error.name ?? "Error",
1137
+ message: currentRetryCount >= setting.maxRetries ? `Max retries (${setting.maxRetries}) exceeded: ${error.message}` : error.message,
1138
+ statusCode: error.statusCode,
1139
+ isRetryable: false
1140
+ }
1141
+ });
1142
+ }
1143
+ const reason = JSON.stringify({ error: error.name ?? "Error", message: error.message });
1144
+ return retry(setting, checkpoint, {
1145
+ reason,
1146
+ newMessages: [createUserMessage([{ type: "textPart", text: reason }])],
1147
+ usage: createEmptyUsage()
1148
+ });
1149
+ }
1150
+ const result = executionResult.result;
1151
+ const usage = usageFromGenerateTextResult(result);
1152
+ const { text, toolCalls, finishReason, reasoning } = result;
1153
+ const thinkingParts = extractThinkingParts(reasoning);
1154
+ const thinkingText = extractThinkingText(reasoning);
1155
+ if (toolCalls.length === 0 && text) {
1156
+ const contents = [...thinkingParts, { type: "textPart", text }];
1157
+ const newMessage = createExpertMessage(contents);
1158
+ const newUsage = sumUsage(checkpoint.usage, usage);
1159
+ if (thinkingText && !reasoningCompletedViaCallback) {
1160
+ await eventListener(
1161
+ createRuntimeEvent("completeReasoning", setting.jobId, setting.runId, {
1162
+ text: thinkingText
1163
+ })
1164
+ );
1165
+ }
1166
+ return completeRun(setting, checkpoint, {
1167
+ checkpoint: {
1168
+ ...checkpoint,
1169
+ messages: [...messages, newMessage],
1170
+ usage: newUsage,
1171
+ contextWindowUsage: checkpoint.contextWindow ? calculateContextWindowUsage(newUsage, checkpoint.contextWindow) : void 0,
1172
+ status: "completed"
1173
+ },
1174
+ step: {
1175
+ ...step,
1176
+ newMessages: [...step.newMessages, newMessage],
1177
+ finishedAt: Date.now(),
1178
+ usage: sumUsage(step.usage, usage)
1179
+ },
1180
+ text,
1181
+ usage
1371
1182
  });
1372
- } catch (error) {
1373
- if (error instanceof Error) {
1374
- const reason = JSON.stringify({ error: error.name, message: error.message });
1375
- return retry(setting, checkpoint, {
1376
- reason,
1377
- newMessages: [createUserMessage([{ type: "textPart", text: reason }])],
1378
- usage: createEmptyUsage()
1379
- });
1380
- }
1381
- throw error;
1382
1183
  }
1383
- const usage = usageFromGenerateTextResult(result);
1384
- const { text, toolCalls, finishReason } = result;
1385
1184
  if (toolCalls.length === 0) {
1185
+ const currentRetryCount = checkpoint.retryCount ?? 0;
1186
+ if (currentRetryCount >= setting.maxRetries) {
1187
+ return stopRunByError(setting, checkpoint, {
1188
+ checkpoint: {
1189
+ ...checkpoint,
1190
+ status: "stoppedByError"
1191
+ },
1192
+ step: {
1193
+ ...step,
1194
+ finishedAt: Date.now()
1195
+ },
1196
+ error: {
1197
+ name: "MaxRetriesExceeded",
1198
+ message: `Max retries (${setting.maxRetries}) exceeded: No tool call or text generated`,
1199
+ isRetryable: false
1200
+ }
1201
+ });
1202
+ }
1386
1203
  const reason = JSON.stringify({
1387
- error: "Error: No tool call generated",
1388
- message: "You must generate a tool call. Try again."
1204
+ error: "Error: No tool call or text generated",
1205
+ message: "You must generate a tool call or provide a response. Try again."
1389
1206
  });
1390
1207
  return retry(setting, checkpoint, {
1391
1208
  reason,
@@ -1397,11 +1214,15 @@ async function generatingToolCallLogic({
1397
1214
  const sorted = sortToolCallsByPriority(classified);
1398
1215
  if (finishReason === "tool-calls" || finishReason === "stop") {
1399
1216
  const toolCallParts = buildToolCallParts(sorted);
1400
- const contents = [...toolCallParts];
1401
- if (text) {
1402
- contents.push({ type: "textPart", text });
1403
- }
1217
+ const contents = [...thinkingParts, ...text ? [{ type: "textPart", text }] : [], ...toolCallParts];
1404
1218
  const allToolCalls = buildToolCalls(sorted);
1219
+ if (thinkingText && !reasoningCompletedViaCallback) {
1220
+ await eventListener(
1221
+ createRuntimeEvent("completeReasoning", setting.jobId, setting.runId, {
1222
+ text: thinkingText
1223
+ })
1224
+ );
1225
+ }
1405
1226
  return callTools(setting, checkpoint, {
1406
1227
  newMessage: createExpertMessage(contents),
1407
1228
  toolCalls: allToolCalls,
@@ -1413,6 +1234,24 @@ async function generatingToolCallLogic({
1413
1234
  if (!firstToolCall) {
1414
1235
  throw new Error("No tool call found");
1415
1236
  }
1237
+ const currentRetryCount = checkpoint.retryCount ?? 0;
1238
+ if (currentRetryCount >= setting.maxRetries) {
1239
+ return stopRunByError(setting, checkpoint, {
1240
+ checkpoint: {
1241
+ ...checkpoint,
1242
+ status: "stoppedByError"
1243
+ },
1244
+ step: {
1245
+ ...step,
1246
+ finishedAt: Date.now()
1247
+ },
1248
+ error: {
1249
+ name: "MaxRetriesExceeded",
1250
+ message: `Max retries (${setting.maxRetries}) exceeded: Generation length exceeded`,
1251
+ isRetryable: false
1252
+ }
1253
+ });
1254
+ }
1416
1255
  const reason = JSON.stringify({
1417
1256
  error: "Error: Tool call generation failed",
1418
1257
  message: "Generation length exceeded. Try again."
@@ -1662,11 +1501,6 @@ async function resolvingToolResultLogic({
1662
1501
  });
1663
1502
  }
1664
1503
 
1665
- // src/state-machine/states/resolving-thought.ts
1666
- async function resolvingThoughtLogic(context) {
1667
- return resolvingToolResultLogic(context);
1668
- }
1669
-
1670
1504
  // src/state-machine/machine.ts
1671
1505
  var runtimeStateMachine = setup({
1672
1506
  types: {
@@ -1689,7 +1523,8 @@ var runtimeStateMachine = setup({
1689
1523
  startedAt: Date.now()
1690
1524
  },
1691
1525
  eventListener: input.eventListener,
1692
- skillManagers: input.skillManagers
1526
+ skillManagers: input.skillManagers,
1527
+ llmExecutor: input.llmExecutor
1693
1528
  }),
1694
1529
  states: {
1695
1530
  Init: {
@@ -1768,7 +1603,8 @@ var runtimeStateMachine = setup({
1768
1603
  checkpoint: ({ context, event }) => ({
1769
1604
  ...context.checkpoint,
1770
1605
  messages: [...context.checkpoint.messages, ...event.newMessages],
1771
- usage: sumUsage(context.checkpoint.usage, event.usage)
1606
+ usage: sumUsage(context.checkpoint.usage, event.usage),
1607
+ retryCount: (context.checkpoint.retryCount ?? 0) + 1
1772
1608
  }),
1773
1609
  step: ({ context, event }) => ({
1774
1610
  ...context.step,
@@ -1779,15 +1615,42 @@ var runtimeStateMachine = setup({
1779
1615
  })
1780
1616
  })
1781
1617
  },
1618
+ stopRunByError: {
1619
+ target: "Stopped",
1620
+ actions: assign({
1621
+ checkpoint: ({ event }) => ({
1622
+ ...event.checkpoint,
1623
+ error: event.error
1624
+ }),
1625
+ step: ({ event }) => ({
1626
+ ...event.step,
1627
+ inputMessages: void 0
1628
+ })
1629
+ })
1630
+ },
1631
+ completeRun: {
1632
+ target: "Stopped",
1633
+ actions: assign({
1634
+ checkpoint: ({ event }) => ({ ...event.checkpoint, retryCount: 0 }),
1635
+ step: ({ event }) => ({
1636
+ ...event.step,
1637
+ inputMessages: void 0
1638
+ })
1639
+ })
1640
+ },
1782
1641
  callTools: {
1783
1642
  target: "CallingTool",
1784
1643
  actions: assign({
1785
- checkpoint: ({ context, event }) => ({
1786
- ...context.checkpoint,
1787
- messages: [...context.checkpoint.messages, event.newMessage],
1788
- usage: sumUsage(context.checkpoint.usage, event.usage),
1789
- contextWindowUsage: context.checkpoint.contextWindow ? calculateContextWindowUsage(event.usage, context.checkpoint.contextWindow) : void 0
1790
- }),
1644
+ checkpoint: ({ context, event }) => {
1645
+ const newUsage = sumUsage(context.checkpoint.usage, event.usage);
1646
+ return {
1647
+ ...context.checkpoint,
1648
+ messages: [...context.checkpoint.messages, event.newMessage],
1649
+ usage: newUsage,
1650
+ contextWindowUsage: context.checkpoint.contextWindow ? calculateContextWindowUsage(newUsage, context.checkpoint.contextWindow) : void 0,
1651
+ retryCount: 0
1652
+ };
1653
+ },
1791
1654
  step: ({ context, event }) => ({
1792
1655
  ...context.step,
1793
1656
  newMessages: [event.newMessage],
@@ -1799,12 +1662,17 @@ var runtimeStateMachine = setup({
1799
1662
  callInteractiveTool: {
1800
1663
  target: "CallingInteractiveTool",
1801
1664
  actions: assign({
1802
- checkpoint: ({ context, event }) => ({
1803
- ...context.checkpoint,
1804
- messages: [...context.checkpoint.messages, event.newMessage],
1805
- usage: sumUsage(context.checkpoint.usage, event.usage),
1806
- contextWindowUsage: context.checkpoint.contextWindow ? calculateContextWindowUsage(event.usage, context.checkpoint.contextWindow) : void 0
1807
- }),
1665
+ checkpoint: ({ context, event }) => {
1666
+ const newUsage = sumUsage(context.checkpoint.usage, event.usage);
1667
+ return {
1668
+ ...context.checkpoint,
1669
+ messages: [...context.checkpoint.messages, event.newMessage],
1670
+ usage: newUsage,
1671
+ contextWindowUsage: context.checkpoint.contextWindow ? calculateContextWindowUsage(newUsage, context.checkpoint.contextWindow) : void 0,
1672
+ retryCount: 0
1673
+ // Reset on successful generation
1674
+ };
1675
+ },
1808
1676
  step: ({ context, event }) => ({
1809
1677
  ...context.step,
1810
1678
  newMessages: [event.newMessage],
@@ -1816,12 +1684,17 @@ var runtimeStateMachine = setup({
1816
1684
  callDelegate: {
1817
1685
  target: "CallingDelegate",
1818
1686
  actions: assign({
1819
- checkpoint: ({ context, event }) => ({
1820
- ...context.checkpoint,
1821
- messages: [...context.checkpoint.messages, event.newMessage],
1822
- usage: sumUsage(context.checkpoint.usage, event.usage),
1823
- contextWindowUsage: context.checkpoint.contextWindow ? calculateContextWindowUsage(event.usage, context.checkpoint.contextWindow) : void 0
1824
- }),
1687
+ checkpoint: ({ context, event }) => {
1688
+ const newUsage = sumUsage(context.checkpoint.usage, event.usage);
1689
+ return {
1690
+ ...context.checkpoint,
1691
+ messages: [...context.checkpoint.messages, event.newMessage],
1692
+ usage: newUsage,
1693
+ contextWindowUsage: context.checkpoint.contextWindow ? calculateContextWindowUsage(newUsage, context.checkpoint.contextWindow) : void 0,
1694
+ retryCount: 0
1695
+ // Reset on successful generation
1696
+ };
1697
+ },
1825
1698
  step: ({ context, event }) => ({
1826
1699
  ...context.step,
1827
1700
  newMessages: [event.newMessage],
@@ -1844,15 +1717,6 @@ var runtimeStateMachine = setup({
1844
1717
  })
1845
1718
  })
1846
1719
  },
1847
- resolveThought: {
1848
- target: "ResolvingThought",
1849
- actions: assign({
1850
- step: ({ context, event }) => ({
1851
- ...context.step,
1852
- toolResults: [event.toolResult]
1853
- })
1854
- })
1855
- },
1856
1720
  attemptCompletion: {
1857
1721
  target: "GeneratingRunResult",
1858
1722
  actions: assign({
@@ -1905,23 +1769,6 @@ var runtimeStateMachine = setup({
1905
1769
  }
1906
1770
  }
1907
1771
  },
1908
- ResolvingThought: {
1909
- on: {
1910
- finishToolCall: {
1911
- target: "FinishingStep",
1912
- actions: assign({
1913
- checkpoint: ({ context, event }) => ({
1914
- ...context.checkpoint,
1915
- messages: [...context.checkpoint.messages, ...event.newMessages]
1916
- }),
1917
- step: ({ context, event }) => ({
1918
- ...context.step,
1919
- newMessages: [...context.step.newMessages, ...event.newMessages]
1920
- })
1921
- })
1922
- }
1923
- }
1924
- },
1925
1772
  GeneratingRunResult: {
1926
1773
  on: {
1927
1774
  retry: {
@@ -1930,7 +1777,8 @@ var runtimeStateMachine = setup({
1930
1777
  checkpoint: ({ context, event }) => ({
1931
1778
  ...context.checkpoint,
1932
1779
  messages: [...context.checkpoint.messages, ...event.newMessages],
1933
- usage: sumUsage(context.checkpoint.usage, event.usage)
1780
+ usage: sumUsage(context.checkpoint.usage, event.usage),
1781
+ retryCount: (context.checkpoint.retryCount ?? 0) + 1
1934
1782
  }),
1935
1783
  step: ({ context, event }) => ({
1936
1784
  ...context.step,
@@ -1941,10 +1789,23 @@ var runtimeStateMachine = setup({
1941
1789
  })
1942
1790
  })
1943
1791
  },
1792
+ stopRunByError: {
1793
+ target: "Stopped",
1794
+ actions: assign({
1795
+ checkpoint: ({ event }) => ({
1796
+ ...event.checkpoint,
1797
+ error: event.error
1798
+ }),
1799
+ step: ({ event }) => ({
1800
+ ...event.step,
1801
+ inputMessages: void 0
1802
+ })
1803
+ })
1804
+ },
1944
1805
  completeRun: {
1945
1806
  target: "Stopped",
1946
1807
  actions: assign({
1947
- checkpoint: ({ event }) => event.checkpoint,
1808
+ checkpoint: ({ event }) => ({ ...event.checkpoint, retryCount: 0 }),
1948
1809
  step: ({ event }) => ({
1949
1810
  ...event.step,
1950
1811
  inputMessages: void 0
@@ -2017,7 +1878,6 @@ var StateMachineLogics = {
2017
1878
  GeneratingToolCall: generatingToolCallLogic,
2018
1879
  CallingTool: callingToolLogic,
2019
1880
  ResolvingToolResult: resolvingToolResultLogic,
2020
- ResolvingThought: resolvingThoughtLogic,
2021
1881
  GeneratingRunResult: generatingRunResultLogic,
2022
1882
  CallingInteractiveTool: callingInteractiveToolLogic,
2023
1883
  CallingDelegate: callingDelegateLogic,
@@ -2048,13 +1908,14 @@ var StateMachineCoordinator = class {
2048
1908
  * Execute the state machine and return the final checkpoint.
2049
1909
  */
2050
1910
  async execute() {
2051
- const { setting, initialCheckpoint, eventListener, skillManagers } = this.params;
1911
+ const { setting, initialCheckpoint, eventListener, skillManagers, llmExecutor } = this.params;
2052
1912
  this.actor = this.actorFactory.create({
2053
1913
  input: {
2054
1914
  setting,
2055
1915
  initialCheckpoint,
2056
1916
  eventListener,
2057
- skillManagers
1917
+ skillManagers,
1918
+ llmExecutor
2058
1919
  }
2059
1920
  });
2060
1921
  return new Promise((resolve, reject) => {
@@ -2131,118 +1992,6 @@ async function executeStateMachine(params) {
2131
1992
  const coordinator = new StateMachineCoordinator(params);
2132
1993
  return coordinator.execute();
2133
1994
  }
2134
-
2135
- // src/helpers/checkpoint.ts
2136
- function createInitialCheckpoint(checkpointId, params) {
2137
- return {
2138
- id: checkpointId,
2139
- jobId: params.jobId,
2140
- runId: params.runId,
2141
- expert: {
2142
- key: params.expertKey,
2143
- name: params.expert.name,
2144
- version: params.expert.version
2145
- },
2146
- stepNumber: 1,
2147
- status: "init",
2148
- messages: [],
2149
- usage: createEmptyUsage(),
2150
- contextWindow: params.contextWindow,
2151
- contextWindowUsage: params.contextWindow ? 0 : void 0
2152
- };
2153
- }
2154
- function createNextStepCheckpoint(checkpointId, checkpoint) {
2155
- return {
2156
- ...checkpoint,
2157
- id: checkpointId,
2158
- stepNumber: checkpoint.stepNumber + 1
2159
- };
2160
- }
2161
- function buildDelegationReturnState(currentSetting, resultCheckpoint, parentCheckpoint) {
2162
- const { messages, delegatedBy } = resultCheckpoint;
2163
- if (!delegatedBy) {
2164
- throw new Error("delegatedBy is required for buildDelegationReturnState");
2165
- }
2166
- const delegateResultMessage = messages[messages.length - 1];
2167
- if (!delegateResultMessage || delegateResultMessage.type !== "expertMessage") {
2168
- throw new Error("Delegation error: delegation result message is incorrect");
2169
- }
2170
- const delegateText = delegateResultMessage.contents.find((content) => content.type === "textPart");
2171
- if (!delegateText) {
2172
- throw new Error("Delegation error: delegation result message does not contain a text");
2173
- }
2174
- const { expert, toolCallId, toolName } = delegatedBy;
2175
- return {
2176
- setting: {
2177
- ...currentSetting,
2178
- expertKey: expert.key,
2179
- input: {
2180
- interactiveToolCallResult: {
2181
- toolCallId,
2182
- toolName,
2183
- skillName: `delegate/${resultCheckpoint.expert.key}`,
2184
- text: delegateText.text
2185
- }
2186
- }
2187
- },
2188
- checkpoint: {
2189
- ...parentCheckpoint,
2190
- stepNumber: resultCheckpoint.stepNumber,
2191
- usage: resultCheckpoint.usage,
2192
- pendingToolCalls: parentCheckpoint.pendingToolCalls,
2193
- partialToolResults: parentCheckpoint.partialToolResults
2194
- }
2195
- };
2196
- }
2197
- async function resolveExpertToRun(expertKey, experts, clientOptions) {
2198
- if (experts[expertKey]) {
2199
- return experts[expertKey];
2200
- }
2201
- const client = new ApiV1Client({
2202
- baseUrl: clientOptions.perstackApiBaseUrl,
2203
- apiKey: clientOptions.perstackApiKey
2204
- });
2205
- const { expert } = await client.registry.experts.get({ expertKey });
2206
- return toRuntimeExpert(expert);
2207
- }
2208
- function toRuntimeExpert(expert) {
2209
- const skills = Object.fromEntries(
2210
- Object.entries(expert.skills).map(([name, skill]) => {
2211
- switch (skill.type) {
2212
- case "mcpStdioSkill":
2213
- return [name, { ...skill, name }];
2214
- case "mcpSseSkill":
2215
- return [name, { ...skill, name }];
2216
- case "interactiveSkill":
2217
- return [name, { ...skill, name }];
2218
- default: {
2219
- throw new Error(`Unknown skill type: ${skill.type}`);
2220
- }
2221
- }
2222
- })
2223
- );
2224
- return { ...expert, skills };
2225
- }
2226
-
2227
- // src/helpers/setup-experts.ts
2228
- async function setupExperts(setting, resolveExpertToRun2 = resolveExpertToRun) {
2229
- const { expertKey } = setting;
2230
- const experts = { ...setting.experts };
2231
- const clientOptions = {
2232
- perstackApiBaseUrl: setting.perstackApiBaseUrl,
2233
- perstackApiKey: setting.perstackApiKey
2234
- };
2235
- const expertToRun = await resolveExpertToRun2(expertKey, experts, clientOptions);
2236
- experts[expertKey] = expertToRun;
2237
- for (const delegateName of expertToRun.delegates) {
2238
- const delegate = await resolveExpertToRun2(delegateName, experts, clientOptions);
2239
- if (!delegate) {
2240
- throw new Error(`Delegate ${delegateName} not found`);
2241
- }
2242
- experts[delegateName] = delegate;
2243
- }
2244
- return { expertToRun, experts };
2245
- }
2246
1995
  var SingleDelegationStrategy = class {
2247
1996
  async execute(delegations, setting, context, parentExpert, _runFn, _parentOptions) {
2248
1997
  if (delegations.length !== 1) {
@@ -2409,13 +2158,15 @@ var ParallelDelegationStrategy = class {
2409
2158
  }
2410
2159
  const textPart = lastMessage.contents.find((c) => c.type === "textPart");
2411
2160
  if (!textPart || textPart.type !== "textPart") {
2412
- throw new Error("Delegation error: delegation result message does not contain text");
2161
+ console.warn(
2162
+ `Delegation result from ${expertKey} has no text content. Parent expert will receive empty string.`
2163
+ );
2413
2164
  }
2414
2165
  return {
2415
2166
  toolCallId,
2416
2167
  toolName,
2417
2168
  expertKey,
2418
- text: textPart.text,
2169
+ text: textPart?.type === "textPart" ? textPart.text : "",
2419
2170
  stepNumber: checkpoint.stepNumber,
2420
2171
  deltaUsage: checkpoint.usage
2421
2172
  };
@@ -2468,22 +2219,272 @@ var RunEventEmitter = class {
2468
2219
  }
2469
2220
  };
2470
2221
 
2222
+ // src/helpers/provider-adapter-factory.ts
2223
+ var PROVIDER_PACKAGE_NAMES = {
2224
+ anthropic: "anthropic-provider",
2225
+ openai: "openai-provider",
2226
+ google: "google-provider",
2227
+ ollama: "ollama-provider",
2228
+ "azure-openai": "azure-openai-provider",
2229
+ "amazon-bedrock": "bedrock-provider",
2230
+ "google-vertex": "vertex-provider",
2231
+ deepseek: "deepseek-provider"
2232
+ };
2233
+ var ProviderNotInstalledError = class extends Error {
2234
+ constructor(providerName) {
2235
+ const packageName = PROVIDER_PACKAGE_NAMES[providerName];
2236
+ super(
2237
+ `Provider "${providerName}" is not installed. Run: npm install @perstack/${packageName}`
2238
+ );
2239
+ this.name = "ProviderNotInstalledError";
2240
+ }
2241
+ };
2242
+ var adapterRegistry = /* @__PURE__ */ new Map();
2243
+ var adapterInstances = /* @__PURE__ */ new Map();
2244
+ var pendingCreations = /* @__PURE__ */ new Map();
2245
+ function getCacheKey(config, options) {
2246
+ return JSON.stringify({
2247
+ providerName: config.providerName,
2248
+ apiKey: "apiKey" in config ? config.apiKey : void 0,
2249
+ baseUrl: "baseUrl" in config ? config.baseUrl : void 0,
2250
+ proxyUrl: options?.proxyUrl
2251
+ });
2252
+ }
2253
+ function registerProviderAdapter(providerName, loader) {
2254
+ adapterRegistry.set(providerName, loader);
2255
+ }
2256
+ async function createProviderAdapter(config, options) {
2257
+ const cacheKey = getCacheKey(config, options);
2258
+ const cached = adapterInstances.get(cacheKey);
2259
+ if (cached) return cached;
2260
+ const pending = pendingCreations.get(cacheKey);
2261
+ if (pending) return pending;
2262
+ const loader = adapterRegistry.get(config.providerName);
2263
+ if (!loader) {
2264
+ throw new ProviderNotInstalledError(config.providerName);
2265
+ }
2266
+ const creationPromise = (async () => {
2267
+ try {
2268
+ const AdapterClass = await loader();
2269
+ const adapter = new AdapterClass(config, options);
2270
+ adapterInstances.set(cacheKey, adapter);
2271
+ return adapter;
2272
+ } finally {
2273
+ pendingCreations.delete(cacheKey);
2274
+ }
2275
+ })();
2276
+ pendingCreations.set(cacheKey, creationPromise);
2277
+ return creationPromise;
2278
+ }
2279
+
2280
+ // src/helpers/register-providers.ts
2281
+ registerProviderAdapter("anthropic", async () => {
2282
+ const { AnthropicProviderAdapter } = await import('@perstack/anthropic-provider');
2283
+ return AnthropicProviderAdapter;
2284
+ });
2285
+ registerProviderAdapter("openai", async () => {
2286
+ const { OpenAIProviderAdapter } = await import('@perstack/openai-provider');
2287
+ return OpenAIProviderAdapter;
2288
+ });
2289
+ registerProviderAdapter("google", async () => {
2290
+ const { GoogleProviderAdapter } = await import('@perstack/google-provider');
2291
+ return GoogleProviderAdapter;
2292
+ });
2293
+ registerProviderAdapter("ollama", async () => {
2294
+ const { OllamaProviderAdapter } = await import('@perstack/ollama-provider');
2295
+ return OllamaProviderAdapter;
2296
+ });
2297
+ registerProviderAdapter("azure-openai", async () => {
2298
+ const { AzureOpenAIProviderAdapter } = await import('@perstack/azure-openai-provider');
2299
+ return AzureOpenAIProviderAdapter;
2300
+ });
2301
+ registerProviderAdapter("amazon-bedrock", async () => {
2302
+ const { BedrockProviderAdapter } = await import('@perstack/bedrock-provider');
2303
+ return BedrockProviderAdapter;
2304
+ });
2305
+ registerProviderAdapter("google-vertex", async () => {
2306
+ const { VertexProviderAdapter } = await import('@perstack/vertex-provider');
2307
+ return VertexProviderAdapter;
2308
+ });
2309
+ registerProviderAdapter("deepseek", async () => {
2310
+ const { DeepseekProviderAdapter } = await import('@perstack/deepseek-provider');
2311
+ return DeepseekProviderAdapter;
2312
+ });
2313
+ var shouldEnableReasoning = (budget) => budget !== void 0 && budget !== "none" && budget !== 0;
2314
+ var LLMExecutor = class {
2315
+ constructor(adapter, model) {
2316
+ this.adapter = adapter;
2317
+ this.model = model;
2318
+ }
2319
+ async generateText(params) {
2320
+ const providerTools = this.adapter.getProviderTools(
2321
+ params.providerToolNames ?? [],
2322
+ params.providerToolOptions
2323
+ );
2324
+ const baseProviderOptions = this.adapter.getProviderOptions(params.providerOptionsConfig);
2325
+ const reasoningEnabled = shouldEnableReasoning(params.reasoningBudget);
2326
+ const reasoningOptions = reasoningEnabled && params.reasoningBudget ? this.adapter.getReasoningOptions(params.reasoningBudget) : void 0;
2327
+ const providerOptions = this.mergeProviderOptions(baseProviderOptions, reasoningOptions);
2328
+ try {
2329
+ const result = await generateText({
2330
+ model: this.model,
2331
+ messages: params.messages,
2332
+ maxRetries: params.maxRetries,
2333
+ tools: { ...params.tools, ...providerTools },
2334
+ toolChoice: params.toolChoice,
2335
+ abortSignal: params.abortSignal,
2336
+ providerOptions
2337
+ });
2338
+ return { success: true, result };
2339
+ } catch (error) {
2340
+ const providerError = this.adapter.normalizeError(error);
2341
+ return {
2342
+ success: false,
2343
+ error: providerError,
2344
+ isRetryable: this.adapter.isRetryable(error)
2345
+ };
2346
+ }
2347
+ }
2348
+ mergeProviderOptions(...options) {
2349
+ const defined = options.filter(Boolean);
2350
+ if (defined.length === 0) return void 0;
2351
+ const result = {};
2352
+ for (const opt of defined) {
2353
+ for (const [provider, settings] of Object.entries(opt)) {
2354
+ result[provider] = { ...result[provider], ...settings };
2355
+ }
2356
+ }
2357
+ return result;
2358
+ }
2359
+ async generateTextWithoutTools(params) {
2360
+ const baseProviderOptions = this.adapter.getProviderOptions(params.providerOptionsConfig);
2361
+ const reasoningEnabled = shouldEnableReasoning(params.reasoningBudget);
2362
+ const reasoningOptions = reasoningEnabled && params.reasoningBudget ? this.adapter.getReasoningOptions(params.reasoningBudget) : void 0;
2363
+ const providerOptions = this.mergeProviderOptions(baseProviderOptions, reasoningOptions);
2364
+ try {
2365
+ const result = await generateText({
2366
+ model: this.model,
2367
+ messages: params.messages,
2368
+ maxRetries: params.maxRetries,
2369
+ abortSignal: params.abortSignal,
2370
+ providerOptions
2371
+ });
2372
+ return { success: true, result };
2373
+ } catch (error) {
2374
+ const providerError = this.adapter.normalizeError(error);
2375
+ return {
2376
+ success: false,
2377
+ error: providerError,
2378
+ isRetryable: this.adapter.isRetryable(error)
2379
+ };
2380
+ }
2381
+ }
2382
  /**
   * Stream a generation (with tools) while forwarding reasoning and text
   * deltas to the supplied callbacks, then assemble the finished stream into
   * a GenerateTextResult-shaped object.
   *
   * Callback ordering contract (derived from the loop below):
   *  - onReasoningStart fires once, before the first reasoning delta;
   *  - onReasoningComplete fires once, either when the first text delta
   *    arrives or after the stream ends, with the full accumulated reasoning;
   *  - onResultStart fires once, before the first text delta.
   *
   * @param {object} params - messages, tools, toolChoice, maxRetries,
   *   abortSignal, providerToolNames/Options, providerOptionsConfig,
   *   reasoningBudget
   * @param {object} callbacks - optional onReasoningStart/Delta/Complete and
   *   onResultStart/Delta handlers
   * @returns {Promise<{success: true, result: object} |
   *   {success: false, error: object, isRetryable: boolean}>}
   */
  async streamText(params, callbacks) {
    const providerTools = this.adapter.getProviderTools(
      params.providerToolNames ?? [],
      params.providerToolOptions
    );
    const baseProviderOptions = this.adapter.getProviderOptions(params.providerOptionsConfig);
    const reasoningEnabled = shouldEnableReasoning(params.reasoningBudget);
    const reasoningOptions = reasoningEnabled && params.reasoningBudget ? this.adapter.getReasoningOptions(params.reasoningBudget) : void 0;
    const providerOptions = this.mergeProviderOptions(baseProviderOptions, reasoningOptions);
    const streamResult = streamText({
      model: this.model,
      messages: params.messages,
      maxRetries: params.maxRetries,
      // Provider-native tools are merged over the caller-supplied tool set.
      tools: { ...params.tools, ...providerTools },
      toolChoice: params.toolChoice,
      abortSignal: params.abortSignal,
      providerOptions
    });
    // State flags guaranteeing each start/complete callback fires at most once.
    let reasoningStarted = false;
    let reasoningCompleted = false;
    let resultStarted = false;
    let accumulatedReasoning = "";
    try {
      for await (const part of streamResult.fullStream) {
        if (part.type === "reasoning-delta") {
          if (!reasoningStarted) {
            callbacks.onReasoningStart?.();
            reasoningStarted = true;
          }
          accumulatedReasoning += part.text;
          callbacks.onReasoningDelta?.(part.text);
        }
        if (part.type === "text-delta") {
          // First text delta closes out the reasoning phase, if one was open.
          if (reasoningStarted && !reasoningCompleted) {
            callbacks.onReasoningComplete?.(accumulatedReasoning);
            reasoningCompleted = true;
          }
          if (!resultStarted) {
            callbacks.onResultStart?.();
            resultStarted = true;
          }
          callbacks.onResultDelta?.(part.text);
        }
      }
      // Stream may end without any text delta (e.g. tool-call-only turns);
      // still deliver the accumulated reasoning exactly once.
      if (reasoningStarted && !reasoningCompleted) {
        callbacks.onReasoningComplete?.(accumulatedReasoning);
        reasoningCompleted = true;
      }
      const text = await streamResult.text;
      const toolCalls = await streamResult.toolCalls;
      const finishReason = await streamResult.finishReason;
      const usage = await streamResult.usage;
      const reasoning = await streamResult.reasoning;
      const response = await streamResult.response;
      const result = {
        text,
        toolCalls,
        finishReason,
        usage,
        reasoning,
        response,
        // These properties are required by GenerateTextResult but not available in streamText
        // They're optional or have safe defaults
        toolResults: [],
        steps: [],
        experimental_output: void 0,
        providerMetadata: void 0,
        request: { body: "" }
      };
      return { success: true, result };
    } catch (error) {
      // Includes errors thrown while iterating fullStream; normalize so the
      // caller gets the same error shape as the non-streaming paths.
      const providerError = this.adapter.normalizeError(error);
      return {
        success: false,
        error: providerError,
        isRetryable: this.adapter.isRetryable(error)
      };
    }
  }
2461
+ };
2462
+
2471
2463
  // src/orchestration/single-run-executor.ts
2472
2464
  var SingleRunExecutor = class {
2473
2465
  /**
   * @param {object} [options] - executor configuration; stored as-is and read
   *   later (e.g. options.lockfile, options.resolveExpertToRun,
   *   options.eventListener, options.storeCheckpoint — see execute()).
   *   Defaults to an empty object so property reads are safe.
   */
  constructor(options = {}) {
    this.options = options;
  }
2476
2468
  async execute(setting, checkpoint) {
2469
+ const adapter = await createProviderAdapter(setting.providerConfig, {
2470
+ proxyUrl: setting.proxyUrl
2471
+ });
2472
+ const model = adapter.createModel(setting.model);
2473
+ const llmExecutor = new LLMExecutor(adapter, model);
2477
2474
  const contextWindow = getContextWindow(setting.providerConfig.providerName, setting.model);
2478
2475
  const { expertToRun, experts } = await setupExperts(setting, this.options.resolveExpertToRun);
2479
2476
  this.emitInitEvent(setting, expertToRun, experts);
2480
- const skillManagers = await getSkillManagers(
2477
+ const lockfileExpert = this.options.lockfile?.experts[setting.expertKey];
2478
+ const skillManagers = lockfileExpert ? await getSkillManagersFromLockfile(
2481
2479
  expertToRun,
2482
2480
  experts,
2483
2481
  setting,
2482
+ getLockfileExpertToolDefinitions(lockfileExpert),
2484
2483
  this.options.eventListener,
2485
2484
  { isDelegatedRun: !!checkpoint?.delegatedBy }
2486
- );
2485
+ ) : await getSkillManagers(expertToRun, experts, setting, this.options.eventListener, {
2486
+ isDelegatedRun: !!checkpoint?.delegatedBy
2487
+ });
2487
2488
  const initialCheckpoint = checkpoint ? createNextStepCheckpoint(createId(), checkpoint) : createInitialCheckpoint(createId(), {
2488
2489
  jobId: setting.jobId,
2489
2490
  runId: setting.runId,
@@ -2499,6 +2500,7 @@ var SingleRunExecutor = class {
2499
2500
  initialCheckpoint,
2500
2501
  eventListener,
2501
2502
  skillManagers,
2503
+ llmExecutor,
2502
2504
  eventEmitter,
2503
2505
  storeCheckpoint: this.options.storeCheckpoint ?? (async () => {
2504
2506
  }),
@@ -2524,7 +2526,6 @@ var SingleRunExecutor = class {
2524
2526
  expertName: expertToRun.name,
2525
2527
  experts: Object.keys(experts),
2526
2528
  model: setting.model,
2527
- temperature: setting.temperature,
2528
2529
  maxSteps: setting.maxSteps,
2529
2530
  maxRetries: setting.maxRetries,
2530
2531
  timeout: setting.timeout,
@@ -2565,7 +2566,8 @@ async function run(runInput, options) {
2565
2566
  storeCheckpoint: options?.storeCheckpoint,
2566
2567
  storeEvent: options?.storeEvent,
2567
2568
  eventListener: options?.eventListener,
2568
- resolveExpertToRun: options?.resolveExpertToRun
2569
+ resolveExpertToRun: options?.resolveExpertToRun,
2570
+ lockfile: options?.lockfile
2569
2571
  });
2570
2572
  while (true) {
2571
2573
  const runResult = await runExecutor.execute(setting, checkpoint);
@@ -2633,6 +2635,6 @@ async function run(runInput, options) {
2633
2635
  }
2634
2636
  }
2635
2637
 
2636
- export { getModel, package_default, run, runtimeStateMachine };
2637
- //# sourceMappingURL=chunk-3RWT2GPO.js.map
2638
- //# sourceMappingURL=chunk-3RWT2GPO.js.map
2638
+ export { findLockfile, getLockfileExpertToolDefinitions, getModel, loadLockfile, package_default, run, runtimeStateMachine };
2639
+ //# sourceMappingURL=chunk-LDJKVMQK.js.map
2640
+ //# sourceMappingURL=chunk-LDJKVMQK.js.map