@standardagents/builder 0.9.12 → 0.9.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -349,20 +349,22 @@ var init_OpenAIProvider = __esm({
  const request = {
  model: modelId,
  messages: context.messages,
- stream: true
- };
- if (context.tools && context.tools.length > 0) {
- request.tools = context.tools;
- if (context.parallel_tool_calls !== void 0) {
- request.parallel_tool_calls = context.parallel_tool_calls;
- }
- if (context.tool_choice) {
- request.tool_choice = context.tool_choice;
+ stream: true,
+ // Optional fields - add only if present
+ ...context.tools && context.tools.length > 0 && {
+ tools: context.tools,
+ ...context.parallel_tool_calls !== void 0 && {
+ parallel_tool_calls: context.parallel_tool_calls
+ },
+ ...context.tool_choice && {
+ tool_choice: context.tool_choice
+ }
+ },
+ // OpenAI o-series models use max_completion_tokens for reasoning
+ ...context.reasoning?.max_tokens && {
+ max_completion_tokens: context.reasoning.max_tokens
  }
- }
- if (context.reasoning?.max_tokens) {
- request.max_completion_tokens = context.reasoning.max_tokens;
- }
+ };
  await this.logActualRequest(request, logId, state);
  const stream = await client.chat.completions.create(request, {
  signal
@@ -450,53 +452,57 @@ var init_OpenRouterProvider = __esm({
  apiKey: this.config.api_key,
  baseURL: "https://openrouter.ai/api/v1"
  });
- const request = {
- model: modelId,
- messages: context.messages,
- stream: true,
- // Enable usage accounting to get cost information
- // https://openrouter.ai/docs/use-cases/usage-accounting
- usage: {
- include: true
- }
- };
+ let providerRouting;
  if (modelConfig?.included_providers) {
  try {
  const providers = JSON.parse(modelConfig.included_providers);
  if (Array.isArray(providers) && providers.length > 0) {
- request.provider = {
- only: providers
- };
+ providerRouting = { only: providers };
  }
  } catch (error) {
  console.error("[OpenRouter] Failed to parse included_providers:", error);
  }
  }
- if (context.tools && context.tools.length > 0) {
- request.tools = context.tools;
- if (context.parallel_tool_calls !== void 0) {
- request.parallel_tool_calls = context.parallel_tool_calls;
- }
- if (context.tool_choice) {
- request.tool_choice = context.tool_choice;
- }
- }
+ let reasoningConfig;
  if (context.reasoning) {
- request.reasoning = {};
+ reasoningConfig = {};
  if (context.reasoning.effort) {
- request.reasoning.effort = context.reasoning.effort;
+ reasoningConfig.effort = context.reasoning.effort;
  }
  if (context.reasoning.max_tokens) {
- request.reasoning.max_tokens = context.reasoning.max_tokens;
+ reasoningConfig.max_tokens = context.reasoning.max_tokens;
  }
  if (context.reasoning.exclude) {
- request.reasoning.exclude = true;
+ reasoningConfig.exclude = true;
  }
  }
+ const request = {
+ model: modelId,
+ messages: context.messages,
+ stream: true,
+ // OpenRouter-specific: enable usage accounting
+ // https://openrouter.ai/docs/use-cases/usage-accounting
+ usage: { include: true },
+ // Optional OpenRouter routing
+ ...providerRouting && { provider: providerRouting },
+ // Optional reasoning config
+ ...reasoningConfig && { reasoning: reasoningConfig },
+ // Optional tools
+ ...context.tools && context.tools.length > 0 && {
+ tools: context.tools,
+ ...context.parallel_tool_calls !== void 0 && {
+ parallel_tool_calls: context.parallel_tool_calls
+ },
+ ...context.tool_choice && {
+ tool_choice: context.tool_choice
+ }
+ }
+ };
  await this.logActualRequest(request, logId, state);
- const stream = await client.chat.completions.create(request, {
- signal
- });
+ const stream = await client.chat.completions.create(
+ request,
+ { signal }
+ );
  let content = "";
  let reasoning_content = "";
  let reasoning_details = [];
@@ -508,7 +514,8 @@ var init_OpenRouterProvider = __esm({
  let actualProvider;
  const allChunks = [];
  try {
- for await (const chunk of stream) {
+ for await (const rawChunk of stream) {
+ const chunk = rawChunk;
  if (signal?.aborted || await state.thread.instance.shouldStop()) {
  break;
  }
@@ -522,18 +529,15 @@ var init_OpenRouterProvider = __esm({
  this.streamContentChunk(choice.delta.content, state);
  }
  if (choice.delta?.reasoning) {
- const reasoning = choice.delta.reasoning;
- reasoning_content += reasoning;
+ reasoning_content += choice.delta.reasoning;
  }
  if (choice.delta?.reasoning_details) {
  const details = choice.delta.reasoning_details;
- if (Array.isArray(details)) {
- for (const item of details) {
- if (item.type === "reasoning.text") {
- reasoning_content += item.text || "";
- } else {
- reasoning_details.push(item);
- }
+ for (const item of details) {
+ if (item.type === "reasoning.text") {
+ reasoning_content += item.text || "";
+ } else {
+ reasoning_details.push(item);
  }
  }
  }
@@ -574,22 +578,24 @@ var init_OpenRouterProvider = __esm({
  }
  }
  } catch (streamError) {
+ const err = streamError;
  console.error(`[openrouter] Stream error for model ${modelId}:`, {
  error: streamError,
- code: streamError?.code,
- status: streamError?.status,
- message: streamError?.message,
- type: streamError?.type
+ code: err.code,
+ status: err.status,
+ message: err.message,
+ type: err.type
  });
  throw new Error(
- `Stream error: ${streamError?.message || "Unknown error"} (code: ${streamError?.code || "unknown"})`
+ `Stream error: ${err.message || "Unknown error"} (code: ${err.code || "unknown"})`
  );
  }
- if (usage && cost !== void 0) {
- usage.cost = cost;
+ const extendedUsage = usage;
+ if (extendedUsage && cost !== void 0) {
+ extendedUsage.cost = cost;
  }
- if (usage && actualProvider !== void 0) {
- usage.provider = actualProvider;
+ if (extendedUsage && actualProvider !== void 0) {
+ extendedUsage.provider = actualProvider;
  }
  const toolCalls = toolCallsMap.size > 0 ? this.finalizeToolCalls(toolCallsMap) : void 0;
  const aggregateResponse = {
@@ -609,7 +615,7 @@ var init_OpenRouterProvider = __esm({
  finish_reason: finishReason || "stop"
  }
  ],
- usage: usage || {
+ usage: extendedUsage || {
  prompt_tokens: 0,
  completion_tokens: 0,
  total_tokens: 0
@@ -617,8 +623,8 @@ var init_OpenRouterProvider = __esm({
  // Include all chunks for debugging/auditing
  _raw_chunks: allChunks.length
  };
- let finalReasoningContent = reasoning_content || null;
- let finalReasoningDetails = reasoning_details.length > 0 ? reasoning_details : void 0;
+ const finalReasoningContent = reasoning_content || null;
+ const finalReasoningDetails = reasoning_details.length > 0 ? reasoning_details : void 0;
  return {
  id: responseId,
  model: modelId,
@@ -643,12 +649,343 @@ var init_OpenRouterProvider = __esm({
  }
  });

+ // src/agents/providers/TestScript.ts
+ var TestScript;
+ var init_TestScript = __esm({
+ "src/agents/providers/TestScript.ts"() {
+ TestScript = class _TestScript {
+ responses = [];
+ streamingConfig;
+ /**
+ * Create a new TestScript instance
+ */
+ static create() {
+ return new _TestScript();
+ }
+ /**
+ * Add a response to the sequence
+ */
+ addResponse(response) {
+ this.responses.push(response);
+ return this;
+ }
+ /**
+ * Add a simple text response (stops after response)
+ */
+ addTextResponse(content, options) {
+ return this.addResponse({
+ content,
+ finishReason: "stop",
+ ...options
+ });
+ }
+ /**
+ * Add a response with tool calls
+ */
+ addToolCallResponse(toolCalls, content, options) {
+ return this.addResponse({
+ content: content ?? null,
+ toolCalls: toolCalls.map((tc) => ({
+ id: tc.id || `call_${crypto.randomUUID().substring(0, 8)}`,
+ type: "function",
+ function: {
+ name: tc.name,
+ arguments: JSON.stringify(tc.arguments)
+ }
+ })),
+ finishReason: "tool_calls",
+ ...options
+ });
+ }
+ /**
+ * Add an error response (simulates provider error)
+ */
+ addErrorResponse(message, code, status, options) {
+ return this.addResponse({
+ error: { message, code, status },
+ ...options
+ });
+ }
+ /**
+ * Add a response with reasoning content (for o1-style models)
+ */
+ addReasoningResponse(content, reasoningContent, options) {
+ return this.addResponse({
+ content,
+ reasoningContent,
+ finishReason: "stop",
+ ...options
+ });
+ }
+ /**
+ * Enable streaming simulation for all responses
+ */
+ withStreamingSimulation(config) {
+ this.streamingConfig = config;
+ return this;
+ }
+ /**
+ * Get all scripted responses
+ */
+ getResponses() {
+ return [...this.responses];
+ }
+ /**
+ * Get streaming configuration
+ */
+ getStreamingConfig() {
+ return this.streamingConfig;
+ }
+ /**
+ * Get total number of responses
+ */
+ get length() {
+ return this.responses.length;
+ }
+ /**
+ * Check if script is empty
+ */
+ isEmpty() {
+ return this.responses.length === 0;
+ }
+ /**
+ * Create a clone of this script
+ */
+ clone() {
+ const cloned = new _TestScript();
+ cloned.responses = [...this.responses];
+ cloned.streamingConfig = this.streamingConfig ? { ...this.streamingConfig } : void 0;
+ return cloned;
+ }
+ };
+ }
+ });
+
+ // src/agents/providers/TestProvider.ts
+ var TestProvider;
+ var init_TestProvider = __esm({
+ "src/agents/providers/TestProvider.ts"() {
+ init_BaseProvider();
+ TestProvider = class extends BaseProvider {
+ script;
+ responseIndex = 0;
+ validateInputs;
+ debugLog;
+ requestLog = [];
+ constructor(config) {
+ super(config);
+ this.script = config.script;
+ this.validateInputs = config.validateInputs ?? false;
+ this.debugLog = config.debugLog ?? false;
+ }
+ get name() {
+ return "test";
+ }
+ /**
+ * Test provider supports any model ID
+ */
+ supportsModel(_modelId) {
+ return true;
+ }
+ /**
+ * Return the next scripted response
+ */
+ async chat(modelId, context, state, logId, signal, _modelConfig) {
+ if (this.debugLog) {
+ console.log(`[TestProvider] Request ${this.responseIndex + 1}:`, {
+ messageCount: context.messages.length,
+ lastMessage: context.messages.slice(-1)[0]
+ });
+ }
+ this.requestLog.push({ ...context });
+ if (signal?.aborted) {
+ throw new Error("Request aborted");
+ }
+ const responses = this.script.getResponses();
+ if (this.responseIndex >= responses.length) {
+ const lastMessage = context.messages.slice(-1)[0];
+ throw new Error(
+ `TestProvider: Script exhausted after ${this.responseIndex} responses. Expected ${responses.length} total requests. Received request with ${context.messages.length} messages. Last message role: "${lastMessage?.role}", content: "${lastMessage?.content?.substring(0, 100)}..."`
+ );
+ }
+ const scripted = responses[this.responseIndex];
+ this.responseIndex++;
+ if (this.validateInputs && scripted.expectInput) {
+ this.validateInput(context, scripted.expectInput);
+ }
+ if (scripted.delayMs) {
+ await this.sleep(scripted.delayMs);
+ }
+ if (scripted.error) {
+ const error = new Error(scripted.error.message);
+ error.code = scripted.error.code;
+ error.status = scripted.error.status;
+ await this.logError(state, error, `TestProvider: ${modelId}`);
+ throw error;
+ }
+ await this.logActualRequest(
+ { model: modelId, messages: context.messages, tools: context.tools },
+ logId,
+ state
+ );
+ const streamingConfig = this.script.getStreamingConfig();
+ if (streamingConfig && scripted.content) {
+ await this.simulateStreaming(scripted.content, streamingConfig, state);
+ } else if (scripted.content) {
+ this.streamContent(scripted.content, state);
+ this.streamContentChunk(scripted.content, state);
+ }
+ const responseId = `test_${crypto.randomUUID().substring(0, 12)}`;
+ const promptTokens = scripted.usage?.promptTokens ?? this.estimateTokens(context.messages);
+ const completionTokens = scripted.usage?.completionTokens ?? this.estimateTokens([{ content: scripted.content }]);
+ const reasoningTokens = scripted.usage?.reasoningTokens ?? 0;
+ return {
+ id: responseId,
+ model: modelId,
+ content: scripted.content ?? null,
+ reasoning_content: scripted.reasoningContent ?? null,
+ reasoning_details: scripted.reasoningDetails,
+ tool_calls: scripted.toolCalls,
+ finish_reason: scripted.finishReason ?? "stop",
+ usage: {
+ prompt_tokens: promptTokens,
+ completion_tokens: completionTokens,
+ total_tokens: promptTokens + completionTokens,
+ completion_tokens_details: reasoningTokens ? { reasoning_tokens: reasoningTokens } : void 0
+ }
+ };
+ }
+ /**
+ * Simulate streaming by sending content in chunks
+ */
+ async simulateStreaming(content, config, state) {
+ for (let i = 0; i < content.length; i += config.chunkSize) {
+ const chunk = content.substring(i, i + config.chunkSize);
+ this.streamContent(chunk, state);
+ this.streamContentChunk(chunk, state);
+ if (config.chunkDelayMs > 0) {
+ await this.sleep(config.chunkDelayMs);
+ }
+ }
+ }
+ /**
+ * Validate request matches expectations
+ */
+ validateInput(context, expectations) {
+ if (expectations.messageCount !== void 0) {
+ if (context.messages.length !== expectations.messageCount) {
+ throw new Error(
+ `TestProvider: Expected ${expectations.messageCount} messages, got ${context.messages.length}`
+ );
+ }
+ }
+ if (expectations.containsMessage) {
+ const pattern = expectations.containsMessage;
+ const found = context.messages.some((m) => {
+ if (!m.content) return false;
+ return typeof pattern === "string" ? m.content.includes(pattern) : pattern.test(m.content);
+ });
+ if (!found) {
+ throw new Error(
+ `TestProvider: Expected message containing "${pattern}" not found`
+ );
+ }
+ }
+ if (expectations.containsToolResult) {
+ const { toolName, resultContains } = expectations.containsToolResult;
+ const found = context.messages.some((m) => {
+ if (m.role !== "tool") return false;
+ if (!m.content) return false;
+ return !resultContains || m.content.includes(resultContains);
+ });
+ if (!found) {
+ throw new Error(
+ `TestProvider: Expected tool result for "${toolName}" not found`
+ );
+ }
+ }
+ if (expectations.systemPromptContains) {
+ const pattern = expectations.systemPromptContains;
+ const systemMessage = context.messages.find((m) => m.role === "system");
+ if (!systemMessage?.content) {
+ throw new Error(`TestProvider: No system message found`);
+ }
+ const matches = typeof pattern === "string" ? systemMessage.content.includes(pattern) : pattern.test(systemMessage.content);
+ if (!matches) {
+ throw new Error(
+ `TestProvider: System prompt does not contain "${pattern}"`
+ );
+ }
+ }
+ }
+ /**
+ * Simple token estimation (for mock usage stats)
+ */
+ estimateTokens(messages) {
+ return messages.reduce((sum, m) => {
+ const content = m.content || "";
+ return sum + Math.ceil(content.length / 4);
+ }, 0);
+ }
+ sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+ }
+ // ============ Test Utility Methods ============
+ /**
+ * Get logged requests (for test assertions)
+ */
+ getRequestLog() {
+ return [...this.requestLog];
+ }
+ /**
+ * Get the last request made
+ */
+ getLastRequest() {
+ return this.requestLog[this.requestLog.length - 1];
+ }
+ /**
+ * Reset provider state for reuse across tests
+ */
+ reset() {
+ this.responseIndex = 0;
+ this.requestLog = [];
+ }
+ /**
+ * Replace the script with a new one and reset
+ */
+ setScript(script) {
+ this.script = script;
+ this.reset();
+ }
+ /**
+ * Check if all scripted responses were consumed
+ */
+ isScriptComplete() {
+ return this.responseIndex === this.script.length;
+ }
+ /**
+ * Get remaining unconsumed response count
+ */
+ remainingResponses() {
+ return this.script.length - this.responseIndex;
+ }
+ /**
+ * Get current response index
+ */
+ getCurrentIndex() {
+ return this.responseIndex;
+ }
+ };
+ }
+ });
+
  // src/agents/providers/ProviderRegistry.ts
  var ProviderRegistryImpl, ProviderRegistry;
  var init_ProviderRegistry = __esm({
  "src/agents/providers/ProviderRegistry.ts"() {
  init_OpenAIProvider();
  init_OpenRouterProvider();
+ init_TestProvider();
  ProviderRegistryImpl = class {
  factories = /* @__PURE__ */ new Map();
  providerCache = /* @__PURE__ */ new Map();
@@ -658,6 +995,7 @@ var init_ProviderRegistry = __esm({
  registerBuiltInProviders() {
  this.register("openai", (config) => new OpenAIProvider(config));
  this.register("openrouter", (config) => new OpenRouterProvider(config));
+ this.register("test", (config) => new TestProvider(config));
  }
  register(sdk, factory) {
  this.factories.set(sdk.toLowerCase(), factory);
@@ -733,6 +1071,14 @@ var init_ProviderRegistry = __esm({
  * Provider API keys come from environment variables.
  */
  async getProviderConfig(providerName, env) {
+ if (providerName.toLowerCase() === "test") {
+ return {
+ name: "test",
+ sdk: "test",
+ api_key: "test"
+ // Dummy key for test provider
+ };
+ }
  const apiKeyEnvVarMap = {
  openai: "OPENAI_API_KEY",
  openrouter: "OPENROUTER_API_KEY",
@@ -768,7 +1114,9 @@ __export(providers_exports, {
  BaseProvider: () => BaseProvider,
  OpenAIProvider: () => OpenAIProvider,
  OpenRouterProvider: () => OpenRouterProvider,
- ProviderRegistry: () => ProviderRegistry
+ ProviderRegistry: () => ProviderRegistry,
+ TestProvider: () => TestProvider,
+ TestScript: () => TestScript
  });
  var init_providers = __esm({
  "src/agents/providers/index.ts"() {
@@ -776,6 +1124,8 @@ var init_providers = __esm({
  init_BaseProvider();
  init_OpenAIProvider();
  init_OpenRouterProvider();
+ init_TestScript();
+ init_TestProvider();
  init_ProviderRegistry();
  }
  });
@@ -933,7 +1283,7 @@ var init_LLMRequest = __esm({
  message_id: state.rootMessageId || crypto.randomUUID(),
  provider: this.getProviderFromModel(modelId),
  model: modelId,
- model_name: modelName,
+ model_name: modelName ?? void 0,
  endpoint: "chat.completions",
  request_body: JSON.stringify({
  model: modelName || modelId,
@@ -944,11 +1294,11 @@ var init_LLMRequest = __esm({
  }),
  tools_available: context.tools ? context.tools.length : 0,
  message_history_length: context.messages.length,
- prompt_name: context.promptName || null,
- parent_log_id: context.parentLogId || null,
- retry_of_log_id: retryOfLogId || null,
- tools_schema: context.tools ? JSON.stringify(context.tools) : null,
- system_prompt: context.systemPrompt || null,
+ prompt_name: context.promptName ?? void 0,
+ parent_log_id: context.parentLogId ?? void 0,
+ retry_of_log_id: retryOfLogId ?? void 0,
+ tools_schema: context.tools ? JSON.stringify(context.tools) : void 0,
+ system_prompt: context.systemPrompt ?? void 0,
  is_complete: false,
  // Incomplete until response received
  created_at: Date.now() * TIMESTAMP_MULTIPLIER
@@ -1057,13 +1407,13 @@ var init_LLMRequest = __esm({
  total_tokens: response.usage.total_tokens,
  latency_ms: Date.now() - startTime,
  finish_reason: response.finish_reason,
- tools_called: toolsCalled,
- cost_total,
- provider: actualProvider,
+ tools_called: toolsCalled ?? void 0,
+ cost_total: cost_total ?? void 0,
+ provider: actualProvider ?? void 0,
  // Update provider with actual value if available
  is_complete: true,
  // Now complete
- reasoning_content: response.reasoning_content || null
+ reasoning_content: response.reasoning_content ?? void 0
  // Store reasoning separately
  };
  await state.stream.waitFor(async () => {
@@ -1910,7 +2260,7 @@ var init_ToolExecutor = __esm({
  if (state.parentMessageId !== void 0 && processedResult.status === "error") {
  const parentToolConfig = state.context.__parentToolConfig;
  let includeErrors = true;
- if (typeof parentToolConfig === "object" && parentToolConfig !== null) {
+ if (parentToolConfig !== void 0 && parentToolConfig !== null) {
  includeErrors = parentToolConfig.includeErrors ?? true;
  }
  if (includeErrors && !content.startsWith("Error: ")) {
@@ -2611,21 +2961,22 @@ var init_FlowEngine = __esm({
  id: newAgentName,
  title: newAgentDef.title || newAgentName,
  type: newAgentDef.type,
- max_session_turns: newAgentDef.maxSessionTurns,
- side_a_label: newAgentDef.sideA?.label,
- side_a_agent_prompt: newAgentDef.sideA?.prompt,
+ created_at: Date.now(),
+ max_session_turns: newAgentDef.maxSessionTurns ?? null,
+ side_a_label: newAgentDef.sideA?.label ?? null,
+ side_a_agent_prompt: newAgentDef.sideA?.prompt ?? null,
  side_a_stop_on_response: newAgentDef.sideA?.stopOnResponse ?? false,
- side_a_stop_tool: newAgentDef.sideA?.stopTool,
- side_a_stop_tool_response_property: newAgentDef.sideA?.stopToolResponseProperty,
- side_a_max_turns: newAgentDef.sideA?.maxTurns,
- side_a_end_conversation_tool: newAgentDef.sideA?.endConversationTool,
- side_b_label: newAgentDef.sideB?.label,
- side_b_agent_prompt: newAgentDef.sideB?.prompt,
+ side_a_stop_tool: newAgentDef.sideA?.stopTool ?? null,
+ side_a_stop_tool_response_property: newAgentDef.sideA?.stopToolResponseProperty ?? null,
+ side_a_max_turns: newAgentDef.sideA?.maxTurns ?? null,
+ side_a_end_conversation_tool: newAgentDef.sideA?.endConversationTool ?? null,
+ side_b_label: newAgentDef.sideB?.label ?? null,
+ side_b_agent_prompt: newAgentDef.sideB?.prompt ?? null,
  side_b_stop_on_response: newAgentDef.sideB?.stopOnResponse ?? false,
- side_b_stop_tool: newAgentDef.sideB?.stopTool,
- side_b_stop_tool_response_property: newAgentDef.sideB?.stopToolResponseProperty,
- side_b_max_turns: newAgentDef.sideB?.maxTurns,
- side_b_end_conversation_tool: newAgentDef.sideB?.endConversationTool
+ side_b_stop_tool: newAgentDef.sideB?.stopTool ?? null,
+ side_b_stop_tool_response_property: newAgentDef.sideB?.stopToolResponseProperty ?? null,
+ side_b_max_turns: newAgentDef.sideB?.maxTurns ?? null,
+ side_b_end_conversation_tool: newAgentDef.sideB?.endConversationTool ?? null
  };
  const { sideAPrompt, sideBPrompt } = await this.loadAgentAndPrompts(
  agentConfig,
@@ -5480,7 +5831,7 @@ function scanApiDirectory(dir, baseRoute = "") {
  routes.push(...scanApiDirectory(fullPath, subRoute));
  } else if (entry.isFile() && entry.name.endsWith(".ts")) {
  const fileName = entry.name.replace(/\.ts$/, "");
- let method = "GET";
+ let method = "";
  let routePath = baseRoute;
  if (fileName.includes(".")) {
  const parts = fileName.split(".");
@@ -5848,12 +6199,12 @@ function agentbuilder(options = {}) {
  }
  return {
  name: "vite-plugin-agent",
+ enforce: "pre",
  config() {
  const depsToExclude = [
  "@standardagents/builder",
  "@standardagents/builder/runtime",
- "@standardagents/builder/built-in-routes",
- "@standardagents/builder/mcp"
+ "@standardagents/builder/built-in-routes"
  ];
  return {
  optimizeDeps: {
@@ -5862,8 +6213,6 @@ function agentbuilder(options = {}) {
  exclude: depsToExclude
  },
  ssr: {
- // Only MCP should be external (not used in worker)
- external: ["@standardagents/builder/mcp"],
  // noExternal ensures Vite transforms these instead of leaving as external
  // The Cloudflare plugin handles the actual bundling and knows about cloudflare:workers
  noExternal: [
@@ -5871,11 +6220,6 @@ function agentbuilder(options = {}) {
  "@standardagents/builder/runtime",
  "@standardagents/builder/built-in-routes"
  ]
- },
- build: {
- rollupOptions: {
- external: ["@standardagents/builder/mcp"]
- }
  }
  };
  },
@@ -5884,8 +6228,7 @@ function agentbuilder(options = {}) {
  const depsToExclude = [
  "@standardagents/builder",
  "@standardagents/builder/runtime",
- "@standardagents/builder/built-in-routes",
- "@standardagents/builder/mcp"
+ "@standardagents/builder/built-in-routes"
  ];
  config.optimizeDeps = config.optimizeDeps || {};
  config.optimizeDeps.exclude = [
@@ -6630,6 +6973,101 @@ export class DurableAgentBuilder extends _BaseDurableAgentBuilder {
  return;
  }
  }
+ if (pathWithoutMount === "/api/prompts" && method === "GET") {
+ try {
+ const fs4 = await import('fs');
+ const files = fs4.existsSync(promptsDir) ? fs4.readdirSync(promptsDir).filter((f) => f.endsWith(".ts")) : [];
+ const modelFiles = fs4.existsSync(modelsDir) ? fs4.readdirSync(modelsDir).filter((f) => f.endsWith(".ts")) : [];
+ const modelMap = {};
+ for (const file of modelFiles) {
+ try {
+ const filePath = path3.join(modelsDir, file);
+ const content = fs4.readFileSync(filePath, "utf-8");
+ const name = content.match(/name:\s*['"]([^'"]+)['"]/)?.[1];
+ const provider = content.match(/provider:\s*['"]([^'"]+)['"]/)?.[1];
+ if (name) {
+ modelMap[name] = { name, provider: provider || "unknown" };
+ }
+ } catch (error) {
+ }
+ }
+ const promptList = files.map((file) => {
+ try {
+ const filePath = path3.join(promptsDir, file);
+ const content = fs4.readFileSync(filePath, "utf-8");
+ const getName = (c) => c.match(/name:\s*['"]([^'"]+)['"]/)?.[1];
+ const getToolDescription = (c) => c.match(/toolDescription:\s*['"]([^'"]+)['"]/)?.[1];
+ const getModel = (c) => c.match(/model:\s*['"]([^'"]+)['"]/)?.[1];
+ const getIncludeChat = (c) => c.match(/includeChat:\s*(true|false)/)?.[1] === "true";
+ const getIncludePastTools = (c) => c.match(/includePastTools:\s*(true|false)/)?.[1] === "true";
+ const getParallelToolCalls = (c) => c.match(/parallelToolCalls:\s*(true|false)/)?.[1] === "true";
+ const getToolChoice = (c) => c.match(/toolChoice:\s*['"]([^'"]+)['"]/)?.[1];
+ const getBeforeTool = (c) => c.match(/beforeTool:\s*['"]([^'"]+)['"]/)?.[1];
+ const getAfterTool = (c) => c.match(/afterTool:\s*['"]([^'"]+)['"]/)?.[1];
+ const getTools = (c) => {
+ const match = c.match(/tools:\s*\[([^\]]*)\]/);
+ if (!match) return [];
+ const items = match[1].match(/['"]([^'"]+)['"]/g);
+ return items ? items.map((s) => s.replace(/['"]/g, "")) : [];
+ };
+ const getHandoffAgents = (c) => {
+ const match = c.match(/handoffAgents:\s*\[([^\]]*)\]/);
+ if (!match) return [];
+ const items = match[1].match(/['"]([^'"]+)['"]/g);
+ return items ? items.map((s) => s.replace(/['"]/g, "")) : [];
+ };
+ const getPrompt = (c) => {
+ const backtickMatch = c.match(/prompt:\s*`([\s\S]*?)`/);
+ if (backtickMatch) return backtickMatch[1];
+ const quotedMatch = c.match(/prompt:\s*['"]([^'"]*)['"]/);
+ if (quotedMatch) return quotedMatch[1];
+ const arrayMatch = c.match(/prompt:\s*(\[[\s\S]*?\])/);
+ if (arrayMatch) return arrayMatch[1];
+ return "";
+ };
+ const name = getName(content);
+ if (!name) return null;
+ const modelId = getModel(content);
+ const modelDef = modelId ? modelMap[modelId] : null;
+ return {
+ id: name,
+ name,
+ tool_description: getToolDescription(content) || "",
+ prompt: getPrompt(content),
+ required_schema: null,
+ // Complex to parse, skip for now
+ model_id: modelId || "",
+ model_name: modelDef?.name || modelId || "",
+ model_provider: modelDef?.provider || "unknown",
+ include_chat: getIncludeChat(content),
+ include_past_tools: getIncludePastTools(content),
+ parallel_tool_calls: getParallelToolCalls(content),
+ tool_choice: getToolChoice(content) || "auto",
+ before_tool: getBeforeTool(content) || null,
+ after_tool: getAfterTool(content) || null,
+ tools: getTools(content),
+ prompts: getHandoffAgents(content),
+ reasoning: null,
+ // Complex to parse
+ created_at: Math.floor(Date.now() / 1e3)
+ };
+ } catch (error) {
+ console.error(`Error loading prompt ${file}:`, error);
+ return null;
+ }
+ });
+ const validPrompts = promptList.filter(Boolean);
+ res.statusCode = 200;
+ res.setHeader("Content-Type", "application/json");
+ res.end(JSON.stringify({ prompts: validPrompts }));
+ return;
+ } catch (error) {
+ res.statusCode = 500;
+ res.setHeader("Content-Type", "application/json");
+ res.end(JSON.stringify({ error: error.message || "Failed to list prompts" }));
+ return;
+ }
+ }
  if (pathWithoutMount === "/api/prompts" && method === "POST") {
  try {
  const rawBody = await parseRequestBody(req);
@@ -6755,6 +7193,54 @@ export class DurableAgentBuilder extends _BaseDurableAgentBuilder {
  return;
  }
  }
+ if (pathWithoutMount === "/api/agents" && method === "GET") {
+ try {
+ const fs4 = await import('fs');
+ const files = fs4.existsSync(agentsDir) ? fs4.readdirSync(agentsDir).filter((f) => f.endsWith(".ts")) : [];
+ const agentList = files.map((file) => {
+ try {
+ const filePath = path3.join(agentsDir, file);
+ const content = fs4.readFileSync(filePath, "utf-8");
+ const getName = (c) => c.match(/name:\s*['"]([^'"]+)['"]/)?.[1];
+ const getTitle = (c) => c.match(/title:\s*['"]([^'"]+)['"]/)?.[1];
+ const getType = (c) => c.match(/type:\s*['"]([^'"]+)['"]/)?.[1];
+ const getDefaultPrompt = (c) => c.match(/defaultPrompt:\s*['"]([^'"]+)['"]/)?.[1];
+ const getDefaultModel = (c) => c.match(/defaultModel:\s*['"]([^'"]+)['"]/)?.[1];
+ const getTools = (c) => {
+ const match = c.match(/tools:\s*\[([^\]]*)\]/);
+ if (!match) return [];
+ const items = match[1].match(/['"]([^'"]+)['"]/g);
+ return items ? items.map((s) => s.replace(/['"]/g, "")) : [];
+ };
+ const name = getName(content);
+ if (!name) return null;
+ return {
+ id: name,
+ name,
+ title: getTitle(content) || name,
+ type: getType(content) || "ai_human",
+ default_prompt: getDefaultPrompt(content) || "",
+ default_model: getDefaultModel(content) || "",
+ tools: getTools(content),
+ created_at: Math.floor(Date.now() / 1e3)
+ };
+ } catch (error) {
+ console.error(`Error loading agent ${file}:`, error);
+ return null;
+ }
+ });
+ const validAgents = agentList.filter(Boolean);
+ res.statusCode = 200;
+ res.setHeader("Content-Type", "application/json");
+ res.end(JSON.stringify({ agents: validAgents }));
+ return;
+ } catch (error) {
+ res.statusCode = 500;
+ res.setHeader("Content-Type", "application/json");
+ res.end(JSON.stringify({ error: error.message || "Failed to list agents" }));
+ return;
+ }
+ }
  if (pathWithoutMount === "/api/agents" && method === "POST") {
  try {
  const rawBody = await parseRequestBody(req);
@@ -10203,7 +10689,7 @@ function defineModel(options) {
  if (!options.model) {
  throw new Error("Model ID is required");
  }
- const validProviders = ["openai", "openrouter", "anthropic", "google"];
+ const validProviders = ["openai", "openrouter", "anthropic", "google", "test"];
  if (!validProviders.includes(options.provider)) {
  throw new Error(
  `Invalid provider '${options.provider}'. Must be one of: ${validProviders.join(", ")}`
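
Taken together, the additions above introduce a scripted "test" provider (TestScript + TestProvider, registered under the provider name "test" and accepted by defineModel). The following is a minimal usage sketch, not taken from the package's documentation: the import path and the exact constructor options are assumptions inferred from the code in this diff.

// Hedged sketch based only on the code added in this diff; the import path is assumed,
// and BaseProvider may require additional config fields not shown here.
import { TestScript, TestProvider } from "@standardagents/builder"; // assumed export location

const script = TestScript.create()
  .addToolCallResponse([{ name: "get_weather", arguments: { city: "Oslo" } }]) // hypothetical tool name
  .addTextResponse("It is 3 degrees in Oslo right now.")
  .withStreamingSimulation({ chunkSize: 8, chunkDelayMs: 0 });

const provider = new TestProvider({ script, validateInputs: true, debugLog: false });

// provider.chat(modelId, context, state, logId, signal) replays the scripted responses in order;
// getRequestLog(), isScriptComplete(), and reset() support test assertions and reuse across tests.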