@mcpjam/inspector 0.9.3 → 0.9.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -6
- package/dist/client/assets/index-C-vxFpw4.css +1 -0
- package/dist/client/assets/index-CTiiyjex.js +1723 -0
- package/dist/client/assets/index-CTiiyjex.js.map +1 -0
- package/dist/client/deepseek_logo.svg +1 -0
- package/dist/client/index.html +2 -2
- package/dist/server/index.js +362 -342
- package/dist/server/index.js.map +1 -1
- package/package.json +8 -6
- package/dist/client/assets/index-Bv0p7Vhi.css +0 -1
- package/dist/client/assets/index-DzF9hvwr.js +0 -1713
- package/dist/client/assets/index-DzF9hvwr.js.map +0 -1
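The main server-side change in this release is a rewritten streaming chat route in `package/dist/server/index.js`. The route frames every event as a server-sent-events line (`data: <json>` followed by a blank line) and terminates the stream with `data: [DONE]`. The following type sketch is inferred from the `JSON.stringify` payloads visible in the diff below; the `ChatStreamEvent` name and the TypeScript framing are illustrative, not part of the package's public API.

```ts
// Sketch of the SSE payloads emitted by routes/mcp/chat.ts, as read from the
// diff below. Type name and field optionality are inferred, not documented.
type ChatStreamEvent =
  | { type: "text"; content: string; error?: string }
  | {
      type: "tool_call";
      toolCall: {
        id: number;
        name: string;
        parameters: unknown;
        timestamp: Date;
        status: "executing";
      };
    }
  | {
      type: "tool_result";
      toolResult: {
        id: number;
        toolCallId: number;
        result?: unknown;
        error?: string;
        timestamp: Date;
      };
    }
  | {
      type: "elicitation_request";
      requestId: string;
      message: string;
      schema: unknown;
      timestamp: Date;
    }
  | { type: "elicitation_complete" };
```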
package/dist/server/index.js
CHANGED
@@ -647,10 +647,24 @@ import { Agent } from "@mastra/core/agent";
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenAI } from "@ai-sdk/openai";
 import { createOllama } from "ollama-ai-provider";
-import { MCPClient as MCPClient2 } from "@mastra/mcp";
 import { TextEncoder as TextEncoder2 } from "util";
-
+
+// ../client/src/lib/chat-utils.ts
+function getDefaultTemperatureByProvider(provider) {
+  switch (provider) {
+    case "openai":
+      return 1;
+    case "anthropic":
+      return 0;
+    default:
+      return 0;
+  }
+}
+
+// routes/mcp/chat.ts
 var DEBUG_ENABLED = process.env.MCP_DEBUG !== "false";
+var ELICITATION_TIMEOUT = 3e5;
+var MAX_AGENT_STEPS = 10;
 var dbg = (...args) => {
   if (DEBUG_ENABLED) console.log("[mcp/chat]", ...args);
 };
@@ -659,6 +673,287 @@ try {
 } catch {
 }
 var pendingElicitations2 = /* @__PURE__ */ new Map();
+var chat = new Hono5();
+var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl) => {
+  if (!modelDefinition?.id || !modelDefinition?.provider) {
+    throw new Error(
+      `Invalid model definition: ${JSON.stringify(modelDefinition)}`
+    );
+  }
+  switch (modelDefinition.provider) {
+    case "anthropic":
+      return createAnthropic({ apiKey })(modelDefinition.id);
+    case "openai":
+      return createOpenAI({ apiKey })(modelDefinition.id);
+    case "deepseek":
+      return createOpenAI({ apiKey, baseURL: "https://api.deepseek.com/v1" })(
+        modelDefinition.id
+      );
+    case "ollama":
+      const baseUrl = ollamaBaseUrl || "http://localhost:11434";
+      return createOllama({
+        baseURL: `${baseUrl}`
+      })(modelDefinition.id, {
+        simulateStreaming: true
+      });
+    default:
+      throw new Error(
+        `Unsupported provider: ${modelDefinition.provider} for model: ${modelDefinition.id}`
+      );
+  }
+};
+var createElicitationHandler = (streamingContext) => {
+  return async (elicitationRequest) => {
+    const requestId = `elicit_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+    if (streamingContext.controller && streamingContext.encoder) {
+      streamingContext.controller.enqueue(
+        streamingContext.encoder.encode(
+          `data: ${JSON.stringify({
+            type: "elicitation_request",
+            requestId,
+            message: elicitationRequest.message,
+            schema: elicitationRequest.requestedSchema,
+            timestamp: /* @__PURE__ */ new Date()
+          })}
+
+`
+        )
+      );
+    }
+    return new Promise((resolve, reject) => {
+      pendingElicitations2.set(requestId, { resolve, reject });
+      setTimeout(() => {
+        if (pendingElicitations2.has(requestId)) {
+          pendingElicitations2.delete(requestId);
+          reject(new Error("Elicitation timeout"));
+        }
+      }, ELICITATION_TIMEOUT);
+    });
+  };
+};
+var wrapToolsWithStreaming = (tools2, streamingContext) => {
+  const wrappedTools = {};
+  for (const [name, tool] of Object.entries(tools2)) {
+    wrappedTools[name] = {
+      ...tool,
+      execute: async (params) => {
+        const currentToolCallId = ++streamingContext.toolCallId;
+        const startedAt = Date.now();
+        if (streamingContext.controller && streamingContext.encoder) {
+          streamingContext.controller.enqueue(
+            streamingContext.encoder.encode(
+              `data: ${JSON.stringify({
+                type: "tool_call",
+                toolCall: {
+                  id: currentToolCallId,
+                  name,
+                  parameters: params,
+                  timestamp: /* @__PURE__ */ new Date(),
+                  status: "executing"
+                }
+              })}
+
+`
+            )
+          );
+        }
+        dbg("Tool executing", { name, currentToolCallId, params });
+        try {
+          const result = await tool.execute(params);
+          dbg("Tool result", {
+            name,
+            currentToolCallId,
+            ms: Date.now() - startedAt
+          });
+          if (streamingContext.controller && streamingContext.encoder) {
+            streamingContext.controller.enqueue(
+              streamingContext.encoder.encode(
+                `data: ${JSON.stringify({
+                  type: "tool_result",
+                  toolResult: {
+                    id: currentToolCallId,
+                    toolCallId: currentToolCallId,
+                    result,
+                    timestamp: /* @__PURE__ */ new Date()
+                  }
+                })}
+
+`
+              )
+            );
+          }
+          return result;
+        } catch (error) {
+          dbg("Tool error", {
+            name,
+            currentToolCallId,
+            error: error instanceof Error ? error.message : String(error)
+          });
+          if (streamingContext.controller && streamingContext.encoder) {
+            streamingContext.controller.enqueue(
+              streamingContext.encoder.encode(
+                `data: ${JSON.stringify({
+                  type: "tool_result",
+                  toolResult: {
+                    id: currentToolCallId,
+                    toolCallId: currentToolCallId,
+                    error: error instanceof Error ? error.message : String(error),
+                    timestamp: /* @__PURE__ */ new Date()
+                  }
+                })}
+
+`
+              )
+            );
+          }
+          throw error;
+        }
+      }
+    };
+  }
+  return wrappedTools;
+};
+var handleAgentStepFinish = (streamingContext, text, toolCalls, toolResults) => {
+  try {
+    if (toolCalls && Array.isArray(toolCalls)) {
+      for (const call of toolCalls) {
+        const currentToolCallId = ++streamingContext.toolCallId;
+        streamingContext.lastEmittedToolCallId = currentToolCallId;
+        if (streamingContext.controller && streamingContext.encoder) {
+          streamingContext.controller.enqueue(
+            streamingContext.encoder.encode(
+              `data: ${JSON.stringify({
+                type: "tool_call",
+                toolCall: {
+                  id: currentToolCallId,
+                  name: call.name || call.toolName,
+                  parameters: call.params || call.args || {},
+                  timestamp: /* @__PURE__ */ new Date(),
+                  status: "executing"
+                }
+              })}
+
+`
+            )
+          );
+        }
+      }
+    }
+    if (toolResults && Array.isArray(toolResults)) {
+      for (const result of toolResults) {
+        const currentToolCallId = streamingContext.lastEmittedToolCallId != null ? streamingContext.lastEmittedToolCallId : ++streamingContext.toolCallId;
+        if (streamingContext.controller && streamingContext.encoder) {
+          streamingContext.controller.enqueue(
+            streamingContext.encoder.encode(
+              `data: ${JSON.stringify({
+                type: "tool_result",
+                toolResult: {
+                  id: currentToolCallId,
+                  toolCallId: currentToolCallId,
+                  result: result.result,
+                  error: result.error,
+                  timestamp: /* @__PURE__ */ new Date()
+                }
+              })}
+
+`
+            )
+          );
+        }
+      }
+    }
+  } catch (err) {
+    dbg("onStepFinish error", err);
+  }
+};
+var streamAgentResponse = async (streamingContext, stream) => {
+  let hasContent = false;
+  let chunkCount = 0;
+  for await (const chunk of stream.textStream) {
+    if (chunk && chunk.trim()) {
+      hasContent = true;
+      chunkCount++;
+      streamingContext.controller.enqueue(
+        streamingContext.encoder.encode(
+          `data: ${JSON.stringify({ type: "text", content: chunk })}
+
+`
+        )
+      );
+    }
+  }
+  dbg("Streaming finished", { hasContent, chunkCount });
+  return { hasContent, chunkCount };
+};
+var fallbackToCompletion = async (agent, messages, streamingContext, provider) => {
+  try {
+    const result = await agent.generate(messages, {
+      temperature: getDefaultTemperatureByProvider(provider)
+    });
+    if (result.text && result.text.trim()) {
+      streamingContext.controller.enqueue(
+        streamingContext.encoder.encode(
+          `data: ${JSON.stringify({
+            type: "text",
+            content: result.text
+          })}
+
+`
+        )
+      );
+    }
+  } catch (fallbackErr) {
+    streamingContext.controller.enqueue(
+      streamingContext.encoder.encode(
+        `data: ${JSON.stringify({
+          type: "text",
+          content: "Failed to generate response. Please try again. ",
+          error: fallbackErr instanceof Error ? fallbackErr.message : "Unknown error"
+        })}
+
+`
+      )
+    );
+  }
+};
+var safeDisconnect = async (client) => {
+  if (client) {
+    try {
+      await client.disconnect();
+    } catch (cleanupError) {
+      console.warn("[mcp/chat] Error cleaning up MCP client:", cleanupError);
+    }
+  }
+};
+var createStreamingResponse = async (agent, messages, toolsets, streamingContext, provider) => {
+  const stream = await agent.stream(messages, {
+    maxSteps: MAX_AGENT_STEPS,
+    temperature: getDefaultTemperatureByProvider(provider),
+    toolsets,
+    onStepFinish: ({ text, toolCalls, toolResults }) => {
+      handleAgentStepFinish(streamingContext, text, toolCalls, toolResults);
+    }
+  });
+  const { hasContent } = await streamAgentResponse(streamingContext, stream);
+  if (!hasContent) {
+    dbg("No content from textStream; falling back to completion");
+    await fallbackToCompletion(agent, messages, streamingContext, provider);
+  }
+  streamingContext.controller.enqueue(
+    streamingContext.encoder.encode(
+      `data: ${JSON.stringify({
+        type: "elicitation_complete"
+      })}
+
+`
+    )
+  );
+  streamingContext.controller.enqueue(
+    streamingContext.encoder.encode(`data: [DONE]
+
+`)
+  );
+};
 chat.post("/", async (c) => {
   let client = null;
   try {
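The helpers above assemble one SSE stream per request; a client reads `data:`-framed JSON until the `[DONE]` sentinel. A minimal consumer sketch, assuming a JSON POST body and a hypothetical mount path of `/api/mcp/chat` (the actual mount point is outside this diff):

```ts
// Minimal SSE consumer for the stream built by createStreamingResponse.
// The endpoint path below is a placeholder; it is not shown in this diff.
async function readChatStream(body: unknown): Promise<void> {
  const res = await fetch("/api/mcp/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (!res.body) throw new Error("No response body");
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const frames = buffer.split("\n\n"); // events are blank-line separated
    buffer = frames.pop() ?? "";
    for (const frame of frames) {
      const data = frame.replace(/^data: /, "").trim();
      if (!data) continue;
      if (data === "[DONE]") return; // terminal sentinel from the server
      console.log(JSON.parse(data)); // text | tool_call | tool_result | ...
    }
  }
}
```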
@@ -666,6 +961,7 @@ chat.post("/", async (c) => {
     const {
       serverConfigs,
       model,
+      provider,
       apiKey,
       systemPrompt,
       messages,
@@ -698,7 +994,7 @@ chat.post("/", async (c) => {
       pendingElicitations2.delete(requestId);
       return c.json({ success: true });
     }
-    if (!model
+    if (!model?.id || !apiKey || !messages) {
       return c.json(
         {
           success: false,
@@ -707,323 +1003,88 @@ chat.post("/", async (c) => {
         400
       );
     }
-
-
-
-
-
-
-
-
-      if (!validation.success) {
-        dbg("Server config validation failed", validation.errors || validation.error);
-        return c.json(
-          {
-            success: false,
-            error: validation.error.message,
-            details: validation.errors
-          },
-          validation.error.status
-        );
-      }
-      client = createMCPClientWithMultipleConnections(validation.validConfigs);
-      dbg("Created MCP client with servers", Object.keys(validation.validConfigs));
-    } else {
-      client = new MCPClient2({
-        id: `chat-${Date.now()}`,
-        servers: {}
-      });
-      dbg("Created MCP client without servers");
+    if (!serverConfigs || Object.keys(serverConfigs).length === 0) {
+      return c.json(
+        {
+          success: false,
+          error: "No server configs provided"
+        },
+        400
+      );
     }
-    const
-
-
-
-
-
-
-
-
-
-
-
-
-              message: elicitationRequest.message,
-              schema: elicitationRequest.requestedSchema,
-              timestamp: /* @__PURE__ */ new Date()
-            })}
-
-`
-          )
-        );
-      }
-      dbg("Elicitation requested", { requestId: requestId2 });
-      return new Promise((resolve, reject) => {
-        pendingElicitations2.set(requestId2, { resolve, reject });
-        setTimeout(() => {
-          if (pendingElicitations2.has(requestId2)) {
-            pendingElicitations2.delete(requestId2);
-            reject(new Error("Elicitation timeout"));
-          }
-        }, 3e5);
-      });
-    };
-    if (client.elicitation && client.elicitation.onRequest && serverConfigs) {
-      for (const serverName of Object.keys(serverConfigs)) {
-        const normalizedName = serverName.toLowerCase().replace(/[\s\-]+/g, "_").replace(/[^a-z0-9_]/g, "");
-        client.elicitation.onRequest(normalizedName, elicitationHandler);
-        dbg("Registered elicitation handler", { serverName, normalizedName });
-      }
+    const validation = validateMultipleServerConfigs(serverConfigs);
+    if (!validation.success) {
+      dbg(
+        "Server config validation failed",
+        validation.errors || validation.error
+      );
+      return c.json(
+        {
+          success: false,
+          error: validation.error.message,
+          details: validation.errors
+        },
+        validation.error.status
+      );
     }
+    client = createMCPClientWithMultipleConnections(validation.validConfigs);
+    const llmModel = createLlmModel(model, apiKey, ollamaBaseUrl);
     const tools2 = await client.getTools();
-    const originalTools = tools2 && Object.keys(tools2).length > 0 ? tools2 : {};
-    const wrappedTools = {};
-    for (const [name, tool] of Object.entries(originalTools)) {
-      wrappedTools[name] = {
-        ...tool,
-        execute: async (params) => {
-          const currentToolCallId = ++toolCallId;
-          const startedAt = Date.now();
-          if (streamController && encoder) {
-            streamController.enqueue(
-              encoder.encode(
-                `data: ${JSON.stringify({
-                  type: "tool_call",
-                  toolCall: {
-                    id: currentToolCallId,
-                    name,
-                    parameters: params,
-                    timestamp: /* @__PURE__ */ new Date(),
-                    status: "executing"
-                  }
-                })}
-
-`
-              )
-            );
-          }
-          dbg("Tool executing", { name, currentToolCallId, params });
-          try {
-            const result = await tool.execute(params);
-            dbg("Tool result", { name, currentToolCallId, ms: Date.now() - startedAt });
-            if (streamController && encoder) {
-              streamController.enqueue(
-                encoder.encode(
-                  `data: ${JSON.stringify({
-                    type: "tool_result",
-                    toolResult: {
-                      id: currentToolCallId,
-                      toolCallId: currentToolCallId,
-                      result,
-                      timestamp: /* @__PURE__ */ new Date()
-                    }
-                  })}
-
-`
-                )
-              );
-            }
-            return result;
-          } catch (error) {
-            dbg("Tool error", { name, currentToolCallId, error: error instanceof Error ? error.message : String(error) });
-            if (streamController && encoder) {
-              streamController.enqueue(
-                encoder.encode(
-                  `data: ${JSON.stringify({
-                    type: "tool_result",
-                    toolResult: {
-                      id: currentToolCallId,
-                      toolCallId: currentToolCallId,
-                      error: error instanceof Error ? error.message : String(error),
-                      timestamp: /* @__PURE__ */ new Date()
-                    }
-                  })}
-
-`
-                )
-              );
-            }
-            throw error;
-          }
-        }
-      };
-    }
     const agent = new Agent({
       name: "MCP Chat Agent",
       instructions: systemPrompt || "You are a helpful assistant with access to MCP tools.",
       model: llmModel,
-      tools:
+      tools: void 0
+      // Start without tools, add them in streaming context
     });
     const formattedMessages = messages.map((msg) => ({
       role: msg.role,
       content: msg.content
     }));
-    const toolsets =
+    const toolsets = await client.getToolsets();
     dbg("Streaming start", {
-      toolsetServers:
+      toolsetServers: Object.keys(toolsets),
       messageCount: formattedMessages.length
     });
-
-    const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                encoder.encode(
-                  `data: ${JSON.stringify({
-                    type: "tool_call",
-                    toolCall: {
-                      id: currentToolCallId,
-                      name: call.name || call.toolName,
-                      parameters: call.params || call.args || {},
-                      timestamp: /* @__PURE__ */ new Date(),
-                      status: "executing"
-                    }
-                  })}
-
-`
-                )
-              );
-            }
-          }
-        }
-        const trList = toolResults;
-        if (trList && Array.isArray(trList)) {
-          for (const result of trList) {
-            const currentToolCallId = lastEmittedToolCallId != null ? lastEmittedToolCallId : ++toolCallId;
-            if (streamController && encoder) {
-              streamController.enqueue(
-                encoder.encode(
-                  `data: ${JSON.stringify({
-                    type: "tool_result",
-                    toolResult: {
-                      id: currentToolCallId,
-                      toolCallId: currentToolCallId,
-                      result: result.result,
-                      error: result.error,
-                      timestamp: /* @__PURE__ */ new Date()
-                    }
-                  })}
-
-`
-                )
-              );
-            }
-          }
+    const encoder = new TextEncoder2();
+    const readableStream = new ReadableStream({
+      async start(controller) {
+        const streamingContext = {
+          controller,
+          encoder,
+          toolCallId: 0,
+          lastEmittedToolCallId: null
+        };
+        const streamingWrappedTools = wrapToolsWithStreaming(
+          tools2,
+          streamingContext
+        );
+        const streamingAgent = new Agent({
+          name: agent.name,
+          instructions: agent.instructions,
+          model: agent.model,
+          tools: Object.keys(streamingWrappedTools).length > 0 ? streamingWrappedTools : void 0
+        });
+        if (client?.elicitation?.onRequest) {
+          for (const serverName of Object.keys(serverConfigs)) {
+            const normalizedName = normalizeServerConfigName(serverName);
+            const elicitationHandler = createElicitationHandler(streamingContext);
+            client.elicitation.onRequest(normalizedName, elicitationHandler);
           }
-      } catch (err) {
-        dbg("onStepFinish error", err);
         }
-      },
-      onFinish: ({ text, finishReason }) => {
-        dbg("onFinish called", { finishReason, hasText: Boolean(text) });
         try {
-          if (
-
-
-
-
-
-
-          )
+          if (client) {
+            await createStreamingResponse(
+              streamingAgent,
+              formattedMessages,
+              toolsets,
+              streamingContext,
+              provider
             );
+          } else {
+            throw new Error("MCP client is null");
           }
-        } catch (err) {
-          dbg("onFinish enqueue error", err);
-        }
-      }
-    });
-    encoder = new TextEncoder2();
-    const readableStream = new ReadableStream({
-      async start(controller) {
-        streamController = controller;
-        try {
-          let hasContent = false;
-          let chunkCount = 0;
-          for await (const chunk of stream.textStream) {
-            if (chunk && chunk.trim()) {
-              hasContent = true;
-              chunkCount++;
-              controller.enqueue(
-                encoder.encode(
-                  `data: ${JSON.stringify({ type: "text", content: chunk })}
-
-`
-                )
-              );
-            }
-          }
-          dbg("Streaming finished", { hasContent, chunkCount });
-          if (!hasContent && !streamedAnyText) {
-            dbg("No content from textStream/callbacks; falling back to generate()");
-            try {
-              const gen = await agent.generate(formattedMessages, {
-                maxSteps: 10,
-                toolsets
-              });
-              const finalText = gen.text || "";
-              if (finalText) {
-                controller.enqueue(
-                  encoder.encode(
-                    `data: ${JSON.stringify({ type: "text", content: finalText })}
-
-`
-                  )
-                );
-              } else {
-                dbg("generate() also returned empty text");
-                controller.enqueue(
-                  encoder.encode(
-                    `data: ${JSON.stringify({ type: "text", content: "I apologize, but I couldn't generate a response. Please try again." })}
-
-`
-                  )
-                );
-              }
-            } catch (fallbackErr) {
-              console.error("[mcp/chat] Fallback generate() error:", fallbackErr);
-              controller.enqueue(
-                encoder.encode(
-                  `data: ${JSON.stringify({ type: "error", error: fallbackErr instanceof Error ? fallbackErr.message : String(fallbackErr) })}
-
-`
-                )
-              );
-            }
-          }
-          controller.enqueue(
-            encoder.encode(
-              `data: ${JSON.stringify({
-                type: "elicitation_complete"
-              })}
-
-`
-            )
-          );
-          controller.enqueue(encoder.encode(`data: [DONE]
-
-`));
         } catch (error) {
-          console.error("[mcp/chat] Streaming error:", error);
           controller.enqueue(
             encoder.encode(
               `data: ${JSON.stringify({
@@ -1035,16 +1096,7 @@ chat.post("/", async (c) => {
             )
           );
         } finally {
-
-            try {
-              await client.disconnect();
-            } catch (cleanupError) {
-              console.warn(
-                "[mcp/chat] Error cleaning up MCP client after streaming:",
-                cleanupError
-              );
-            }
-          }
+          await safeDisconnect(client);
           controller.close();
         }
       }
@@ -1058,13 +1110,7 @@ chat.post("/", async (c) => {
     });
   } catch (error) {
     console.error("[mcp/chat] Error in chat API:", error);
-
-      try {
-        await client.disconnect();
-      } catch (cleanupError) {
-        console.warn("Error cleaning up MCP client after error:", cleanupError);
-      }
-    }
+    await safeDisconnect(client);
     return c.json(
       {
         success: false,
@@ -1074,32 +1120,6 @@ chat.post("/", async (c) => {
     );
   }
 });
-var getLlmModel = (modelDefinition, apiKey, ollamaBaseUrl) => {
-  if (!modelDefinition || !modelDefinition.id || !modelDefinition.provider) {
-    throw new Error(
-      `Invalid model definition: ${JSON.stringify(modelDefinition)}`
-    );
-  }
-  switch (modelDefinition.provider) {
-    case "anthropic":
-      return createAnthropic({ apiKey })(modelDefinition.id);
-    case "openai":
-      return createOpenAI({ apiKey })(modelDefinition.id);
-    case "ollama":
-      const baseUrl = ollamaBaseUrl || "http://localhost:11434";
-      return createOllama({
-        // The provider expects the root Ollama URL; it internally targets the /api endpoints
-        baseURL: `${baseUrl}`
-      })(modelDefinition.id, {
-        simulateStreaming: true
-        // Enable streaming for Ollama models
-      });
-    default:
-      throw new Error(
-        `Unsupported provider: ${modelDefinition.provider} for model: ${modelDefinition.id}`
-      );
-  }
-};
 var chat_default = chat;
 
 // routes/mcp/oauth.ts
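A recurring pattern in this file is the elicitation round trip: `createElicitationHandler` parks a promise in `pendingElicitations2` under a generated `requestId` and rejects it after `ELICITATION_TIMEOUT` (3e5 ms, i.e. five minutes), while a later POST carrying the same `requestId` resolves it (the `pendingElicitations2.delete(requestId); return c.json({ success: true });` branch above). A standalone sketch of that pattern, with illustrative names:

```ts
// Promise-parking with a timeout, as used for elicitation above.
// `park` and `answer` are illustrative names, not the package's API.
const ELICITATION_TIMEOUT = 3e5; // 300 000 ms

interface PendingElicitation {
  resolve: (response: unknown) => void;
  reject: (err: Error) => void;
}
const pending = new Map<string, PendingElicitation>();

// Server side of the request: emit the requestId to the client (over SSE
// here), then wait until a matching answer arrives or the timeout fires.
function park(requestId: string): Promise<unknown> {
  return new Promise((resolve, reject) => {
    pending.set(requestId, { resolve, reject });
    setTimeout(() => {
      if (pending.has(requestId)) {
        pending.delete(requestId);
        reject(new Error("Elicitation timeout"));
      }
    }, ELICITATION_TIMEOUT);
  });
}

// Invoked by the HTTP handler when the client posts its response.
function answer(requestId: string, response: unknown): boolean {
  const entry = pending.get(requestId);
  if (!entry) return false; // unknown or already timed out
  pending.delete(requestId);
  entry.resolve(response);
  return true;
}
```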