@mcpjam/inspector 0.9.2 → 0.9.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/LICENSE +13 -0
  2. package/README.md +25 -6
  3. package/bin/start.js +6 -3
  4. package/dist/client/assets/index-C-vxFpw4.css +1 -0
  5. package/dist/client/assets/index-CTiiyjex.js +1723 -0
  6. package/dist/client/assets/index-CTiiyjex.js.map +1 -0
  7. package/dist/client/deepseek_logo.svg +1 -0
  8. package/dist/client/index.html +2 -2
  9. package/dist/server/index.js +381 -211
  10. package/dist/server/index.js.map +1 -1
  11. package/package.json +15 -10
  12. package/dist/client/assets/index-BxtnlkQW.css +0 -1
  13. package/dist/client/assets/index-C2tpp_KC.js +0 -357
  14. package/dist/client/assets/index-C2tpp_KC.js.map +0 -1
  15. package/dist/main/main.cjs +0 -1315
  16. package/dist/preload/preload.js +0 -26
  17. package/dist/renderer/assets/index-CYiU4_x2.css +0 -1
  18. package/dist/renderer/assets/index-woGCpEdp.js +0 -356
  19. package/dist/renderer/catalyst.png +0 -0
  20. package/dist/renderer/claude_logo.png +0 -0
  21. package/dist/renderer/demo_1.png +0 -0
  22. package/dist/renderer/demo_2.png +0 -0
  23. package/dist/renderer/demo_3.png +0 -0
  24. package/dist/renderer/file.svg +0 -1
  25. package/dist/renderer/globe.svg +0 -1
  26. package/dist/renderer/index.html +0 -14
  27. package/dist/renderer/mcp.svg +0 -1
  28. package/dist/renderer/mcp_jam.svg +0 -12
  29. package/dist/renderer/mcp_jam_dark.png +0 -0
  30. package/dist/renderer/mcp_jam_light.png +0 -0
  31. package/dist/renderer/next.svg +0 -1
  32. package/dist/renderer/ollama_dark.png +0 -0
  33. package/dist/renderer/ollama_logo.svg +0 -7
  34. package/dist/renderer/openai_logo.png +0 -0
  35. package/dist/renderer/vercel.svg +0 -1
  36. package/dist/renderer/window.svg +0 -1
@@ -0,0 +1 @@
1
+ <svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>DeepSeek</title><path d="M23.748 4.482c-.254-.124-.364.113-.512.234-.051.039-.094.09-.137.136-.372.397-.806.657-1.373.626-.829-.046-1.537.214-2.163.848-.133-.782-.575-1.248-1.247-1.548-.352-.156-.708-.311-.955-.65-.172-.241-.219-.51-.305-.774-.055-.16-.11-.323-.293-.35-.2-.031-.278.136-.356.276-.313.572-.434 1.202-.422 1.84.027 1.436.633 2.58 1.838 3.393.137.093.172.187.129.323-.082.28-.18.552-.266.833-.055.179-.137.217-.329.14a5.526 5.526 0 01-1.736-1.18c-.857-.828-1.631-1.742-2.597-2.458a11.365 11.365 0 00-.689-.471c-.985-.957.13-1.743.388-1.836.27-.098.093-.432-.779-.428-.872.004-1.67.295-2.687.684a3.055 3.055 0 01-.465.137 9.597 9.597 0 00-2.883-.102c-1.885.21-3.39 1.102-4.497 2.623C.082 8.606-.231 10.684.152 12.85c.403 2.284 1.569 4.175 3.36 5.653 1.858 1.533 3.997 2.284 6.438 2.14 1.482-.085 3.133-.284 4.994-1.86.47.234.962.327 1.78.397.63.059 1.236-.03 1.705-.128.735-.156.684-.837.419-.961-2.155-1.004-1.682-.595-2.113-.926 1.096-1.296 2.746-2.642 3.392-7.003.05-.347.007-.565 0-.845-.004-.17.035-.237.23-.256a4.173 4.173 0 001.545-.475c1.396-.763 1.96-2.015 2.093-3.517.02-.23-.004-.467-.247-.588zM11.581 18c-2.089-1.642-3.102-2.183-3.52-2.16-.392.024-.321.471-.235.763.09.288.207.486.371.739.114.167.192.416-.113.603-.673.416-1.842-.14-1.897-.167-1.361-.802-2.5-1.86-3.301-3.307-.774-1.393-1.224-2.887-1.298-4.482-.02-.386.093-.522.477-.592a4.696 4.696 0 011.529-.039c2.132.312 3.946 1.265 5.468 2.774.868.86 1.525 1.887 2.202 2.891.72 1.066 1.494 2.082 2.48 2.914.348.292.625.514.891.677-.802.09-2.14.11-3.054-.614zm1-6.44a.306.306 0 01.415-.287.302.302 0 01.2.288.306.306 0 01-.31.307.303.303 0 01-.304-.308zm3.11 1.596c-.2.081-.399.151-.59.16a1.245 1.245 0 01-.798-.254c-.274-.23-.47-.358-.552-.758a1.73 1.73 0 01.016-.588c.07-.327-.008-.537-.239-.727-.187-.156-.426-.199-.688-.199a.559.559 0 
01-.254-.078c-.11-.054-.2-.19-.114-.358.028-.054.16-.186.192-.21.356-.202.767-.136 1.146.016.352.144.618.408 1.001.782.391.451.462.576.685.914.176.265.336.537.445.848.067.195-.019.354-.25.452z" fill="#4D6BFE"></path></svg>
@@ -5,8 +5,8 @@
5
5
  <link rel="icon" type="image/svg+xml" href="/mcp_jam.svg" />
6
6
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
7
  <title>MCPJam Inspector</title>
8
- <script type="module" crossorigin src="/assets/index-C2tpp_KC.js"></script>
9
- <link rel="stylesheet" crossorigin href="/assets/index-BxtnlkQW.css">
8
+ <script type="module" crossorigin src="/assets/index-CTiiyjex.js"></script>
9
+ <link rel="stylesheet" crossorigin href="/assets/index-C-vxFpw4.css">
10
10
  </head>
11
11
  <body>
12
12
  <div id="root"></div>
@@ -29,7 +29,10 @@ function validateServerConfig(serverConfig) {
29
29
  if (config.url) {
30
30
  try {
31
31
  if (typeof config.url === "string") {
32
- config.url = new URL(config.url);
32
+ const parsed = new URL(config.url);
33
+ parsed.search = "";
34
+ parsed.hash = "";
35
+ config.url = parsed;
33
36
  } else if (typeof config.url === "object" && !config.url.href) {
34
37
  return {
35
38
  success: false,
@@ -231,6 +234,7 @@ var connect_default = connect;
231
234
  // routes/mcp/tools.ts
232
235
  import { Hono as Hono2 } from "hono";
233
236
  import { zodToJsonSchema } from "zod-to-json-schema";
237
+ import { TextEncoder } from "util";
234
238
  var tools = new Hono2();
235
239
  var pendingElicitations = /* @__PURE__ */ new Map();
236
240
  tools.post("/", async (c) => {
@@ -643,9 +647,313 @@ import { Agent } from "@mastra/core/agent";
643
647
  import { createAnthropic } from "@ai-sdk/anthropic";
644
648
  import { createOpenAI } from "@ai-sdk/openai";
645
649
  import { createOllama } from "ollama-ai-provider";
646
- import { MCPClient as MCPClient2 } from "@mastra/mcp";
647
- var chat = new Hono5();
650
+ import { TextEncoder as TextEncoder2 } from "util";
651
+
652
+ // ../client/src/lib/chat-utils.ts
653
+ function getDefaultTemperatureByProvider(provider) {
654
+ switch (provider) {
655
+ case "openai":
656
+ return 1;
657
+ case "anthropic":
658
+ return 0;
659
+ default:
660
+ return 0;
661
+ }
662
+ }
663
+
664
+ // routes/mcp/chat.ts
665
+ var DEBUG_ENABLED = process.env.MCP_DEBUG !== "false";
666
+ var ELICITATION_TIMEOUT = 3e5;
667
+ var MAX_AGENT_STEPS = 10;
668
+ var dbg = (...args) => {
669
+ if (DEBUG_ENABLED) console.log("[mcp/chat]", ...args);
670
+ };
671
+ try {
672
+ process.setMaxListeners?.(50);
673
+ } catch {
674
+ }
648
675
  var pendingElicitations2 = /* @__PURE__ */ new Map();
676
+ var chat = new Hono5();
677
+ var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl) => {
678
+ if (!modelDefinition?.id || !modelDefinition?.provider) {
679
+ throw new Error(
680
+ `Invalid model definition: ${JSON.stringify(modelDefinition)}`
681
+ );
682
+ }
683
+ switch (modelDefinition.provider) {
684
+ case "anthropic":
685
+ return createAnthropic({ apiKey })(modelDefinition.id);
686
+ case "openai":
687
+ return createOpenAI({ apiKey })(modelDefinition.id);
688
+ case "deepseek":
689
+ return createOpenAI({ apiKey, baseURL: "https://api.deepseek.com/v1" })(
690
+ modelDefinition.id
691
+ );
692
+ case "ollama":
693
+ const baseUrl = ollamaBaseUrl || "http://localhost:11434";
694
+ return createOllama({
695
+ baseURL: `${baseUrl}`
696
+ })(modelDefinition.id, {
697
+ simulateStreaming: true
698
+ });
699
+ default:
700
+ throw new Error(
701
+ `Unsupported provider: ${modelDefinition.provider} for model: ${modelDefinition.id}`
702
+ );
703
+ }
704
+ };
705
+ var createElicitationHandler = (streamingContext) => {
706
+ return async (elicitationRequest) => {
707
+ const requestId = `elicit_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
708
+ if (streamingContext.controller && streamingContext.encoder) {
709
+ streamingContext.controller.enqueue(
710
+ streamingContext.encoder.encode(
711
+ `data: ${JSON.stringify({
712
+ type: "elicitation_request",
713
+ requestId,
714
+ message: elicitationRequest.message,
715
+ schema: elicitationRequest.requestedSchema,
716
+ timestamp: /* @__PURE__ */ new Date()
717
+ })}
718
+
719
+ `
720
+ )
721
+ );
722
+ }
723
+ return new Promise((resolve, reject) => {
724
+ pendingElicitations2.set(requestId, { resolve, reject });
725
+ setTimeout(() => {
726
+ if (pendingElicitations2.has(requestId)) {
727
+ pendingElicitations2.delete(requestId);
728
+ reject(new Error("Elicitation timeout"));
729
+ }
730
+ }, ELICITATION_TIMEOUT);
731
+ });
732
+ };
733
+ };
734
+ var wrapToolsWithStreaming = (tools2, streamingContext) => {
735
+ const wrappedTools = {};
736
+ for (const [name, tool] of Object.entries(tools2)) {
737
+ wrappedTools[name] = {
738
+ ...tool,
739
+ execute: async (params) => {
740
+ const currentToolCallId = ++streamingContext.toolCallId;
741
+ const startedAt = Date.now();
742
+ if (streamingContext.controller && streamingContext.encoder) {
743
+ streamingContext.controller.enqueue(
744
+ streamingContext.encoder.encode(
745
+ `data: ${JSON.stringify({
746
+ type: "tool_call",
747
+ toolCall: {
748
+ id: currentToolCallId,
749
+ name,
750
+ parameters: params,
751
+ timestamp: /* @__PURE__ */ new Date(),
752
+ status: "executing"
753
+ }
754
+ })}
755
+
756
+ `
757
+ )
758
+ );
759
+ }
760
+ dbg("Tool executing", { name, currentToolCallId, params });
761
+ try {
762
+ const result = await tool.execute(params);
763
+ dbg("Tool result", {
764
+ name,
765
+ currentToolCallId,
766
+ ms: Date.now() - startedAt
767
+ });
768
+ if (streamingContext.controller && streamingContext.encoder) {
769
+ streamingContext.controller.enqueue(
770
+ streamingContext.encoder.encode(
771
+ `data: ${JSON.stringify({
772
+ type: "tool_result",
773
+ toolResult: {
774
+ id: currentToolCallId,
775
+ toolCallId: currentToolCallId,
776
+ result,
777
+ timestamp: /* @__PURE__ */ new Date()
778
+ }
779
+ })}
780
+
781
+ `
782
+ )
783
+ );
784
+ }
785
+ return result;
786
+ } catch (error) {
787
+ dbg("Tool error", {
788
+ name,
789
+ currentToolCallId,
790
+ error: error instanceof Error ? error.message : String(error)
791
+ });
792
+ if (streamingContext.controller && streamingContext.encoder) {
793
+ streamingContext.controller.enqueue(
794
+ streamingContext.encoder.encode(
795
+ `data: ${JSON.stringify({
796
+ type: "tool_result",
797
+ toolResult: {
798
+ id: currentToolCallId,
799
+ toolCallId: currentToolCallId,
800
+ error: error instanceof Error ? error.message : String(error),
801
+ timestamp: /* @__PURE__ */ new Date()
802
+ }
803
+ })}
804
+
805
+ `
806
+ )
807
+ );
808
+ }
809
+ throw error;
810
+ }
811
+ }
812
+ };
813
+ }
814
+ return wrappedTools;
815
+ };
816
+ var handleAgentStepFinish = (streamingContext, text, toolCalls, toolResults) => {
817
+ try {
818
+ if (toolCalls && Array.isArray(toolCalls)) {
819
+ for (const call of toolCalls) {
820
+ const currentToolCallId = ++streamingContext.toolCallId;
821
+ streamingContext.lastEmittedToolCallId = currentToolCallId;
822
+ if (streamingContext.controller && streamingContext.encoder) {
823
+ streamingContext.controller.enqueue(
824
+ streamingContext.encoder.encode(
825
+ `data: ${JSON.stringify({
826
+ type: "tool_call",
827
+ toolCall: {
828
+ id: currentToolCallId,
829
+ name: call.name || call.toolName,
830
+ parameters: call.params || call.args || {},
831
+ timestamp: /* @__PURE__ */ new Date(),
832
+ status: "executing"
833
+ }
834
+ })}
835
+
836
+ `
837
+ )
838
+ );
839
+ }
840
+ }
841
+ }
842
+ if (toolResults && Array.isArray(toolResults)) {
843
+ for (const result of toolResults) {
844
+ const currentToolCallId = streamingContext.lastEmittedToolCallId != null ? streamingContext.lastEmittedToolCallId : ++streamingContext.toolCallId;
845
+ if (streamingContext.controller && streamingContext.encoder) {
846
+ streamingContext.controller.enqueue(
847
+ streamingContext.encoder.encode(
848
+ `data: ${JSON.stringify({
849
+ type: "tool_result",
850
+ toolResult: {
851
+ id: currentToolCallId,
852
+ toolCallId: currentToolCallId,
853
+ result: result.result,
854
+ error: result.error,
855
+ timestamp: /* @__PURE__ */ new Date()
856
+ }
857
+ })}
858
+
859
+ `
860
+ )
861
+ );
862
+ }
863
+ }
864
+ }
865
+ } catch (err) {
866
+ dbg("onStepFinish error", err);
867
+ }
868
+ };
869
+ var streamAgentResponse = async (streamingContext, stream) => {
870
+ let hasContent = false;
871
+ let chunkCount = 0;
872
+ for await (const chunk of stream.textStream) {
873
+ if (chunk && chunk.trim()) {
874
+ hasContent = true;
875
+ chunkCount++;
876
+ streamingContext.controller.enqueue(
877
+ streamingContext.encoder.encode(
878
+ `data: ${JSON.stringify({ type: "text", content: chunk })}
879
+
880
+ `
881
+ )
882
+ );
883
+ }
884
+ }
885
+ dbg("Streaming finished", { hasContent, chunkCount });
886
+ return { hasContent, chunkCount };
887
+ };
888
+ var fallbackToCompletion = async (agent, messages, streamingContext, provider) => {
889
+ try {
890
+ const result = await agent.generate(messages, {
891
+ temperature: getDefaultTemperatureByProvider(provider)
892
+ });
893
+ if (result.text && result.text.trim()) {
894
+ streamingContext.controller.enqueue(
895
+ streamingContext.encoder.encode(
896
+ `data: ${JSON.stringify({
897
+ type: "text",
898
+ content: result.text
899
+ })}
900
+
901
+ `
902
+ )
903
+ );
904
+ }
905
+ } catch (fallbackErr) {
906
+ streamingContext.controller.enqueue(
907
+ streamingContext.encoder.encode(
908
+ `data: ${JSON.stringify({
909
+ type: "text",
910
+ content: "Failed to generate response. Please try again. ",
911
+ error: fallbackErr instanceof Error ? fallbackErr.message : "Unknown error"
912
+ })}
913
+
914
+ `
915
+ )
916
+ );
917
+ }
918
+ };
919
+ var safeDisconnect = async (client) => {
920
+ if (client) {
921
+ try {
922
+ await client.disconnect();
923
+ } catch (cleanupError) {
924
+ console.warn("[mcp/chat] Error cleaning up MCP client:", cleanupError);
925
+ }
926
+ }
927
+ };
928
+ var createStreamingResponse = async (agent, messages, toolsets, streamingContext, provider) => {
929
+ const stream = await agent.stream(messages, {
930
+ maxSteps: MAX_AGENT_STEPS,
931
+ temperature: getDefaultTemperatureByProvider(provider),
932
+ toolsets,
933
+ onStepFinish: ({ text, toolCalls, toolResults }) => {
934
+ handleAgentStepFinish(streamingContext, text, toolCalls, toolResults);
935
+ }
936
+ });
937
+ const { hasContent } = await streamAgentResponse(streamingContext, stream);
938
+ if (!hasContent) {
939
+ dbg("No content from textStream; falling back to completion");
940
+ await fallbackToCompletion(agent, messages, streamingContext, provider);
941
+ }
942
+ streamingContext.controller.enqueue(
943
+ streamingContext.encoder.encode(
944
+ `data: ${JSON.stringify({
945
+ type: "elicitation_complete"
946
+ })}
947
+
948
+ `
949
+ )
950
+ );
951
+ streamingContext.controller.enqueue(
952
+ streamingContext.encoder.encode(`data: [DONE]
953
+
954
+ `)
955
+ );
956
+ };
649
957
  chat.post("/", async (c) => {
650
958
  let client = null;
651
959
  try {
@@ -653,6 +961,7 @@ chat.post("/", async (c) => {
653
961
  const {
654
962
  serverConfigs,
655
963
  model,
964
+ provider,
656
965
  apiKey,
657
966
  systemPrompt,
658
967
  messages,
@@ -685,7 +994,7 @@ chat.post("/", async (c) => {
685
994
  pendingElicitations2.delete(requestId);
686
995
  return c.json({ success: true });
687
996
  }
688
- if (!model || !model.id || !apiKey || !messages) {
997
+ if (!model?.id || !apiKey || !messages) {
689
998
  return c.json(
690
999
  {
691
1000
  success: false,
@@ -694,186 +1003,88 @@ chat.post("/", async (c) => {
694
1003
  400
695
1004
  );
696
1005
  }
697
- if (serverConfigs && Object.keys(serverConfigs).length > 0) {
698
- const validation = validateMultipleServerConfigs(serverConfigs);
699
- if (!validation.success) {
700
- return c.json(
701
- {
702
- success: false,
703
- error: validation.error.message,
704
- details: validation.errors
705
- },
706
- validation.error.status
707
- );
708
- }
709
- client = createMCPClientWithMultipleConnections(validation.validConfigs);
710
- } else {
711
- client = new MCPClient2({
712
- id: `chat-${Date.now()}`,
713
- servers: {}
714
- });
715
- }
716
- const tools2 = await client.getTools();
717
- const llmModel = getLlmModel(model, apiKey, ollamaBaseUrl);
718
- let toolCallId = 0;
719
- let streamController = null;
720
- let encoder = null;
721
- const elicitationHandler = async (elicitationRequest) => {
722
- const requestId2 = `elicit_${Date.now()}_${Math.random().toString(36).substring(2, 11)}`;
723
- if (streamController && encoder) {
724
- streamController.enqueue(
725
- encoder.encode(
726
- `data: ${JSON.stringify({
727
- type: "elicitation_request",
728
- requestId: requestId2,
729
- message: elicitationRequest.message,
730
- schema: elicitationRequest.requestedSchema,
731
- timestamp: /* @__PURE__ */ new Date()
732
- })}
733
-
734
- `
735
- )
736
- );
737
- }
738
- return new Promise((resolve, reject) => {
739
- pendingElicitations2.set(requestId2, { resolve, reject });
740
- setTimeout(() => {
741
- if (pendingElicitations2.has(requestId2)) {
742
- pendingElicitations2.delete(requestId2);
743
- reject(new Error("Elicitation timeout"));
744
- }
745
- }, 3e5);
746
- });
747
- };
748
- if (client.elicitation && client.elicitation.onRequest && serverConfigs) {
749
- for (const serverName of Object.keys(serverConfigs)) {
750
- const normalizedName = serverName.toLowerCase().replace(/[\s\-]+/g, "_").replace(/[^a-z0-9_]/g, "");
751
- client.elicitation.onRequest(normalizedName, elicitationHandler);
752
- }
1006
+ if (!serverConfigs || Object.keys(serverConfigs).length === 0) {
1007
+ return c.json(
1008
+ {
1009
+ success: false,
1010
+ error: "No server configs provided"
1011
+ },
1012
+ 400
1013
+ );
753
1014
  }
754
- const originalTools = tools2 && Object.keys(tools2).length > 0 ? tools2 : {};
755
- const wrappedTools = {};
756
- for (const [name, tool] of Object.entries(originalTools)) {
757
- wrappedTools[name] = {
758
- ...tool,
759
- execute: async (params) => {
760
- const currentToolCallId = ++toolCallId;
761
- if (streamController && encoder) {
762
- streamController.enqueue(
763
- encoder.encode(
764
- `data: ${JSON.stringify({
765
- type: "tool_call",
766
- toolCall: {
767
- id: currentToolCallId,
768
- name,
769
- parameters: params,
770
- timestamp: /* @__PURE__ */ new Date(),
771
- status: "executing"
772
- }
773
- })}
774
-
775
- `
776
- )
777
- );
778
- }
779
- try {
780
- const result = await tool.execute(params);
781
- if (streamController && encoder) {
782
- streamController.enqueue(
783
- encoder.encode(
784
- `data: ${JSON.stringify({
785
- type: "tool_result",
786
- toolResult: {
787
- id: currentToolCallId,
788
- toolCallId: currentToolCallId,
789
- result,
790
- timestamp: /* @__PURE__ */ new Date()
791
- }
792
- })}
793
-
794
- `
795
- )
796
- );
797
- }
798
- return result;
799
- } catch (error) {
800
- if (streamController && encoder) {
801
- streamController.enqueue(
802
- encoder.encode(
803
- `data: ${JSON.stringify({
804
- type: "tool_result",
805
- toolResult: {
806
- id: currentToolCallId,
807
- toolCallId: currentToolCallId,
808
- error: error instanceof Error ? error.message : String(error),
809
- timestamp: /* @__PURE__ */ new Date()
810
- }
811
- })}
812
-
813
- `
814
- )
815
- );
816
- }
817
- throw error;
818
- }
819
- }
820
- };
1015
+ const validation = validateMultipleServerConfigs(serverConfigs);
1016
+ if (!validation.success) {
1017
+ dbg(
1018
+ "Server config validation failed",
1019
+ validation.errors || validation.error
1020
+ );
1021
+ return c.json(
1022
+ {
1023
+ success: false,
1024
+ error: validation.error.message,
1025
+ details: validation.errors
1026
+ },
1027
+ validation.error.status
1028
+ );
821
1029
  }
1030
+ client = createMCPClientWithMultipleConnections(validation.validConfigs);
1031
+ const llmModel = createLlmModel(model, apiKey, ollamaBaseUrl);
1032
+ const tools2 = await client.getTools();
822
1033
  const agent = new Agent({
823
1034
  name: "MCP Chat Agent",
824
1035
  instructions: systemPrompt || "You are a helpful assistant with access to MCP tools.",
825
1036
  model: llmModel,
826
- tools: Object.keys(wrappedTools).length > 0 ? wrappedTools : void 0
1037
+ tools: void 0
1038
+ // Start without tools, add them in streaming context
827
1039
  });
828
1040
  const formattedMessages = messages.map((msg) => ({
829
1041
  role: msg.role,
830
1042
  content: msg.content
831
1043
  }));
832
- const stream = await agent.stream(formattedMessages, {
833
- maxSteps: 10
834
- // Allow up to 10 steps for tool usage
1044
+ const toolsets = await client.getToolsets();
1045
+ dbg("Streaming start", {
1046
+ toolsetServers: Object.keys(toolsets),
1047
+ messageCount: formattedMessages.length
835
1048
  });
836
- encoder = new TextEncoder();
1049
+ const encoder = new TextEncoder2();
837
1050
  const readableStream = new ReadableStream({
838
1051
  async start(controller) {
839
- streamController = controller;
840
- try {
841
- let hasContent = false;
842
- for await (const chunk of stream.textStream) {
843
- if (chunk && chunk.trim()) {
844
- hasContent = true;
845
- controller.enqueue(
846
- encoder.encode(
847
- `data: ${JSON.stringify({ type: "text", content: chunk })}
848
-
849
- `
850
- )
851
- );
852
- }
1052
+ const streamingContext = {
1053
+ controller,
1054
+ encoder,
1055
+ toolCallId: 0,
1056
+ lastEmittedToolCallId: null
1057
+ };
1058
+ const streamingWrappedTools = wrapToolsWithStreaming(
1059
+ tools2,
1060
+ streamingContext
1061
+ );
1062
+ const streamingAgent = new Agent({
1063
+ name: agent.name,
1064
+ instructions: agent.instructions,
1065
+ model: agent.model,
1066
+ tools: Object.keys(streamingWrappedTools).length > 0 ? streamingWrappedTools : void 0
1067
+ });
1068
+ if (client?.elicitation?.onRequest) {
1069
+ for (const serverName of Object.keys(serverConfigs)) {
1070
+ const normalizedName = normalizeServerConfigName(serverName);
1071
+ const elicitationHandler = createElicitationHandler(streamingContext);
1072
+ client.elicitation.onRequest(normalizedName, elicitationHandler);
853
1073
  }
854
- if (!hasContent) {
855
- controller.enqueue(
856
- encoder.encode(
857
- `data: ${JSON.stringify({ type: "text", content: "I apologize, but I couldn't generate a response. Please try again." })}
858
-
859
- `
860
- )
1074
+ }
1075
+ try {
1076
+ if (client) {
1077
+ await createStreamingResponse(
1078
+ streamingAgent,
1079
+ formattedMessages,
1080
+ toolsets,
1081
+ streamingContext,
1082
+ provider
861
1083
  );
1084
+ } else {
1085
+ throw new Error("MCP client is null");
862
1086
  }
863
- controller.enqueue(
864
- encoder.encode(
865
- `data: ${JSON.stringify({
866
- type: "elicitation_complete"
867
- })}
868
-
869
- `
870
- )
871
- );
872
- controller.enqueue(encoder.encode(`data: [DONE]
873
-
874
- `));
875
1087
  } catch (error) {
876
- console.error("Streaming error:", error);
877
1088
  controller.enqueue(
878
1089
  encoder.encode(
879
1090
  `data: ${JSON.stringify({
@@ -885,16 +1096,7 @@ chat.post("/", async (c) => {
885
1096
  )
886
1097
  );
887
1098
  } finally {
888
- if (client) {
889
- try {
890
- await client.disconnect();
891
- } catch (cleanupError) {
892
- console.warn(
893
- "Error cleaning up MCP client after streaming:",
894
- cleanupError
895
- );
896
- }
897
- }
1099
+ await safeDisconnect(client);
898
1100
  controller.close();
899
1101
  }
900
1102
  }
@@ -907,14 +1109,8 @@ chat.post("/", async (c) => {
907
1109
  }
908
1110
  });
909
1111
  } catch (error) {
910
- console.error("Error in chat API:", error);
911
- if (client) {
912
- try {
913
- await client.disconnect();
914
- } catch (cleanupError) {
915
- console.warn("Error cleaning up MCP client after error:", cleanupError);
916
- }
917
- }
1112
+ console.error("[mcp/chat] Error in chat API:", error);
1113
+ await safeDisconnect(client);
918
1114
  return c.json(
919
1115
  {
920
1116
  success: false,
@@ -924,32 +1120,6 @@ chat.post("/", async (c) => {
924
1120
  );
925
1121
  }
926
1122
  });
927
- var getLlmModel = (modelDefinition, apiKey, ollamaBaseUrl) => {
928
- if (!modelDefinition || !modelDefinition.id || !modelDefinition.provider) {
929
- throw new Error(
930
- `Invalid model definition: ${JSON.stringify(modelDefinition)}`
931
- );
932
- }
933
- switch (modelDefinition.provider) {
934
- case "anthropic":
935
- return createAnthropic({ apiKey })(modelDefinition.id);
936
- case "openai":
937
- return createOpenAI({ apiKey })(modelDefinition.id);
938
- case "ollama":
939
- const baseUrl = ollamaBaseUrl || "http://localhost:11434";
940
- return createOllama({
941
- baseURL: `${baseUrl}/api`
942
- // Configurable Ollama API endpoint
943
- })(modelDefinition.id, {
944
- simulateStreaming: true
945
- // Enable streaming for Ollama models
946
- });
947
- default:
948
- throw new Error(
949
- `Unsupported provider: ${modelDefinition.provider} for model: ${modelDefinition.id}`
950
- );
951
- }
952
- };
953
1123
  var chat_default = chat;
954
1124
 
955
1125
  // routes/mcp/oauth.ts
@@ -1093,7 +1263,7 @@ if (process.env.NODE_ENV === "production") {
1093
1263
  } else {
1094
1264
  app.get("/", (c) => {
1095
1265
  return c.json({
1096
- message: "MCP Inspector API Server",
1266
+ message: "MCPJam API Server",
1097
1267
  environment: "development",
1098
1268
  frontend: `http://localhost:${serverPort}`
1099
1269
  });