@jaypie/mcp 0.2.1 → 0.2.3

This diff shows the published contents of two publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -1,13 +1,15 @@
  #!/usr/bin/env node
- import { realpathSync } from 'node:fs';
+ import { realpathSync, readFileSync } from 'node:fs';
+ import * as path from 'node:path';
+ import { join } from 'node:path';
  import { fileURLToPath, pathToFileURL } from 'node:url';
  import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
  import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
  import { z } from 'zod';
  import * as fs from 'node:fs/promises';
- import * as path from 'node:path';
  import matter from 'gray-matter';
  import * as https from 'node:https';
+ import { Llm } from '@jaypie/llm';
  import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
  import { randomUUID } from 'node:crypto';

@@ -797,7 +799,93 @@ async function searchDatadogRum(credentials, options = {}, logger = nullLogger)
  });
  }

- const BUILD_VERSION_STRING = "@jaypie/mcp@0.2.1#22724b60"
+ /**
+  * LLM debugging utilities for inspecting raw provider responses
+  */
+ // Default models for each provider
+ const DEFAULT_MODELS = {
+     anthropic: "claude-sonnet-4-20250514",
+     gemini: "gemini-2.0-flash",
+     openai: "gpt-4o-mini",
+     openrouter: "openai/gpt-4o-mini",
+ };
+ /**
+  * Make a debug LLM call and return the raw response data for inspection
+  */
+ async function debugLlmCall(params, log) {
+     const { provider, message } = params;
+     const model = params.model || DEFAULT_MODELS[provider];
+     log.info(`Making debug LLM call to ${provider} with model ${model}`);
+     try {
+         const llm = new Llm(provider, { model });
+         const result = await llm.operate(message, {
+             user: "[jaypie-mcp] Debug LLM Call",
+         });
+         if (result.error) {
+             return {
+                 success: false,
+                 provider,
+                 model,
+                 error: `${result.error.title}: ${result.error.detail || "Unknown error"}`,
+             };
+         }
+         // Calculate total reasoning tokens
+         const reasoningTokens = result.usage.reduce((sum, u) => sum + (u.reasoning || 0), 0);
+         return {
+             success: true,
+             provider,
+             model,
+             content: typeof result.content === "string"
+                 ? result.content
+                 : JSON.stringify(result.content),
+             reasoning: result.reasoning,
+             reasoningTokens,
+             history: result.history,
+             rawResponses: result.responses,
+             usage: result.usage,
+         };
+     }
+     catch (error) {
+         log.error(`Error calling ${provider}:`, error);
+         return {
+             success: false,
+             provider,
+             model,
+             error: error instanceof Error ? error.message : String(error),
+         };
+     }
+ }
+ /**
+  * List available providers and their default/reasoning models
+  */
+ function listLlmProviders() {
+     return {
+         providers: [
+             {
+                 name: "openai",
+                 defaultModel: DEFAULT_MODELS.openai,
+                 reasoningModels: ["o3-mini", "o1-preview", "o1-mini"],
+             },
+             {
+                 name: "anthropic",
+                 defaultModel: DEFAULT_MODELS.anthropic,
+                 reasoningModels: [], // Anthropic doesn't expose reasoning the same way
+             },
+             {
+                 name: "gemini",
+                 defaultModel: DEFAULT_MODELS.gemini,
+                 reasoningModels: [], // Gemini has thoughtsTokenCount but unclear on content
+             },
+             {
+                 name: "openrouter",
+                 defaultModel: DEFAULT_MODELS.openrouter,
+                 reasoningModels: ["openai/o3-mini", "openai/o1-preview"],
+             },
+         ],
+     };
+ }
+
+ const BUILD_VERSION_STRING = "@jaypie/mcp@0.2.3#9c8eed7d"
  ;
  const __filename$1 = fileURLToPath(import.meta.url);
  const __dirname$1 = path.dirname(__filename$1);
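The new `debugLlmCall` helper wraps `@jaypie/llm`'s `Llm#operate` and flattens the result into a plain object. A minimal sketch of exercising it directly — assuming the function were exported (it is module-private in this build) and that provider credentials such as `OPENAI_API_KEY` are set in the environment:

```js
// Sketch only: debugLlmCall is module-private in dist/index.js; this assumes
// it were callable, with OPENAI_API_KEY set in the environment.
const log = { info: console.log, error: console.error };

const result = await debugLlmCall(
  { provider: "openai", message: "What is 15 * 17? Think step by step." },
  log,
);

if (result.success) {
  console.log(result.content);         // normalized text content
  console.log(result.reasoningTokens); // reasoning tokens summed across usage entries
} else {
  console.error(result.error);         // "<title>: <detail>" or thrown-error message
}
```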
@@ -1610,6 +1698,88 @@ function createMcpServer(options = {}) {
  };
  });
  log.info("Registered tool: datadog_rum");
+ // LLM Debug Tools
+ server.tool("llm_debug_call", "Make a debug LLM API call and inspect the raw response. Useful for understanding how each provider formats responses, especially for reasoning/thinking content. Returns full history, raw responses, and extracted reasoning.", {
+     provider: z
+         .enum(["anthropic", "gemini", "openai", "openrouter"])
+         .describe("LLM provider to call"),
+     model: z
+         .string()
+         .optional()
+         .describe("Model to use. If not provided, uses a sensible default. For reasoning tests, try 'o3-mini' with openai."),
+     message: z
+         .string()
+         .describe("Message to send to the LLM. For reasoning tests, try something that requires thinking like 'What is 15 * 17? Think step by step.'"),
+ }, async ({ provider, model, message }) => {
+     log.info(`Tool called: llm_debug_call (provider: ${provider})`);
+     const result = await debugLlmCall({ provider: provider, model, message }, log);
+     if (!result.success) {
+         return {
+             content: [
+                 {
+                     type: "text",
+                     text: `Error calling ${provider}: ${result.error}`,
+                 },
+             ],
+         };
+     }
+     const sections = [
+         `## LLM Debug Call Result`,
+         `Provider: ${result.provider}`,
+         `Model: ${result.model}`,
+         ``,
+         `### Content`,
+         result.content || "(no content)",
+         ``,
+         `### Reasoning (${result.reasoning?.length || 0} items, ${result.reasoningTokens || 0} tokens)`,
+         result.reasoning && result.reasoning.length > 0
+             ? result.reasoning.map((r, i) => `[${i}] ${r}`).join("\n")
+             : "(no reasoning extracted)",
+         ``,
+         `### Usage`,
+         JSON.stringify(result.usage, null, 2),
+         ``,
+         `### History (${result.history?.length || 0} items)`,
+         JSON.stringify(result.history, null, 2),
+         ``,
+         `### Raw Responses (${result.rawResponses?.length || 0} items)`,
+         JSON.stringify(result.rawResponses, null, 2),
+     ];
+     return {
+         content: [
+             {
+                 type: "text",
+                 text: sections.join("\n"),
+             },
+         ],
+     };
+ });
+ log.info("Registered tool: llm_debug_call");
+ server.tool("llm_list_providers", "List available LLM providers with their default and reasoning-capable models.", {}, async () => {
+     log.info("Tool called: llm_list_providers");
+     const { providers } = listLlmProviders();
+     const formatted = providers.map((p) => {
+         const reasoningNote = p.reasoningModels.length > 0
+             ? `Reasoning models: ${p.reasoningModels.join(", ")}`
+             : "No known reasoning models";
+         return `- ${p.name}: default=${p.defaultModel}, ${reasoningNote}`;
+     });
+     return {
+         content: [
+             {
+                 type: "text",
+                 text: [
+                     "## Available LLM Providers",
+                     "",
+                     ...formatted,
+                     "",
+                     "Use llm_debug_call to test responses from any provider.",
+                 ].join("\n"),
+             },
+         ],
+     };
+ });
+ log.info("Registered tool: llm_list_providers");
  log.info("MCP server configuration complete");
  return server;
  }
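The two registrations above expose the helpers as MCP tools. A sketch of invoking `llm_debug_call` from a client, assuming the `@modelcontextprotocol/sdk` client classes (`Client`, `StdioClientTransport`) and that the server is launched via `npx @jaypie/mcp` (the launch command is an assumption):

```js
// Sketch: call the new tool over stdio using the MCP SDK client.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const transport = new StdioClientTransport({
  command: "npx",
  args: ["@jaypie/mcp"],
});
const client = new Client({ name: "llm-debug-client", version: "0.0.0" });
await client.connect(transport);

const result = await client.callTool({
  name: "llm_debug_call",
  arguments: {
    provider: "openai",
    model: "o3-mini",
    message: "What is 15 * 17? Think step by step.",
  },
});
console.log(result.content[0].text); // markdown report built from `sections`
```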
@@ -1666,6 +1836,41 @@ async function mcpExpressHandler(options = {}) {

  // Version will be injected during build
  const version = "0.0.0";
+ /**
+  * Load environment variables from .env file in current working directory
+  * Simple implementation that doesn't require external dependencies
+  */
+ function loadEnvFile() {
+     try {
+         const envPath = join(process.cwd(), ".env");
+         const content = readFileSync(envPath, "utf-8");
+         for (const line of content.split("\n")) {
+             const trimmed = line.trim();
+             // Skip comments and empty lines
+             if (!trimmed || trimmed.startsWith("#"))
+                 continue;
+             const eqIndex = trimmed.indexOf("=");
+             if (eqIndex === -1)
+                 continue;
+             const key = trimmed.slice(0, eqIndex).trim();
+             let value = trimmed.slice(eqIndex + 1).trim();
+             // Remove surrounding quotes if present
+             if ((value.startsWith('"') && value.endsWith('"')) ||
+                 (value.startsWith("'") && value.endsWith("'"))) {
+                 value = value.slice(1, -1);
+             }
+             // Only set if not already defined (environment takes precedence)
+             if (!process.env[key]) {
+                 process.env[key] = value;
+             }
+         }
+     }
+     catch {
+         // .env file not found or not readable - that's fine
+     }
+ }
+ // Load .env file before anything else
+ loadEnvFile();
  // Parse command-line arguments
  const args = process.argv.slice(2);
  const verbose = args.includes("--verbose") || args.includes("-v");
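The loader above follows the usual dotenv conventions: comments and blank lines are skipped, single or double quotes around values are stripped, and variables already present in the environment win. A sketch of the precedence rule, assuming it runs in the same module scope as `loadEnvFile` (which is module-private) and that the key names shown are illustrative:

```js
// Sketch: assumes a .env file in the working directory containing:
//   # comments and blank lines are skipped
//   OPENAI_API_KEY="sk-..."        (double quotes stripped)
//   DATADOG_SITE='datadoghq.com'   (single quotes stripped)
process.env.OPENAI_API_KEY = "from-shell"; // set before loading
loadEnvFile();
// Existing environment wins: only undefined keys are filled from .env.
console.log(process.env.OPENAI_API_KEY); // "from-shell"
console.log(process.env.DATADOG_SITE);   // "datadoghq.com"
```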