@jaypie/mcp 0.2.1 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/llm.d.ts ADDED
@@ -0,0 +1,41 @@
+ /**
+  * LLM debugging utilities for inspecting raw provider responses
+  */
+ export type LlmProvider = "anthropic" | "gemini" | "openai" | "openrouter";
+ export interface LlmDebugCallParams {
+     provider: LlmProvider;
+     model?: string;
+     message: string;
+ }
+ export interface LlmDebugCallResult {
+     success: boolean;
+     provider: string;
+     model: string;
+     content?: string;
+     reasoning?: string[];
+     reasoningTokens?: number;
+     history?: unknown[];
+     rawResponses?: unknown[];
+     usage?: unknown[];
+     error?: string;
+ }
+ interface Logger {
+     info: (message: string, ...args: unknown[]) => void;
+     error: (message: string, ...args: unknown[]) => void;
+ }
+ export declare const REASONING_MODELS: Record<string, string>;
+ /**
+  * Make a debug LLM call and return the raw response data for inspection
+  */
+ export declare function debugLlmCall(params: LlmDebugCallParams, log: Logger): Promise<LlmDebugCallResult>;
+ /**
+  * List available providers and their default/reasoning models
+  */
+ export declare function listLlmProviders(): {
+     providers: Array<{
+         name: LlmProvider;
+         defaultModel: string;
+         reasoningModels: string[];
+     }>;
+ };
+ export {};
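The declarations above imply usage along the following lines; this is a hedged sketch only. The function and type names come from the diff, but the import specifier and the console-backed logger are assumptions, not documented API.

```typescript
// Sketch based solely on the declarations in dist/llm.d.ts above.
// The import path "@jaypie/mcp/dist/llm" is an assumption, not a documented entry point.
import { debugLlmCall, listLlmProviders } from "@jaypie/mcp/dist/llm";

// Minimal logger satisfying the declared Logger shape
const log = {
  info: (message: string, ...args: unknown[]) => console.log(message, ...args),
  error: (message: string, ...args: unknown[]) => console.error(message, ...args),
};

// List providers with their default and reasoning models
const { providers } = listLlmProviders();
console.log(providers.map((p) => `${p.name}: ${p.defaultModel}`));

// Make a debug call and inspect the raw provider response data
const result = await debugLlmCall({ provider: "openai", message: "Say hello" }, log);
if (result.success) {
  console.log(result.content, result.usage, result.rawResponses);
} else {
  console.error(result.error);
}
```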
package/package.json CHANGED
@@ -1,10 +1,10 @@
  {
    "name": "@jaypie/mcp",
-   "version": "0.2.1",
+   "version": "0.2.3",
    "description": "Jaypie MCP",
    "repository": {
      "type": "git",
-     "url": "https://github.com/finlaysonstudio/jaypie"
+     "url": "git+https://github.com/finlaysonstudio/jaypie.git"
    },
    "license": "MIT",
    "author": "Finlayson Studio",
@@ -25,7 +25,7 @@
      "prompts"
    ],
    "scripts": {
-     "build": "rollup --config",
+     "build": "rollup --config && chmod +x dist/index.js",
      "format": "eslint . --fix",
      "lint": "eslint .",
      "prepare": "npm run build",
@@ -34,6 +34,7 @@
      "typecheck": "tsc --noEmit"
    },
    "dependencies": {
+     "@jaypie/llm": "^1.2.2",
      "@modelcontextprotocol/sdk": "^1.17.0",
      "commander": "^14.0.0",
      "gray-matter": "^4.0.3",
@@ -49,6 +50,5 @@
    },
    "publishConfig": {
      "access": "public"
-   },
-   "gitHead": "22724b6031f0e737ba7fa4acf7df7c2391e2e2f4"
+   }
  }
@@ -12,7 +12,7 @@ Streamline API calls with multi-model capabilities
  ```
  export interface LlmProvider {
    operate(
-     input: string | LlmHistory | LlmInputMessage,
+     input: string | LlmHistory | LlmInputMessage | LlmOperateInput,
      options?: LlmOperateOptions,
    ): Promise<LlmOperateResponse>;
    send(
@@ -21,11 +21,29 @@ export interface LlmProvider {
    ): Promise<string | JsonObject>;
  }

+ // Simplified input for files and images
+ type LlmOperateInput = LlmOperateInputContent[];
+ type LlmOperateInputContent = string | LlmOperateInputFile | LlmOperateInputImage;
+
+ interface LlmOperateInputFile {
+   file: string; // Path or filename
+   bucket?: string; // S3 bucket (uses CDK_ENV_BUCKET if omitted)
+   pages?: number[]; // Extract specific PDF pages (omit = all)
+   data?: string; // Base64 data (skips file loading)
+ }
+
+ interface LlmOperateInputImage {
+   image: string; // Path or filename
+   bucket?: string; // S3 bucket (uses CDK_ENV_BUCKET if omitted)
+   data?: string; // Base64 data (skips file loading)
+ }
+
  export interface LlmOperateOptions {
    data?: NaturalMap;
    explain?: boolean;
    format?: JsonObject | NaturalSchema | z.ZodType;
    history?: LlmHistory;
+   hooks?: LlmOperateHooks;
    instructions?: string;
    model?: string;
    placeholders?: {
@@ -35,26 +53,44 @@ export interface LlmOperateOptions {
    };
    providerOptions?: JsonObject;
    system?: string;
-   tools?: LlmTool[];
+   tools?: LlmTool[] | Toolkit;
    turns?: boolean | number;
    user?: string;
  }

+ export interface LlmOperateHooks {
+   afterEachModelResponse?: (context: HookContext) => unknown | Promise<unknown>;
+   afterEachTool?: (context: ToolHookContext) => unknown | Promise<unknown>;
+   beforeEachModelRequest?: (context: HookContext) => unknown | Promise<unknown>;
+   beforeEachTool?: (context: ToolHookContext) => unknown | Promise<unknown>;
+   onRetryableModelError?: (context: ErrorHookContext) => unknown | Promise<unknown>;
+   onToolError?: (context: ToolErrorContext) => unknown | Promise<unknown>;
+   onUnrecoverableModelError?: (context: ErrorHookContext) => unknown | Promise<unknown>;
+ }
+
  export interface LlmOperateResponse {
    content?: string | JsonObject;
    error?: LlmError;
    history: LlmHistory;
+   model?: string;
    output: LlmOutput;
+   provider?: string;
+   reasoning: string[];
    responses: JsonReturn[];
    status: LlmResponseStatus;
    usage: LlmUsage;
  }

- interface LlmUsage {
+ // LlmUsage is an array of usage items (one per model call in multi-turn)
+ type LlmUsage = LlmUsageItem[];
+
+ interface LlmUsageItem {
    input: number;
    output: number;
    reasoning: number;
    total: number;
+   model?: string;
+   provider?: string;
  }
  ```

@@ -68,6 +104,63 @@ const llm = new Llm();
  const result = await llm.operate("Give me advice on Yahtzee");
  ```

+ ## Providers and Models
+
+ Available providers: `anthropic`, `gemini`, `openai`, `openrouter`
+
+ ```typescript
+ import { Llm, PROVIDER } from "jaypie";
+
+ // Using provider name (uses provider's default model)
+ const llm = new Llm("anthropic");
+
+ // Using model name directly (provider auto-detected)
+ const llm2 = new Llm("claude-sonnet-4-0");
+ const llm3 = new Llm("gpt-4.1");
+ const llm4 = new Llm("gemini-2.5-flash");
+
+ // Using provider with specific model
+ const llm5 = new Llm("openai", { model: "gpt-4.1" });
+
+ // Using constants
+ const llm6 = new Llm(PROVIDER.OPENAI.NAME, {
+   model: PROVIDER.OPENAI.MODEL.LARGE
+ });
+ ```
+
+ ### Model Aliases
+
+ Each provider has standard aliases: `DEFAULT`, `SMALL`, `LARGE`, `TINY`
+
+ | Provider | DEFAULT | LARGE | SMALL | TINY |
+ |----------|---------|-------|-------|------|
+ | anthropic | claude-opus-4-1 | claude-opus-4-1 | claude-sonnet-4-0 | claude-3-5-haiku-latest |
+ | gemini | gemini-3-pro-preview | gemini-3-pro-preview | gemini-3-flash-preview | gemini-2.0-flash-lite |
+ | openai | gpt-4.1 | gpt-4.1 | gpt-4.1-mini | gpt-4.1-nano |
+ | openrouter | z-ai/glm-4.7 | z-ai/glm-4.7 | z-ai/glm-4.7 | z-ai/glm-4.7 |
+
+ ### Provider Constants
+
+ ```typescript
+ import { PROVIDER } from "jaypie";
+
+ // Anthropic models
+ PROVIDER.ANTHROPIC.MODEL.CLAUDE_OPUS_4 // claude-opus-4-1
+ PROVIDER.ANTHROPIC.MODEL.CLAUDE_SONNET_4 // claude-sonnet-4-0
+ PROVIDER.ANTHROPIC.MODEL.CLAUDE_3_HAIKU // claude-3-5-haiku-latest
+
+ // Gemini models
+ PROVIDER.GEMINI.MODEL.GEMINI_3_PRO_PREVIEW // gemini-3-pro-preview
+ PROVIDER.GEMINI.MODEL.GEMINI_2_5_FLASH // gemini-2.5-flash
+ PROVIDER.GEMINI.MODEL.GEMINI_2_0_FLASH // gemini-2.0-flash
+
+ // OpenAI models
+ PROVIDER.OPENAI.MODEL.GPT_4_1 // gpt-4.1
+ PROVIDER.OPENAI.MODEL.GPT_4_O // gpt-4o
+ PROVIDER.OPENAI.MODEL.O3 // o3
+ PROVIDER.OPENAI.MODEL.O4_MINI // o4-mini
+ ```
+
  ## "Operating" an Llm

  operate takes an optional second object of options
@@ -106,6 +199,172 @@ error will include any errors.
  output is just the output components of full responses.
  responses are the complete responses.

+ ## Files and Images
+
+ Use `LlmOperateInput` array syntax to send files and images with automatic loading and provider translation:
+
+ ```javascript
+ import { Llm } from "jaypie";
+
+ const llm = new Llm("openai");
+
+ // Image from local filesystem
+ const imageResult = await llm.operate([
+   "Extract text from this image",
+   { image: "/path/to/photo.png" }
+ ]);
+
+ // PDF from local filesystem
+ const pdfResult = await llm.operate([
+   "Summarize this document",
+   { file: "/path/to/document.pdf" }
+ ]);
+
+ // From S3 bucket (uses CDK_ENV_BUCKET if bucket omitted)
+ const s3Result = await llm.operate([
+   "Analyze this file",
+   { file: "documents/report.pdf", bucket: "my-bucket" }
+ ]);
+
+ // Extract specific PDF pages
+ const pagesResult = await llm.operate([
+   "Read pages 1-3",
+   { file: "large-doc.pdf", pages: [1, 2, 3] }
+ ]);
+
+ // With pre-loaded base64 data (skips file loading)
+ const base64Result = await llm.operate([
+   "Describe this image",
+   { image: "photo.jpg", data: base64String }
+ ]);
+
+ // Multiple files and text
+ const multiResult = await llm.operate([
+   "Compare these documents",
+   { file: "doc1.pdf" },
+   { file: "doc2.pdf" },
+   "Focus on the methodology section"
+ ]);
+ ```
+
+ ### File Resolution Order
+
+ 1. If `data` is present → uses base64 directly
+ 2. If `bucket` is present → loads from S3
+ 3. If `CDK_ENV_BUCKET` env var exists → loads from that S3 bucket
+ 4. Otherwise → loads from local filesystem (relative to process.cwd())
+
+ ### Supported Image Extensions
+
+ Files with these extensions are treated as images: `png`, `jpg`, `jpeg`, `gif`, `webp`, `svg`, `bmp`, `ico`, `tiff`, `avif`
+
+ ## Streaming
+
+ Use `Llm.stream()` for real-time streaming responses:
+
+ ```javascript
+ import { Llm } from "jaypie";
+
+ const llm = new Llm("anthropic");
+
+ // Basic streaming
+ for await (const chunk of llm.stream("Tell me a story")) {
+   if (chunk.type === "text") {
+     process.stdout.write(chunk.content);
+   }
+ }
+
+ // Streaming with tools
+ for await (const chunk of llm.stream("Roll 3d6", { tools: [roll] })) {
+   switch (chunk.type) {
+     case "text":
+       console.log("Text:", chunk.content);
+       break;
+     case "tool_call":
+       console.log("Calling tool:", chunk.toolCall.name);
+       break;
+     case "tool_result":
+       console.log("Tool result:", chunk.toolResult.result);
+       break;
+     case "done":
+       console.log("Usage:", chunk.usage);
+       break;
+     case "error":
+       console.error("Error:", chunk.error);
+       break;
+   }
+ }
+
+ // Static method
+ for await (const chunk of Llm.stream("Hello", { llm: "openai" })) {
+   // ...
+ }
+ ```
+
+ ### Stream Chunk Types
+
+ ```typescript
+ type LlmStreamChunk =
+   | LlmStreamChunkText // { type: "text", content: string }
+   | LlmStreamChunkToolCall // { type: "tool_call", toolCall: { id, name, arguments } }
+   | LlmStreamChunkToolResult // { type: "tool_result", toolResult: { id, name, result } }
+   | LlmStreamChunkDone // { type: "done", usage: LlmUsage }
+   | LlmStreamChunkError; // { type: "error", error: { status, title, detail? } }
+ ```
+
+ ## Hooks
+
+ Use hooks to intercept and observe the LLM lifecycle:
+
+ ```javascript
+ const result = await llm.operate("Process this", {
+   hooks: {
+     beforeEachModelRequest: ({ input, options, providerRequest }) => {
+       console.log("About to call model with:", providerRequest);
+     },
+     afterEachModelResponse: ({ content, usage, providerResponse }) => {
+       console.log("Model responded:", content);
+       console.log("Tokens used:", usage);
+     },
+     beforeEachTool: ({ toolName, args }) => {
+       console.log(`Calling tool ${toolName} with:`, args);
+     },
+     afterEachTool: ({ toolName, result }) => {
+       console.log(`Tool ${toolName} returned:`, result);
+     },
+     onToolError: ({ toolName, error }) => {
+       console.error(`Tool ${toolName} failed:`, error);
+     },
+     onRetryableModelError: ({ error }) => {
+       console.warn("Retrying after error:", error);
+     },
+     onUnrecoverableModelError: ({ error }) => {
+       console.error("Fatal error:", error);
+     },
+   },
+ });
+ ```
+
+ ## Toolkit
+
+ Group tools with `Toolkit` for additional features:
+
+ ```javascript
+ import { Llm, Toolkit } from "jaypie";
+
+ const toolkit = new Toolkit([roll, weather, time], {
+   explain: true, // Add __Explanation param to tools
+   log: true, // Log tool calls (default)
+ });
+
+ // Extend toolkit with more tools
+ toolkit.extend([anotherTool], { replace: true });
+
+ const result = await llm.operate("Roll dice and check weather", {
+   tools: toolkit,
+ });
+ ```
+
  ## Footnotes

  Llm.operate(input, options)
@@ -104,17 +104,25 @@ Implement robust error handling to prevent crashes and provide meaningful messag
  import { Llm } from "jaypie";
  import { roll } from "./tools/roll.js";

- const llm = new Llm({
-   provider: "openai",
-   model: "gpt-4o"
+ // Create Llm instance
+ const llm = new Llm("openai", { model: "gpt-4o" });
+
+ // Use tools with operate
+ const response = await llm.operate("Roll 3d20 and tell me the result", {
+   tools: [roll],
  });

- const response = await llm.operate([
-   { role: "user", content: "Roll 3d20 and tell me the result" },
-   {
-     tools: [roll],
-   },
- ]);
+ // Or use Toolkit for additional features
+ import { Toolkit } from "jaypie";
+
+ const toolkit = new Toolkit([roll], {
+   explain: true, // Requires model to explain why it's calling tools
+   log: true, // Log tool calls (default)
+ });
+
+ const result = await llm.operate("Roll some dice", {
+   tools: toolkit,
+ });
  ```

  ## References
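A minimal sketch of consuming the reworked `LlmUsage` shape from the type diff above, where `usage` is now an array of per-call items rather than a single object. The field names (`input`, `output`, `reasoning`, `total`) and the `reasoning`, `model`, and `provider` response fields come from the diff; the rest of the snippet is illustrative, not part of the package.

```typescript
// Illustrative only: aggregate the per-call usage items now carried by result.usage,
// per the LlmUsage -> LlmUsageItem[] change shown in the type diff.
import { Llm } from "jaypie";

const llm = new Llm("openai");
const result = await llm.operate("Give me advice on Yahtzee");

// One usage item per model call (multi-turn runs produce several)
const totals = result.usage.reduce(
  (acc, item) => ({
    input: acc.input + item.input,
    output: acc.output + item.output,
    reasoning: acc.reasoning + item.reasoning,
    total: acc.total + item.total,
  }),
  { input: 0, output: 0, reasoning: 0, total: 0 },
);

console.log(totals);            // summed token counts across calls
console.log(result.reasoning);  // reasoning summaries (new string[] field)
console.log(result.model, result.provider); // new optional response metadata
```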