@jaypie/mcp 0.3.2 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79) hide show
  1. package/dist/createMcpServer.d.ts +7 -1
  2. package/dist/index.js +26 -3135
  3. package/dist/index.js.map +1 -1
  4. package/dist/suite.d.ts +1 -0
  5. package/dist/suite.js +2442 -0
  6. package/dist/suite.js.map +1 -0
  7. package/package.json +8 -3
  8. package/release-notes/constructs/1.2.17.md +11 -0
  9. package/release-notes/fabric/0.1.2.md +11 -0
  10. package/release-notes/fabric/0.1.3.md +25 -0
  11. package/release-notes/fabric/0.1.4.md +42 -0
  12. package/release-notes/mcp/0.3.3.md +12 -0
  13. package/release-notes/mcp/0.3.4.md +36 -0
  14. package/release-notes/mcp/0.4.0.md +27 -0
  15. package/release-notes/testkit/1.2.15.md +23 -0
  16. package/skills/agents.md +25 -0
  17. package/skills/aws.md +107 -0
  18. package/skills/cdk.md +141 -0
  19. package/skills/cicd.md +152 -0
  20. package/skills/datadog.md +129 -0
  21. package/skills/debugging.md +148 -0
  22. package/skills/dns.md +134 -0
  23. package/skills/dynamodb.md +140 -0
  24. package/skills/errors.md +142 -0
  25. package/skills/fabric.md +191 -0
  26. package/skills/index.md +7 -0
  27. package/skills/jaypie.md +100 -0
  28. package/skills/legacy.md +97 -0
  29. package/skills/logs.md +160 -0
  30. package/skills/mocks.md +174 -0
  31. package/skills/models.md +195 -0
  32. package/skills/releasenotes.md +94 -0
  33. package/skills/secrets.md +155 -0
  34. package/skills/services.md +175 -0
  35. package/skills/style.md +190 -0
  36. package/skills/tests.md +209 -0
  37. package/skills/tools.md +127 -0
  38. package/skills/topics.md +116 -0
  39. package/skills/variables.md +146 -0
  40. package/skills/writing.md +153 -0
  41. package/prompts/Branch_Management.md +0 -34
  42. package/prompts/Development_Process.md +0 -89
  43. package/prompts/Jaypie_Agent_Rules.md +0 -110
  44. package/prompts/Jaypie_Auth0_Express_Mongoose.md +0 -736
  45. package/prompts/Jaypie_Browser_and_Frontend_Web_Packages.md +0 -18
  46. package/prompts/Jaypie_CDK_Constructs_and_Patterns.md +0 -430
  47. package/prompts/Jaypie_CICD_with_GitHub_Actions.md +0 -371
  48. package/prompts/Jaypie_Commander_CLI_Package.md +0 -166
  49. package/prompts/Jaypie_Core_Errors_and_Logging.md +0 -39
  50. package/prompts/Jaypie_DynamoDB_Package.md +0 -774
  51. package/prompts/Jaypie_Eslint_NPM_Package.md +0 -78
  52. package/prompts/Jaypie_Express_Package.md +0 -630
  53. package/prompts/Jaypie_Fabric_Commander.md +0 -411
  54. package/prompts/Jaypie_Fabric_LLM.md +0 -312
  55. package/prompts/Jaypie_Fabric_Lambda.md +0 -308
  56. package/prompts/Jaypie_Fabric_MCP.md +0 -316
  57. package/prompts/Jaypie_Fabric_Package.md +0 -513
  58. package/prompts/Jaypie_Fabricator.md +0 -617
  59. package/prompts/Jaypie_Ideal_Project_Structure.md +0 -78
  60. package/prompts/Jaypie_Init_CICD_with_GitHub_Actions.md +0 -1186
  61. package/prompts/Jaypie_Init_Express_on_Lambda.md +0 -115
  62. package/prompts/Jaypie_Init_Jaypie_CDK_Package.md +0 -35
  63. package/prompts/Jaypie_Init_Lambda_Package.md +0 -505
  64. package/prompts/Jaypie_Init_Monorepo_Project.md +0 -44
  65. package/prompts/Jaypie_Init_Project_Subpackage.md +0 -65
  66. package/prompts/Jaypie_Legacy_Patterns.md +0 -15
  67. package/prompts/Jaypie_Llm_Calls.md +0 -449
  68. package/prompts/Jaypie_Llm_Tools.md +0 -155
  69. package/prompts/Jaypie_MCP_Package.md +0 -281
  70. package/prompts/Jaypie_Mocks_and_Testkit.md +0 -137
  71. package/prompts/Jaypie_Repokit.md +0 -103
  72. package/prompts/Jaypie_Scrub.md +0 -177
  73. package/prompts/Jaypie_Streaming.md +0 -467
  74. package/prompts/Templates_CDK_Subpackage.md +0 -115
  75. package/prompts/Templates_Express_Subpackage.md +0 -187
  76. package/prompts/Templates_Project_Monorepo.md +0 -326
  77. package/prompts/Templates_Project_Subpackage.md +0 -93
  78. package/prompts/Write_Efficient_Prompt_Guides.md +0 -48
  79. package/prompts/Write_and_Maintain_Engaging_Readme.md +0 -67
@@ -1,65 +0,0 @@
1
- ---
2
- description: Create a subpackage within an existing monorepo project
3
- ---
4
-
5
- # Jaypie Initialize Project Subpackage
6
-
7
- Create a subpackage within an existing monorepo project.
8
-
9
- ## Goal
10
-
11
- * TypeScript subpackage with Vite/Vitest
12
- * Standard Jaypie project structure
13
- * NPM workspace integration
14
- * ESLint configuration inheritance
15
-
16
- ## Guidelines
17
-
18
- * Follow Jaypie_Ideal_Project_Structure.md conventions
19
- * Subpackage names follow "@project-org/package-name" pattern (example: "@jaypie/errors")
20
- * Use `"version": "0.0.1"`, `"type": "module"`, and `"private": true`
21
- * Place packages in `packages/<package-name>/` directory
22
- * Build tool: Use Vite for new TypeScript packages (template uses Vite). Some older packages use Rollup.
23
-
24
- ## Process
25
-
26
- 1. Create package directory structure:
27
- ```
28
- packages/<package-name>/
29
- ├── src/
30
- ├── package.json
31
- └── tsconfig.json
32
- ```
33
-
34
- 2. Create template files using Templates_Project_Subpackage.md:
35
- * Create `packages/<package-name>/package.json` from template
36
- * Create `packages/<package-name>/tsconfig.json` from template
37
- * Create `packages/<package-name>/vite.config.ts` from template
38
- * Create `packages/<package-name>/vitest.config.ts` from template
39
- * Create `packages/<package-name>/vitest.setup.ts` from template
40
-
41
- 3. Update package.json:
42
- * Edit name from "@project-org/project-name" to "@project-org/<package-name>"
43
- * Keep all other fields as-is from template
44
-
45
- 4. Create basic src structure:
46
- * Create `src/index.ts` with basic export
47
- * Create `src/__tests__/` directory for tests
48
- * Create `src/__tests__/index.spec.ts` with basic test (use sections: Base Cases, Happy Paths)
49
-
50
- 5. Update workspace configuration:
51
- * Add package path to `test.projects` array in root `vitest.config.ts` (format: "packages/<package-name>")
52
- * Ensure packages are listed explicitly (no wildcards)
53
- * Example: Add "packages/new-package" to the projects array
54
-
55
- 6. Install dependencies for the new package:
56
- * Use `npm install <package-name> --workspace ./packages/<package-name>` to add dependencies
57
- * Use `npm install <package-name> --workspace ./packages/<package-name> --save-dev` for dev dependencies
58
- * Never manually edit `package.json` - always use npm commands to maintain package-lock.json
59
-
60
- ## Context
61
-
62
- prompts/Jaypie_Ideal_Project_Structure.md
63
- prompts/Templates_Project_Subpackage.md
64
- package.json
65
- vitest.config.ts
@@ -1,15 +0,0 @@
1
- ---
2
- description: Old conventions repositories should update and the new conventions they should follow
3
- ---
4
-
5
- # Jaypie Legacy Patterns
6
-
7
- ## Mongoose
8
-
9
- Mongoose is a legacy pattern; use MongoDB directly or another data layer instead
10
-
11
- ## TypeScript, Not Vanilla JavaScript
12
-
13
- ## Vite for Builds
14
-
15
- Subpackage `build` script should run `vite build && tsc --emitDeclarationOnly`
@@ -1,449 +0,0 @@
1
- ---
2
- trigger: model_decision
3
- description: Calling OpenAI and other provider LLM functions from Jaypie, specifically using Jaypie's Llm class and Llm.operate() function
4
- ---
5
-
6
- # LLM Calls with Jaypie 🗣️
7
-
8
- Streamline API calls with multi-model capabilities
9
-
10
- ## Types
11
-
12
- ```
13
- export interface LlmProvider {
14
- operate(
15
- input: string | LlmHistory | LlmInputMessage | LlmOperateInput,
16
- options?: LlmOperateOptions,
17
- ): Promise<LlmOperateResponse>;
18
- send(
19
- message: string,
20
- options?: LlmMessageOptions,
21
- ): Promise<string | JsonObject>;
22
- }
23
-
24
- // Simplified input for files and images
25
- type LlmOperateInput = LlmOperateInputContent[];
26
- type LlmOperateInputContent = string | LlmOperateInputFile | LlmOperateInputImage;
27
-
28
- interface LlmOperateInputFile {
29
- file: string; // Path or filename
30
- bucket?: string; // S3 bucket (uses CDK_ENV_BUCKET if omitted)
31
- pages?: number[]; // Extract specific PDF pages (omit = all)
32
- data?: string; // Base64 data (skips file loading)
33
- }
34
-
35
- interface LlmOperateInputImage {
36
- image: string; // Path or filename
37
- bucket?: string; // S3 bucket (uses CDK_ENV_BUCKET if omitted)
38
- data?: string; // Base64 data (skips file loading)
39
- }
40
-
41
- export interface LlmOperateOptions {
42
- data?: NaturalMap;
43
- explain?: boolean;
44
- format?: JsonObject | NaturalSchema | z.ZodType;
45
- history?: LlmHistory;
46
- hooks?: LlmOperateHooks;
47
- instructions?: string;
48
- model?: string;
49
- placeholders?: {
50
- input?: boolean;
51
- instructions?: boolean;
52
- system?: boolean;
53
- };
54
- providerOptions?: JsonObject;
55
- system?: string;
56
- tools?: LlmTool[] | Toolkit;
57
- turns?: boolean | number;
58
- user?: string;
59
- }
60
-
61
- export interface LlmOperateHooks {
62
- afterEachModelResponse?: (context: HookContext) => unknown | Promise<unknown>;
63
- afterEachTool?: (context: ToolHookContext) => unknown | Promise<unknown>;
64
- beforeEachModelRequest?: (context: HookContext) => unknown | Promise<unknown>;
65
- beforeEachTool?: (context: ToolHookContext) => unknown | Promise<unknown>;
66
- onRetryableModelError?: (context: ErrorHookContext) => unknown | Promise<unknown>;
67
- onToolError?: (context: ToolErrorContext) => unknown | Promise<unknown>;
68
- onUnrecoverableModelError?: (context: ErrorHookContext) => unknown | Promise<unknown>;
69
- }
70
-
71
- export interface LlmOperateResponse {
72
- content?: string | JsonObject;
73
- error?: LlmError;
74
- history: LlmHistory;
75
- model?: string;
76
- output: LlmOutput;
77
- provider?: string;
78
- reasoning: string[];
79
- responses: JsonReturn[];
80
- status: LlmResponseStatus;
81
- usage: LlmUsage;
82
- }
83
-
84
- // LlmUsage is an array of usage items (one per model call in multi-turn)
85
- type LlmUsage = LlmUsageItem[];
86
-
87
- interface LlmUsageItem {
88
- input: number;
89
- output: number;
90
- reasoning: number;
91
- total: number;
92
- model?: string;
93
- provider?: string;
94
- }
95
- ```
96
-
97
- ## Declaring an Llm
98
-
99
- ```
100
- import { Llm } from "jaypie";
101
-
102
- const llm = new Llm();
103
-
104
- const result = await llm.operate("Give me advice on Yahtzee");
105
- ```
106
-
107
- ## Providers and Models
108
-
109
- Available providers: `anthropic`, `gemini`, `openai`, `openrouter`
110
-
111
- ```typescript
112
- import { Llm, PROVIDER } from "jaypie";
113
-
114
- // Using provider name (uses provider's default model)
115
- const llm = new Llm("anthropic");
116
-
117
- // Using model name directly (provider auto-detected)
118
- const llm2 = new Llm("claude-sonnet-4-0");
119
- const llm3 = new Llm("gpt-4.1");
120
- const llm4 = new Llm("gemini-2.5-flash");
121
-
122
- // Using provider with specific model
123
- const llm5 = new Llm("openai", { model: "gpt-4.1" });
124
-
125
- // Using constants
126
- const llm6 = new Llm(PROVIDER.OPENAI.NAME, {
127
- model: PROVIDER.OPENAI.MODEL.LARGE
128
- });
129
- ```
130
-
131
- ### Model Aliases
132
-
133
- Each provider has standard aliases: `DEFAULT`, `SMALL`, `LARGE`, `TINY`
134
-
135
- | Provider | DEFAULT | LARGE | SMALL | TINY |
136
- |----------|---------|-------|-------|------|
137
- | anthropic | claude-opus-4-1 | claude-opus-4-1 | claude-sonnet-4-0 | claude-3-5-haiku-latest |
138
- | gemini | gemini-3-pro-preview | gemini-3-pro-preview | gemini-3-flash-preview | gemini-2.0-flash-lite |
139
- | openai | gpt-4.1 | gpt-4.1 | gpt-4.1-mini | gpt-4.1-nano |
140
- | openrouter | z-ai/glm-4.7 | z-ai/glm-4.7 | z-ai/glm-4.7 | z-ai/glm-4.7 |
141
-
142
- ### Provider Constants
143
-
144
- ```typescript
145
- import { PROVIDER } from "jaypie";
146
-
147
- // Anthropic models
148
- PROVIDER.ANTHROPIC.MODEL.CLAUDE_OPUS_4 // claude-opus-4-1
149
- PROVIDER.ANTHROPIC.MODEL.CLAUDE_SONNET_4 // claude-sonnet-4-0
150
- PROVIDER.ANTHROPIC.MODEL.CLAUDE_3_HAIKU // claude-3-5-haiku-latest
151
-
152
- // Gemini models
153
- PROVIDER.GEMINI.MODEL.GEMINI_3_PRO_PREVIEW // gemini-3-pro-preview
154
- PROVIDER.GEMINI.MODEL.GEMINI_2_5_FLASH // gemini-2.5-flash
155
- PROVIDER.GEMINI.MODEL.GEMINI_2_0_FLASH // gemini-2.0-flash
156
-
157
- // OpenAI models
158
- PROVIDER.OPENAI.MODEL.GPT_4_1 // gpt-4.1
159
- PROVIDER.OPENAI.MODEL.GPT_4_O // gpt-4o
160
- PROVIDER.OPENAI.MODEL.O3 // o3
161
- PROVIDER.OPENAI.MODEL.O4_MINI // o4-mini
162
- ```
163
-
164
- ## "Operating" an Llm
165
-
166
- operate takes options as an optional second argument (an object)
167
-
168
- ```
169
- import { Llm, toolkit } from "jaypie";
170
-
171
- const result = await llm.operate("Take a Yahtzee turn and report the results", {
172
- format: {
173
- throws: Array,
174
- score: Number,
175
- category: String,
176
- },
177
- tools: [toolkit.roll]
178
- });
179
- ```
180
-
181
- data is an object that will be used for variable replacements in input, instructions, and system.
182
- explain requires the model to pass a rationale explaining its choice with each tool call.
183
- format causes structured output to follow the provided schema.
184
- history is an existing llm history.
185
- Calls to the same instance automatically pass history.
186
- instructions are one-time instructions.
187
- placeholders is an object that toggles which fields (input, instructions, system) data replacements apply to.
188
- providerOptions passes additional options to the provider.
189
- system is a permanent starting instruction.
190
- See ./Jaypie_Llm_Tools.md for tool formats.
191
- turns disables multi-turn behavior or restricts the number of turns that can be taken.
192
- user tracks the end user.
193
-
194
- ## Response
195
-
196
- content is a convenience string for the model's response.
197
- content will be an object when format was passed and the provider supports structured data.
198
- error will include any errors.
199
- output is just the output components of full responses.
200
- responses are the complete responses.
201
-
202
- ## Files and Images
203
-
204
- Use `LlmOperateInput` array syntax to send files and images with automatic loading and provider translation:
205
-
206
- ```javascript
207
- import { Llm } from "jaypie";
208
-
209
- const llm = new Llm("openai");
210
-
211
- // Image from local filesystem
212
- const imageResult = await llm.operate([
213
- "Extract text from this image",
214
- { image: "/path/to/photo.png" }
215
- ]);
216
-
217
- // PDF from local filesystem
218
- const pdfResult = await llm.operate([
219
- "Summarize this document",
220
- { file: "/path/to/document.pdf" }
221
- ]);
222
-
223
- // From S3 bucket (uses CDK_ENV_BUCKET if bucket omitted)
224
- const s3Result = await llm.operate([
225
- "Analyze this file",
226
- { file: "documents/report.pdf", bucket: "my-bucket" }
227
- ]);
228
-
229
- // Extract specific PDF pages
230
- const pagesResult = await llm.operate([
231
- "Read pages 1-3",
232
- { file: "large-doc.pdf", pages: [1, 2, 3] }
233
- ]);
234
-
235
- // With pre-loaded base64 data (skips file loading)
236
- const base64Result = await llm.operate([
237
- "Describe this image",
238
- { image: "photo.jpg", data: base64String }
239
- ]);
240
-
241
- // Multiple files and text
242
- const multiResult = await llm.operate([
243
- "Compare these documents",
244
- { file: "doc1.pdf" },
245
- { file: "doc2.pdf" },
246
- "Focus on the methodology section"
247
- ]);
248
- ```
249
-
250
- ### File Resolution Order
251
-
252
- 1. If `data` is present → uses base64 directly
253
- 2. If `bucket` is present → loads from S3
254
- 3. If `CDK_ENV_BUCKET` env var exists → loads from that S3 bucket
255
- 4. Otherwise → loads from local filesystem (relative to process.cwd())
256
-
257
- ### Supported Image Extensions
258
-
259
- Files with these extensions are treated as images: `png`, `jpg`, `jpeg`, `gif`, `webp`, `svg`, `bmp`, `ico`, `tiff`, `avif`
260
-
261
- ## Streaming
262
-
263
- Use `Llm.stream()` for real-time streaming responses:
264
-
265
- ```javascript
266
- import { Llm } from "jaypie";
267
-
268
- const llm = new Llm("anthropic");
269
-
270
- // Basic streaming
271
- for await (const chunk of llm.stream("Tell me a story")) {
272
- if (chunk.type === "text") {
273
- process.stdout.write(chunk.content);
274
- }
275
- }
276
-
277
- // Streaming with tools
278
- for await (const chunk of llm.stream("Roll 3d6", { tools: [roll] })) {
279
- switch (chunk.type) {
280
- case "text":
281
- console.log("Text:", chunk.content);
282
- break;
283
- case "tool_call":
284
- console.log("Calling tool:", chunk.toolCall.name);
285
- break;
286
- case "tool_result":
287
- console.log("Tool result:", chunk.toolResult.result);
288
- break;
289
- case "done":
290
- console.log("Usage:", chunk.usage);
291
- break;
292
- case "error":
293
- console.error("Error:", chunk.error);
294
- break;
295
- }
296
- }
297
-
298
- // Static method
299
- for await (const chunk of Llm.stream("Hello", { llm: "openai" })) {
300
- // ...
301
- }
302
- ```
303
-
304
- ### Stream Chunk Types
305
-
306
- ```typescript
307
- type LlmStreamChunk =
308
- | LlmStreamChunkText // { type: "text", content: string }
309
- | LlmStreamChunkToolCall // { type: "tool_call", toolCall: { id, name, arguments } }
310
- | LlmStreamChunkToolResult // { type: "tool_result", toolResult: { id, name, result } }
311
- | LlmStreamChunkDone // { type: "done", usage: LlmUsage }
312
- | LlmStreamChunkError; // { type: "error", error: { status, title, detail? } }
313
- ```
314
-
315
- ### Streaming to Express
316
-
317
- Use `createExpressStream` to pipe LLM streams to Express responses:
318
-
319
- ```javascript
320
- import { expressStreamHandler, Llm, createExpressStream } from "jaypie";
321
-
322
- const chatRoute = expressStreamHandler(async (req, res) => {
323
- const llm = new Llm("anthropic");
324
- const stream = llm.stream(req.body.prompt);
325
- await createExpressStream(stream, res);
326
- });
327
-
328
- app.post("/chat", chatRoute);
329
- ```
330
-
331
- ### Streaming to Lambda
332
-
333
- Use `createLambdaStream` with Lambda Response Streaming:
334
-
335
- ```javascript
336
- import { lambdaStreamHandler, Llm, createLambdaStream } from "jaypie";
337
-
338
- const handler = awslambda.streamifyResponse(
339
- lambdaStreamHandler(async (event, context) => {
340
- const llm = new Llm("openai");
341
- const stream = llm.stream(event.prompt);
342
- await createLambdaStream(stream, context.responseStream);
343
- })
344
- );
345
- ```
346
-
347
- ### JaypieStream Wrapper
348
-
349
- Use `JaypieStream` or `createJaypieStream` for fluent piping:
350
-
351
- ```javascript
352
- import { createJaypieStream, Llm } from "jaypie";
353
-
354
- const llm = new Llm("gemini");
355
- const stream = createJaypieStream(llm.stream("Hello"));
356
-
357
- // Pipe to Express
358
- await stream.toExpress(res);
359
-
360
- // Or pipe to Lambda
361
- await stream.toLambda(responseStream);
362
-
363
- // Or iterate manually
364
- for await (const chunk of stream) {
365
- console.log(chunk);
366
- }
367
- ```
368
-
369
- ## Hooks
370
-
371
- Use hooks to intercept and observe the LLM lifecycle:
372
-
373
- ```javascript
374
- const result = await llm.operate("Process this", {
375
- hooks: {
376
- beforeEachModelRequest: ({ input, options, providerRequest }) => {
377
- console.log("About to call model with:", providerRequest);
378
- },
379
- afterEachModelResponse: ({ content, usage, providerResponse }) => {
380
- console.log("Model responded:", content);
381
- console.log("Tokens used:", usage);
382
- },
383
- beforeEachTool: ({ toolName, args }) => {
384
- console.log(`Calling tool ${toolName} with:`, args);
385
- },
386
- afterEachTool: ({ toolName, result }) => {
387
- console.log(`Tool ${toolName} returned:`, result);
388
- },
389
- onToolError: ({ toolName, error }) => {
390
- console.error(`Tool ${toolName} failed:`, error);
391
- },
392
- onRetryableModelError: ({ error }) => {
393
- console.warn("Retrying after error:", error);
394
- },
395
- onUnrecoverableModelError: ({ error }) => {
396
- console.error("Fatal error:", error);
397
- },
398
- },
399
- });
400
- ```
401
-
402
- ## Toolkit
403
-
404
- Group tools with `Toolkit` for additional features:
405
-
406
- ```javascript
407
- import { Llm, Toolkit } from "jaypie";
408
-
409
- const toolkit = new Toolkit([roll, weather, time], {
410
- explain: true, // Add __Explanation param to tools
411
- log: true, // Log tool calls (default)
412
- });
413
-
414
- // Extend toolkit with more tools
415
- toolkit.extend([anotherTool], { replace: true });
416
-
417
- const result = await llm.operate("Roll dice and check weather", {
418
- tools: toolkit,
419
- });
420
- ```
421
-
422
- ### Zod Schema Support
423
-
424
- Tool parameters can be defined using Zod schemas instead of JSON Schema:
425
-
426
- ```javascript
427
- import { z } from "zod/v4";
428
- import { Llm, Toolkit } from "jaypie";
429
-
430
- const weatherTool = {
431
- name: "get_weather",
432
- description: "Get weather for a city",
433
- parameters: z.object({
434
- city: z.string().describe("City name"),
435
- unit: z.enum(["celsius", "fahrenheit"]),
436
- }),
437
- type: "function",
438
- call: async ({ city, unit }) => ({ city, temp: 72, unit }),
439
- };
440
-
441
- const toolkit = new Toolkit([weatherTool]);
442
- // Zod schemas are automatically converted to JSON Schema
443
- ```
444
-
445
- ## Footnotes
446
-
447
- Llm.operate(input, options)
448
- The Llm.send function is an older version replaced by operate.
449
- Llm.send's `response` option is `format` in operate.
@@ -1,155 +0,0 @@
1
- ---
2
- trigger: glob
3
- globs: packages/tools/*
4
- ---
5
-
6
- # LLM Tools with Jaypie 🔧
7
-
8
- Extend LLM capabilities with tools for external actions and data retrieval
9
-
10
- ## Goal
11
-
12
- Create and integrate tools that enable LLMs to perform specific functions beyond their training data
13
-
14
- ## Interface
15
-
16
- Implement the `LlmTool` interface:
17
-
18
- ```typescript
19
- import { z } from "zod/v4";
20
-
21
- interface LlmTool {
22
- description: string;
23
- name: string;
24
- parameters: JsonObject | z.ZodType; // JSON Schema or Zod schema
25
- type: "function" | string;
26
- call: (args?: JsonObject) => Promise<AnyValue> | AnyValue;
27
- }
28
- ```
29
-
30
- Properties:
31
- - `description`: Clear explanation of tool functionality
32
- - `name`: Unique identifier
33
- - `parameters`: JSON Schema or Zod schema defining input parameters
34
- - `type`: Usually "function" (OpenAI convention)
35
- - `call`: Implementation function executed on invocation
36
-
37
- ## Example: Dice Roller (JSON Schema)
38
-
39
- ```typescript
40
- import { LlmTool } from "../types/LlmTool.interface.js";
41
- import { log, random, tryParseNumber } from "../util";
42
-
43
- export const roll: LlmTool = {
44
- description: "Roll one or more dice with a specified number of sides",
45
- name: "roll",
46
- parameters: {
47
- type: "object",
48
- properties: {
49
- number: {
50
- type: "number",
51
- description: "Number of dice to roll. Default: 1",
52
- },
53
- sides: {
54
- type: "number",
55
- description: "Number of sides on each die. Default: 6",
56
- },
57
- },
58
- required: ["number", "sides"],
59
- },
60
- type: "function",
61
- call: ({ number = 1, sides = 6 } = {}): {
62
- rolls: number[];
63
- total: number;
64
- } => {
65
- const rng = random();
66
- const rolls: number[] = [];
67
- let total = 0;
68
-
69
- const parsedNumber = tryParseNumber(number, {
70
- defaultValue: 1,
71
- warnFunction: log.warn,
72
- }) as number;
73
- const parsedSides = tryParseNumber(sides, {
74
- defaultValue: 6,
75
- warnFunction: log.warn,
76
- }) as number;
77
-
78
- for (let i = 0; i < parsedNumber; i++) {
79
- const rollValue = rng({ min: 1, max: parsedSides, integer: true });
80
- rolls.push(rollValue);
81
- total += rollValue;
82
- }
83
-
84
- return { rolls, total };
85
- },
86
- };
87
- ```
88
-
89
- ## Example: Weather Tool (Zod Schema)
90
-
91
- ```typescript
92
- import { z } from "zod/v4";
93
- import { LlmTool } from "jaypie";
94
-
95
- export const getWeather: LlmTool = {
96
- description: "Get current weather for a city",
97
- name: "get_weather",
98
- parameters: z.object({
99
- city: z.string().describe("City name"),
100
- unit: z.enum(["celsius", "fahrenheit"]).describe("Temperature unit"),
101
- }),
102
- type: "function",
103
- call: async ({ city, unit }) => {
104
- // Implementation here
105
- return { city, temperature: 72, unit };
106
- },
107
- };
108
- ```
109
-
110
- ## Best Practices
111
-
112
- ### Input Validation
113
- Validate and sanitize parameters with utilities like `tryParseNumber`.
114
-
115
- ### Clear Descriptions
116
- Write precise descriptions for tools and parameters to guide LLM usage.
117
-
118
- ### Consistent Returns
119
- Return consistent data structures for predictable LLM interpretation.
120
-
121
- ### Error Handling
122
- Implement robust error handling to prevent crashes and provide meaningful messages.
123
-
124
- ## Integration
125
-
126
- ```typescript
127
- import { Llm } from "jaypie";
128
- import { roll } from "./tools/roll.js";
129
-
130
- // Create Llm instance
131
- const llm = new Llm("openai", { model: "gpt-4o" });
132
-
133
- // Use tools with operate
134
- const response = await llm.operate("Roll 3d20 and tell me the result", {
135
- tools: [roll],
136
- });
137
-
138
- // Or use Toolkit for additional features
139
- import { Toolkit } from "jaypie";
140
-
141
- const toolkit = new Toolkit([roll], {
142
- explain: true, // Requires model to explain why it's calling tools
143
- log: true, // Log tool calls (default)
144
- });
145
-
146
- const result = await llm.operate("Roll some dice", {
147
- tools: toolkit,
148
- });
149
- ```
150
-
151
- ## References
152
-
153
- - [Jaypie Library](https://github.com/finlaysonstudio/jaypie)
154
- - [OpenAI Function Calling](https://platform.openai.com/docs/guides/function-calling)
155
- - [Jaypie_Llm_Calls.md](./Jaypie_Llm_Calls.md) to better understand Llm.operate()