llm-fns 1.0.8 → 1.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/createJsonSchemaLlmClient.d.ts CHANGED
@@ -23,7 +23,8 @@ export interface CreateJsonSchemaLlmClientParams {
     disableJsonFixer?: boolean;
 }
 export declare function createJsonSchemaLlmClient(params: CreateJsonSchemaLlmClientParams): {
-    promptJson: <T>(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: Record<string, any>, validator: (data: any) => T, options?: JsonSchemaLlmClientOptions) => Promise<T>;
+    promptJson: <T>(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: Record<string, any>, options?: JsonSchemaLlmClientOptions) => Promise<T>;
     isPromptJsonCached: (messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: Record<string, any>, options?: JsonSchemaLlmClientOptions) => Promise<boolean>;
 };
 export type JsonSchemaClient = ReturnType<typeof createJsonSchemaLlmClient>;
+export type PromptJsonFunction = JsonSchemaClient['promptJson'];
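For consumers, the effect of this signature change is that the third `validator` argument is gone and the generic `T` becomes a caller-side assertion; validation now happens internally via AJV (see the implementation diff below). A minimal before/after sketch, assuming `JsonSchemaClient` is re-exported from the package root and using a hypothetical `PersonSchema`:

```typescript
import type { JsonSchemaClient } from "llm-fns"; // assumption: alias re-exported from root

// Hypothetical JSON Schema and client for illustration.
const PersonSchema = { type: "object", properties: { name: { type: "string" } }, required: ["name"] };
declare const client: JsonSchemaClient;

interface Person { name: string; }

async function extract(): Promise<Person> {
  // 1.0.8:  client.promptJson<Person>(messages, PersonSchema, (data) => data as Person)
  // 1.0.10: no validator argument; AJV checks the response against PersonSchema internally.
  return client.promptJson<Person>(
    [{ role: "user", content: "Extract the person's name." }],
    PersonSchema
  );
}
```

The new `PromptJsonFunction` alias names this function type directly, which is convenient when passing `client.promptJson` around without re-deriving its type.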
package/dist/createJsonSchemaLlmClient.js CHANGED
@@ -1,10 +1,15 @@
 "use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.createJsonSchemaLlmClient = createJsonSchemaLlmClient;
+const ajv_1 = __importDefault(require("ajv"));
 const createLlmRetryClient_js_1 = require("./createLlmRetryClient.js");
 function createJsonSchemaLlmClient(params) {
     const { prompt, isPromptCached, fallbackPrompt, disableJsonFixer = false } = params;
     const llmRetryClient = (0, createLlmRetryClient_js_1.createLlmRetryClient)({ prompt, fallbackPrompt });
+    const ajv = new ajv_1.default({ strict: false }); // Initialize AJV
     async function _tryToFixJson(brokenResponse, schemaJsonString, errorDetails, options) {
         const fixupPrompt = `
 An attempt to generate a JSON object resulted in the following output, which is either not valid JSON or does not conform to the required schema.
@@ -140,7 +145,17 @@ ${schemaJsonString}`;
             : undefined;
         return { finalMessages, schemaJsonString, response_format };
     }
-    async function promptJson(messages, schema, validator, options) {
+    async function promptJson(messages, schema, options) {
+        // Always validate against the schema using AJV
+        const ajvValidator = (data) => {
+            const validate = ajv.compile(schema);
+            const valid = validate(data);
+            if (!valid) {
+                const errors = validate.errors?.map(e => `${e.instancePath} ${e.message}`).join(', ');
+                throw new Error(`AJV Validation Error: ${errors}`);
+            }
+            return data;
+        };
         const { finalMessages, schemaJsonString, response_format } = _getJsonPromptConfig(messages, schema, options);
         const processResponse = async (llmResponseString) => {
             let jsonData;
@@ -155,7 +170,7 @@ The response provided was not valid JSON. Please correct it.`;
                 throw new createLlmRetryClient_js_1.LlmRetryError(errorMessage, 'JSON_PARSE_ERROR', undefined, llmResponseString);
             }
             try {
-                const validatedData = await _validateOrFix(jsonData, validator, schemaJsonString, options);
+                const validatedData = await _validateOrFix(jsonData, ajvValidator, schemaJsonString, options);
                 return validatedData;
             }
             catch (validationError) {
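The injected `ajvValidator` is the standard AJV pattern: compile the schema, run it, and flatten `validate.errors` into a single message that `_validateOrFix` can feed back to the model on the next attempt. Extracted as a standalone sketch (not the shipped code, which closes over `schema` inside `promptJson`):

```typescript
import Ajv from "ajv";

const ajv = new Ajv({ strict: false }); // same option the client passes

function validateAgainstSchema<T>(schema: Record<string, any>, data: unknown): T {
  // AJV caches compiled validators per schema object, so repeated calls are cheap.
  const validate = ajv.compile(schema);
  if (!validate(data)) {
    const errors = validate.errors
      ?.map((e) => `${e.instancePath} ${e.message}`)
      .join(", ");
    throw new Error(`AJV Validation Error: ${errors}`);
  }
  return data as T;
}
```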
package/dist/createLlmRetryClient.d.ts CHANGED
@@ -45,3 +45,4 @@ export declare function createLlmRetryClient(params: CreateLlmRetryClientParams)
         <T = Buffer<ArrayBufferLike>>(options: LlmRetryOptions<T>): Promise<T>;
     };
 };
+export type LlmRetryClient = ReturnType<typeof createLlmRetryClient>;
package/dist/createZodLlmClient.d.ts CHANGED
@@ -26,3 +26,5 @@ export declare function createZodLlmClient(params: CreateZodLlmClientParams): {
         <T extends ZodTypeAny>(mainInstruction: string, userMessagePayload: string | OpenAI.Chat.Completions.ChatCompletionContentPart[], dataExtractionSchema: T, options?: ZodLlmClientOptions): Promise<boolean>;
     };
 };
+export type ZodLlmClient = ReturnType<typeof createZodLlmClient>;
+export type PromptZodFunction = ZodLlmClient['promptZod'];
package/dist/createZodLlmClient.js CHANGED
@@ -36,7 +36,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.normalizeZodArgs = normalizeZodArgs;
 exports.createZodLlmClient = createZodLlmClient;
 const z = __importStar(require("zod"));
-const zod_1 = require("zod");
 function isZodSchema(obj) {
     return (typeof obj === 'object' &&
         obj !== null &&
@@ -94,19 +93,11 @@ function createZodLlmClient(params) {
         const schema = z.toJSONSchema(dataExtractionSchema, {
             unrepresentable: 'any'
         });
-        const validator = (data) => {
-            try {
-                return dataExtractionSchema.parse(data);
-            }
-            catch (error) {
-                if (error instanceof zod_1.ZodError) {
-                    // Format the error nicely for the LLM
-                    throw new Error(JSON.stringify(error.format(), null, 2));
-                }
-                throw error;
-            }
-        };
-        return jsonSchemaClient.promptJson(messages, schema, validator, options);
+        const result = await jsonSchemaClient.promptJson(messages, schema, options);
+        // We still parse with Zod to ensure the types are correct and to apply any transformations/refinements
+        // that JSON Schema might not capture perfectly, or just to get the typed return.
+        // Note: If this fails, it won't trigger a retry in promptJson anymore, because promptJson only retries on AJV errors.
+        return dataExtractionSchema.parse(result);
     }
     async function isPromptZodCached(arg1, arg2, arg3, arg4) {
         const { messages, dataExtractionSchema, options } = normalizeZodArgs(arg1, arg2, arg3, arg4);
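The comment in this hunk is the release's main behavioral subtlety: constraints that survive `z.toJSONSchema` still get the re-ask loop via AJV, while Zod-only logic (refinements, transforms) now fails as a plain thrown `ZodError` after the loop has already accepted the response. A hypothetical schema that shows the split:

```typescript
import { z } from "zod";

const EvenValue = z
  .object({ value: z.number().int() }) // representable in JSON Schema → AJV re-asks the model on failure
  .refine((o) => o.value % 2 === 0, { message: "value must be even" }); // not representable → AJV never sees it

// In 1.0.8, a refinement failure was fed back to the LLM as a formatted ZodError;
// in 1.0.10, it surfaces directly from the final dataExtractionSchema.parse() call.
```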
package/dist/index.d.ts CHANGED
@@ -14,7 +14,7 @@ export declare function createLlm(params: CreateLlmFactoryParams): {
         <T extends import("zod").ZodType>(prompt: string, schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
         <T extends import("zod").ZodType>(mainInstruction: string, userMessagePayload: string | import("openai/resources/index.js").ChatCompletionContentPart[], dataExtractionSchema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
     };
-    promptJson: <T>(messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: Record<string, any>, validator: (data: any) => T, options?: import("./createJsonSchemaLlmClient.js").JsonSchemaLlmClientOptions) => Promise<T>;
+    promptJson: <T>(messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: Record<string, any>, options?: import("./createJsonSchemaLlmClient.js").JsonSchemaLlmClientOptions) => Promise<T>;
     isPromptJsonCached: (messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: Record<string, any>, options?: import("./createJsonSchemaLlmClient.js").JsonSchemaLlmClientOptions) => Promise<boolean>;
     promptRetry: {
         <T = import("openai/resources/index.js").ChatCompletion>(content: string, options?: Omit<import("./createLlmRetryClient.js").LlmRetryOptions<T>, "messages">): Promise<T>;
@@ -45,3 +45,4 @@ export declare function createLlm(params: CreateLlmFactoryParams): {
         (options: import("./createLlmClient.js").LlmPromptOptions): Promise<Buffer>;
     };
 };
+export type LlmClient = ReturnType<typeof createLlm>;
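The `ReturnType`-derived aliases added across these files (`JsonSchemaClient`, `PromptJsonFunction`, `LlmRetryClient`, `ZodLlmClient`, `PromptZodFunction`, `LlmClient`) let consumers name the inferred client shapes instead of writing `ReturnType<typeof createLlm>` themselves. A typical use, assuming the aliases are re-exported from the package root:

```typescript
import type { LlmClient } from "llm-fns"; // assumption: alias re-exported from root

// A service can declare its dependency on the fully-configured client
// without depending on the factory's parameter types.
class SentimentService {
  constructor(private readonly llm: LlmClient) {}

  analyze(text: string) {
    return this.llm.promptJson<{ sentiment: string }>(
      [{ role: "user", content: text }],
      { type: "object", properties: { sentiment: { type: "string" } }, required: ["sentiment"] }
    );
  }
}
```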
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "llm-fns",
-  "version": "1.0.8",
+  "version": "1.0.10",
   "description": "",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -11,6 +11,7 @@
   "author": "",
   "license": "MIT",
   "dependencies": {
+    "ajv": "^8.17.1",
     "openai": "^6.9.1",
     "zod": "^4.1.13"
   },
package/readme.md CHANGED
@@ -7,7 +7,7 @@ Designed for power users who need to switch between simple string prompts and co
 ## Installation
 
 ```bash
-npm install openai zod cache-manager p-queue
+npm install openai zod cache-manager p-queue ajv
 ```
 
 ## Quick Start (Factory)
@@ -122,7 +122,7 @@ This is a high-level wrapper that employs a **Re-asking Loop**. If the LLM outpu
 **Return Type:** `Promise<T>`
 
 ### Level 1: Raw JSON Schema (`promptJson`)
-Use this if you have a standard JSON Schema object (e.g. from another library or API) and don't want to use Zod.
+Use this if you have a standard JSON Schema object (e.g. from another library or API) and don't want to use Zod. It uses **AJV** internally to validate the response against the schema.
 
 ```typescript
 const MySchema = {
@@ -137,8 +137,7 @@ const MySchema = {
 
 const result = await llm.promptJson(
   [{ role: "user", content: "I love this!" }],
-  MySchema,
-  (data) => data // Optional validator function
+  MySchema
 );
 ```
 
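One caller-facing consequence of the readme change: when the re-ask loop (and the JSON fixer, unless `disableJsonFixer` is set) is exhausted, `promptJson` rejects rather than returning unvalidated data. A defensive-usage sketch, with `MySchema` and `llm` as in the readme example above; the exact error class surfaced at exhaustion is not shown in this diff:

```typescript
import type { LlmClient } from "llm-fns";     // assumption: alias re-exported from root
declare const llm: LlmClient;                 // from createLlm({ ... })
declare const MySchema: Record<string, any>;  // the readme's sentiment schema

try {
  const result = await llm.promptJson<{ sentiment: string }>(
    [{ role: "user", content: "I love this!" }],
    MySchema
  );
  console.log(result.sentiment);
} catch (err) {
  // All internal re-ask/fix attempts failed; don't trust partial output.
  console.error("JSON extraction failed:", err);
}
```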