llm-fns 1.0.11 → 1.0.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/createCachedFetcher.d.ts +45 -0
- package/dist/createCachedFetcher.js +175 -0
- package/dist/createJsonSchemaLlmClient.d.ts +1 -3
- package/dist/createJsonSchemaLlmClient.js +2 -11
- package/dist/createLlmClient.d.ts +3 -11
- package/dist/createLlmClient.js +11 -54
- package/dist/createZodLlmClient.d.ts +0 -6
- package/dist/createZodLlmClient.js +1 -8
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/llmFactory.d.ts +0 -11
- package/dist/llmFactory.js +1 -2
- package/package.json +3 -1
package/dist/createCachedFetcher.d.ts
ADDED
@@ -0,0 +1,45 @@
+export interface CacheLike {
+    get<T>(key: string): Promise<T | undefined | null>;
+    set(key: string, value: any, ttl?: number): Promise<any>;
+}
+export type FetcherOptions = RequestInit & {
+    /** Optional TTL override for this specific request, in milliseconds. */
+    ttl?: number;
+};
+export type Fetcher = (url: string | URL | Request, options?: FetcherOptions) => Promise<Response>;
+export interface CreateFetcherDependencies {
+    /** The cache instance (e.g., from cache-manager). */
+    cache?: CacheLike;
+    /** A prefix for all cache keys to avoid collisions. Defaults to 'http-cache'. */
+    prefix?: string;
+    /** Time-to-live for cache entries, in milliseconds. */
+    ttl?: number;
+    /** Request timeout in milliseconds. If not provided, no timeout is applied. */
+    timeout?: number;
+    /** User-Agent string for requests. */
+    userAgent?: string;
+    /** Optional custom fetch implementation. Defaults to global fetch. */
+    fetch?: (url: string | URL | Request, init?: RequestInit) => Promise<Response>;
+    /**
+     * Optional callback to determine if a response should be cached.
+     * It receives a cloned response that can be read (e.g. .json()).
+     * If it returns false, the response is not cached.
+     */
+    shouldCache?: (response: Response) => Promise<boolean> | boolean;
+}
+export declare class CachedResponse extends Response {
+    #private;
+    constructor(body: BodyInit | null, init: ResponseInit, finalUrl: string);
+    get url(): string;
+}
+/**
+ * Factory function that creates a `fetch` replacement with a caching layer.
+ * @param deps - Dependencies including the cache instance, prefix, TTL, and timeout.
+ * @returns A function with the same signature as native `fetch`.
+ */
+export declare function createCachedFetcher(deps: CreateFetcherDependencies): Fetcher;
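
The declarations above are the full public surface of the new module. A minimal usage sketch, not part of the diff, assuming an in-memory Map as a stand-in for a real CacheLike implementation (for example a cache-manager or keyv store):

import { createCachedFetcher, type CacheLike } from "llm-fns";

// Hypothetical in-memory CacheLike; production code would likely use cache-manager or keyv.
const store = new Map<string, unknown>();
const cache: CacheLike = {
    async get<T>(key: string) { return store.get(key) as T | undefined; },
    async set(key: string, value: any, _ttl?: number) { store.set(key, value); },
};

const cachedFetch = createCachedFetcher({
    cache,
    prefix: "my-app",        // namespaces the cache keys
    ttl: 60_000,             // default entry lifetime: one minute
    timeout: 10_000,         // abort requests after ten seconds
    userAgent: "my-app/1.0",
});

// Same call shape as native fetch; options.ttl overrides the default TTL per request.
const res = await cachedFetch("https://example.com/data.json", { ttl: 5_000 });
console.log(res.status, res.url);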
package/dist/createCachedFetcher.js
ADDED
@@ -0,0 +1,175 @@
+"use strict";
+// src/createCachedFetcher.ts
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CachedResponse = void 0;
+exports.createCachedFetcher = createCachedFetcher;
+const crypto_1 = __importDefault(require("crypto"));
+// A custom Response class to correctly handle the `.url` property on cache HITs.
+// This is an implementation detail and doesn't need to be exported.
+class CachedResponse extends Response {
+    #finalUrl;
+    constructor(body, init, finalUrl) {
+        super(body, init);
+        this.#finalUrl = finalUrl;
+    }
+    // Override the read-only `url` property
+    get url() {
+        return this.#finalUrl;
+    }
+}
+exports.CachedResponse = CachedResponse;
+/**
+ * Factory function that creates a `fetch` replacement with a caching layer.
+ * @param deps - Dependencies including the cache instance, prefix, TTL, and timeout.
+ * @returns A function with the same signature as native `fetch`.
+ */
+function createCachedFetcher(deps) {
+    const { cache, prefix = 'http-cache', ttl, timeout, userAgent, fetch: customFetch, shouldCache } = deps;
+    const fetchImpl = customFetch ?? fetch;
+    const fetchWithTimeout = async (url, options) => {
+        // Correctly merge headers using Headers API to handle various input formats (plain object, Headers instance, array)
+        // and avoid issues with spreading Headers objects which can lead to lost headers or Symbol errors.
+        const headers = new Headers(options?.headers);
+        if (userAgent) {
+            headers.set('User-Agent', userAgent);
+        }
+        const finalOptions = {
+            ...options,
+            headers,
+        };
+        if (!timeout) {
+            try {
+                return await fetchImpl(url, finalOptions);
+            }
+            catch (error) {
+                throw error;
+            }
+        }
+        const controller = new AbortController();
+        const timeoutId = setTimeout(() => {
+            const urlString = typeof url === 'string' ? url : url.toString();
+            console.log(`[Fetch Timeout] Request timed out after ${timeout}ms for: ${urlString}`);
+            controller.abort();
+        }, timeout);
+        finalOptions.signal = controller.signal;
+        try {
+            const response = await fetchImpl(url, finalOptions);
+            return response;
+        }
+        catch (error) {
+            if (error instanceof Error && error.name === 'AbortError') {
+                const urlString = typeof url === 'string' ? url : url.toString();
+                throw new Error(`Request to ${urlString} timed out after ${timeout}ms`);
+            }
+            throw error;
+        }
+        finally {
+            clearTimeout(timeoutId);
+        }
+    };
+    // This is the actual fetcher implementation, returned by the factory.
+    // It "closes over" the dependencies provided to the factory.
+    return async (url, options) => {
+        // Determine the request method. Default to GET for fetch.
+        let method = 'GET';
+        if (options?.method) {
+            method = options.method;
+        }
+        else if (url instanceof Request) {
+            method = url.method;
+        }
+        const urlString = typeof url === 'string' ? url : url.toString();
+        if (!cache) {
+            console.log(`[Cache SKIP] Cache not configured for request to: ${urlString}`);
+            return fetchWithTimeout(url, options);
+        }
+        let cacheKey = `${prefix}:${urlString}`;
+        // If POST (or others with body), append hash of body to cache key
+        if (method.toUpperCase() === 'POST' && options?.body) {
+            let bodyStr = '';
+            if (typeof options.body === 'string') {
+                bodyStr = options.body;
+            }
+            else if (options.body instanceof URLSearchParams) {
+                bodyStr = options.body.toString();
+            }
+            else {
+                // Fallback for other types, though mostly we expect string/JSON here
+                try {
+                    bodyStr = JSON.stringify(options.body);
+                }
+                catch (e) {
+                    bodyStr = 'unserializable';
+                }
+            }
+            const hash = crypto_1.default.createHash('md5').update(bodyStr).digest('hex');
+            cacheKey += `:${hash}`;
+        }
+        // 1. Check the cache
+        const cachedItem = await cache.get(cacheKey);
+        if (cachedItem) {
+            // Decode the base64 body back into a Buffer.
+            const body = Buffer.from(cachedItem.bodyBase64, 'base64');
+            return new CachedResponse(body, {
+                status: cachedItem.status,
+                headers: cachedItem.headers,
+            }, cachedItem.finalUrl);
+        }
+        // 2. Perform the actual fetch if not in cache
+        const fetchAndCache = async () => {
+            const response = await fetchWithTimeout(url, options);
+            // 3. Store in cache on success
+            if (response.ok) {
+                let isCacheable = true;
+                if (shouldCache) {
+                    const checkClone = response.clone();
+                    try {
+                        isCacheable = await shouldCache(checkClone);
+                    }
+                    catch (e) {
+                        console.warn('[Cache Check Error] shouldCache threw an error, skipping cache', e);
+                        isCacheable = false;
+                    }
+                }
+                else {
+                    // Default behavior: check for .error in JSON responses
+                    const contentType = response.headers.get('content-type');
+                    if (contentType && contentType.includes('application/json')) {
+                        const checkClone = response.clone();
+                        try {
+                            const body = await checkClone.json();
+                            if (body && typeof body === 'object' && 'error' in body) {
+                                console.log(`[Cache SKIP] JSON response contains .error property for: ${urlString}`);
+                                isCacheable = false;
+                            }
+                        }
+                        catch (e) {
+                            // Ignore JSON parse errors, assume cacheable if status is OK
+                        }
+                    }
+                }
+                if (isCacheable) {
+                    const responseClone = response.clone();
+                    const bodyBuffer = await responseClone.arrayBuffer();
+                    // Convert ArrayBuffer to a base64 string for safe JSON serialization.
+                    const bodyBase64 = Buffer.from(bodyBuffer).toString('base64');
+                    const headers = Object.fromEntries(response.headers.entries());
+                    const itemToCache = {
+                        bodyBase64,
+                        headers,
+                        status: response.status,
+                        finalUrl: response.url,
+                    };
+                    await cache.set(cacheKey, itemToCache, options?.ttl ?? ttl);
+                    console.log(`[Cache SET] for: ${cacheKey}`);
+                }
+            }
+            // 4. Return the original response
+            return response;
+        };
+        return fetchAndCache();
+    };
+}
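
Two details of the implementation above are easy to miss: POST bodies are MD5-hashed into the cache key, so identical payloads share an entry, and by default a JSON response containing an `error` property is not cached. A `shouldCache` callback replaces that default. A hedged sketch with a made-up caching rule (illustrative only, not part of the package):

import { createCachedFetcher } from "llm-fns";

const store = new Map<string, any>();
const cachedFetch = createCachedFetcher({
    cache: {
        async get(key: string) { return store.get(key); },
        async set(key: string, value: any) { store.set(key, value); },
    },
    prefix: "search",
    ttl: 15 * 60 * 1000, // fifteen minutes
    // The callback receives a cloned response, so reading the body here does not
    // consume the body handed back to the caller.
    shouldCache: async (response) => {
        const body = await response.json().catch(() => null);
        // Hypothetical rule: only cache non-empty result sets.
        return Array.isArray(body?.results) && body.results.length > 0;
    },
});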
package/dist/createJsonSchemaLlmClient.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import OpenAI from 'openai';
-import { PromptFunction, LlmPromptOptions
+import { PromptFunction, LlmPromptOptions } from "./createLlmClient.js";
 export type JsonSchemaLlmClientOptions = Omit<LlmPromptOptions, 'messages' | 'response_format'> & {
     maxRetries?: number;
     /**
@@ -25,13 +25,11 @@ export type JsonSchemaLlmClientOptions = Omit<LlmPromptOptions, 'messages' | 're
 };
 export interface CreateJsonSchemaLlmClientParams {
     prompt: PromptFunction;
-    isPromptCached: IsPromptCachedFunction;
     fallbackPrompt?: PromptFunction;
     disableJsonFixer?: boolean;
 }
 export declare function createJsonSchemaLlmClient(params: CreateJsonSchemaLlmClientParams): {
     promptJson: <T>(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: Record<string, any>, options?: JsonSchemaLlmClientOptions) => Promise<T>;
-    isPromptJsonCached: (messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: Record<string, any>, options?: JsonSchemaLlmClientOptions) => Promise<boolean>;
 };
 export type JsonSchemaClient = ReturnType<typeof createJsonSchemaLlmClient>;
 export type PromptJsonFunction = JsonSchemaClient['promptJson'];
package/dist/createJsonSchemaLlmClient.js
CHANGED
@@ -7,7 +7,7 @@ exports.createJsonSchemaLlmClient = createJsonSchemaLlmClient;
 const ajv_1 = __importDefault(require("ajv"));
 const createLlmRetryClient_js_1 = require("./createLlmRetryClient.js");
 function createJsonSchemaLlmClient(params) {
-    const { prompt,
+    const { prompt, fallbackPrompt, disableJsonFixer = false } = params;
     const llmRetryClient = (0, createLlmRetryClient_js_1.createLlmRetryClient)({ prompt, fallbackPrompt });
     const ajv = new ajv_1.default({ strict: false }); // Initialize AJV
     async function _tryToFixJson(brokenResponse, schemaJsonString, errorDetails, options) {
@@ -198,14 +198,5 @@ The response was valid JSON but did not conform to the required schema. Please r
        };
        return llmRetryClient.promptTextRetry(retryOptions);
    }
-
-        const { finalMessages, response_format } = _getJsonPromptConfig(messages, schema, options);
-        const { maxRetries, useResponseFormat: _u, beforeValidation, validator, ...restOptions } = options || {};
-        return isPromptCached({
-            messages: finalMessages,
-            response_format,
-            ...restOptions
-        });
-    }
-    return { promptJson, isPromptJsonCached };
+    return { promptJson };
 }
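
With the cache-probing path removed, the JSON-schema client is now built from a `prompt` function alone and returns only `promptJson`. A rough call-shape sketch (illustrative; the model name and schema are made-up, and the exact ModelConfig shape is not shown in this diff):

import OpenAI from "openai";
import { createLlmClient, createJsonSchemaLlmClient } from "llm-fns";

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
// Assumption: a plain model identifier is accepted here; ModelConfig is not part of this diff.
const defaultModel = "gpt-4o-mini" as any;
const { prompt } = createLlmClient({ openai, defaultModel });
const { promptJson } = createJsonSchemaLlmClient({ prompt });

const landmark = await promptJson<{ city: string; country: string }>(
    [{ role: "user", content: "Where is the Eiffel Tower? Answer as JSON." }],
    {
        type: "object",
        properties: { city: { type: "string" }, country: { type: "string" } },
        required: ["city", "country"],
    },
);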
package/dist/createLlmClient.d.ts
CHANGED
@@ -1,5 +1,4 @@
 import OpenAI from "openai";
-import type { Cache } from 'cache-manager';
 import type PQueue from 'p-queue';
 export declare function countChars(message: OpenAI.Chat.Completions.ChatCompletionMessageParam): number;
 export declare function truncateSingleMessage(message: OpenAI.Chat.Completions.ChatCompletionMessageParam, charLimit: number): OpenAI.Chat.Completions.ChatCompletionMessageParam;
@@ -29,7 +28,6 @@ export type OpenRouterResponseFormat = {
 export interface LlmPromptOptions extends Omit<OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming, 'model' | 'response_format' | 'modalities' | 'messages'> {
     messages: string | OpenAI.Chat.Completions.ChatCompletionMessageParam[];
     model?: ModelConfig;
-    ttl?: number;
     retries?: number;
     /** @deprecated Use `reasoning` object instead. */
     response_format?: OpenRouterResponseFormat;
@@ -44,26 +42,21 @@ export interface LlmPromptOptions extends Omit<OpenAI.Chat.Compl
 */
 export interface CreateLlmClientParams {
     openai: OpenAI;
-    cache?: Cache;
     defaultModel: ModelConfig;
     maxConversationChars?: number;
     queue?: PQueue;
 }
 export declare function normalizeOptions(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): LlmPromptOptions;
 /**
- * Factory function that creates a GPT "prompt" function
- * @param params - The core dependencies (API key, base URL, default model
- * @returns An async function `prompt` ready to make OpenAI calls
+ * Factory function that creates a GPT "prompt" function.
+ * @param params - The core dependencies (API key, base URL, default model).
+ * @returns An async function `prompt` ready to make OpenAI calls.
 */
 export declare function createLlmClient(params: CreateLlmClientParams): {
    prompt: {
        (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<OpenAI.Chat.Completions.ChatCompletion>;
        (options: LlmPromptOptions): Promise<OpenAI.Chat.Completions.ChatCompletion>;
    };
-    isPromptCached: {
-        (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<boolean>;
-        (options: LlmPromptOptions): Promise<boolean>;
-    };
    promptText: {
        (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<string>;
        (options: LlmPromptOptions): Promise<string>;
@@ -74,6 +67,5 @@ export declare function createLlmClient(params: CreateLlmClientParams): {
    };
 };
 export type PromptFunction = ReturnType<typeof createLlmClient>['prompt'];
-export type IsPromptCachedFunction = ReturnType<typeof createLlmClient>['isPromptCached'];
 export type PromptTextFunction = ReturnType<typeof createLlmClient>['promptText'];
 export type PromptImageFunction = ReturnType<typeof createLlmClient>['promptImage'];
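
These removals are the heart of the release: `cache`, the per-call `ttl`, and `isPromptCached` disappear from the LLM client, and caching is expected to move down to the HTTP layer via `createCachedFetcher`. A hedged migration sketch; recent versions of the `openai` SDK accept a custom `fetch` in their client options, but verify that against the version you have installed:

import OpenAI from "openai";
import { createCachedFetcher, createLlmClient } from "llm-fns";

// Before this release (roughly): createLlmClient({ openai, cache, defaultModel, ... })
// Now: cache the HTTP responses themselves with a cached fetch implementation.
const store = new Map<string, any>();
const cachedFetch = createCachedFetcher({
    cache: {
        async get(key: string) { return store.get(key); },
        async set(key: string, value: any) { store.set(key, value); },
    },
    prefix: "llm",
    ttl: 24 * 60 * 60 * 1000, // one day
});

const openai = new OpenAI({
    apiKey: process.env.OPENAI_API_KEY,
    fetch: cachedFetch, // assumption: the installed openai version supports a custom fetch
});
const { prompt, promptText, promptImage } = createLlmClient({
    openai,
    defaultModel: "gpt-4o-mini" as any, // ModelConfig's exact shape is not shown in this diff
});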
package/dist/createLlmClient.js
CHANGED
@@ -1,14 +1,10 @@
 "use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.countChars = countChars;
 exports.truncateSingleMessage = truncateSingleMessage;
 exports.truncateMessages = truncateMessages;
 exports.normalizeOptions = normalizeOptions;
 exports.createLlmClient = createLlmClient;
-const crypto_1 = __importDefault(require("crypto"));
 const retryUtils_js_1 = require("./retryUtils.js");
 function countChars(message) {
     if (!message.content)
@@ -170,14 +166,14 @@ function normalizeOptions(arg1, arg2) {
     return options;
 }
 /**
- * Factory function that creates a GPT "prompt" function
- * @param params - The core dependencies (API key, base URL, default model
- * @returns An async function `prompt` ready to make OpenAI calls
+ * Factory function that creates a GPT "prompt" function.
+ * @param params - The core dependencies (API key, base URL, default model).
+ * @returns An async function `prompt` ready to make OpenAI calls.
 */
 function createLlmClient(params) {
-    const { openai,
-    const
-    const {
+    const { openai, defaultModel: factoryDefaultModel, maxConversationChars, queue } = params;
+    const getCompletionParams = (options) => {
+        const { model: callSpecificModel, messages, reasoning_effort, retries, ...restApiOptions } = options;
        // Ensure messages is an array (it should be if normalized, but for safety/types)
        const messagesArray = typeof messages === 'string'
            ? [{ role: 'user', content: messages }]
@@ -200,29 +196,13 @@ function createLlmClient(params) {
            messages: finalMessages,
            ...restApiOptions,
        };
-
-        if (cacheInstance) {
-            const cacheKeyString = JSON.stringify(completionParams);
-            cacheKey = `gptask:${crypto_1.default.createHash('md5').update(cacheKeyString).digest('hex')}`;
-        }
-        return { completionParams, cacheKey, ttl, modelToUse, finalMessages, retries };
+        return { completionParams, modelToUse, finalMessages, retries };
    };
    async function prompt(arg1, arg2) {
        const options = normalizeOptions(arg1, arg2);
-        const { completionParams,
-        if (cacheInstance && cacheKey) {
-            try {
-                const cachedResponse = await cacheInstance.get(cacheKey);
-                if (cachedResponse !== undefined && cachedResponse !== null) {
-                    return JSON.parse(cachedResponse);
-                }
-            }
-            catch (error) {
-                console.warn("Cache get error:", error);
-            }
-        }
+        const { completionParams, finalMessages, retries } = getCompletionParams(options);
        const promptSummary = getPromptSummary(finalMessages);
-        const
+        const apiCall = async () => {
            const task = () => (0, retryUtils_js_1.executeWithRetry)(async () => {
                return openai.chat.completions.create(completionParams);
            }, async (completion) => {
@@ -235,32 +215,9 @@ function createLlmClient(params) {
                return true;
            });
            const response = (await (queue ? queue.add(task, { id: promptSummary, messages: finalMessages }) : task()));
-            if (cacheInstance && response && cacheKey) {
-                try {
-                    await cacheInstance.set(cacheKey, JSON.stringify(response), ttl);
-                }
-                catch (error) {
-                    console.warn("Cache set error:", error);
-                }
-            }
            return response;
        };
-        return
-    }
-    async function isPromptCached(arg1, arg2) {
-        const options = normalizeOptions(arg1, arg2);
-        const { cacheKey } = getCompletionParamsAndCacheKey(options);
-        if (!cacheInstance || !cacheKey) {
-            return false;
-        }
-        try {
-            const cachedResponse = await cacheInstance.get(cacheKey);
-            return cachedResponse !== undefined && cachedResponse !== null;
-        }
-        catch (error) {
-            console.warn("Cache get error:", error);
-            return false;
-        }
+        return apiCall();
    }
    async function promptText(arg1, arg2) {
        const options = normalizeOptions(arg1, arg2);
@@ -291,5 +248,5 @@ function createLlmClient(params) {
        }
        throw new Error("LLM returned no image content.");
    }
-    return { prompt,
+    return { prompt, promptText, promptImage };
 }
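
The compiled client keeps both call shapes for each method, a bare string or a full options object, with `normalizeOptions` reconciling the two. Illustrative calls, reusing `prompt` and `promptText` from the migration sketch above:

// String shorthand: the message becomes a single user message.
const completion = await prompt("Say hello");
// Options-object form: anything from LlmPromptOptions, e.g. retries.
const text = await promptText({
    messages: [{ role: "user", content: "Say hello" }],
    retries: 2,
});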
package/dist/createZodLlmClient.d.ts
CHANGED
@@ -19,12 +19,6 @@ export declare function createZodLlmClient(params: CreateZodLlmClientParams): {
        <T extends ZodTypeAny>(prompt: string, schema: T, options?: ZodLlmClientOptions): Promise<z.infer<T>>;
        <T extends ZodTypeAny>(mainInstruction: string, userMessagePayload: string | OpenAI.Chat.Completions.ChatCompletionContentPart[], dataExtractionSchema: T, options?: ZodLlmClientOptions): Promise<z.infer<T>>;
    };
-    isPromptZodCached: {
-        <T extends ZodTypeAny>(schema: T, options?: ZodLlmClientOptions): Promise<boolean>;
-        <T extends ZodTypeAny>(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: T, options?: ZodLlmClientOptions): Promise<boolean>;
-        <T extends ZodTypeAny>(prompt: string, schema: T, options?: ZodLlmClientOptions): Promise<boolean>;
-        <T extends ZodTypeAny>(mainInstruction: string, userMessagePayload: string | OpenAI.Chat.Completions.ChatCompletionContentPart[], dataExtractionSchema: T, options?: ZodLlmClientOptions): Promise<boolean>;
-    };
 };
 export type ZodLlmClient = ReturnType<typeof createZodLlmClient>;
 export type PromptZodFunction = ZodLlmClient['promptZod'];
package/dist/createZodLlmClient.js
CHANGED
@@ -102,12 +102,5 @@ function createZodLlmClient(params) {
        });
        return result;
    }
-
-        const { messages, dataExtractionSchema, options } = normalizeZodArgs(arg1, arg2, arg3, arg4);
-        const schema = z.toJSONSchema(dataExtractionSchema, {
-            unrepresentable: 'any'
-        });
-        return jsonSchemaClient.isPromptJsonCached(messages, schema, options);
-    }
-    return { promptZod, isPromptZodCached };
+    return { promptZod };
 }
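
The Zod client mirrors the same simplification: `isPromptZodCached` is gone and only `promptZod` remains. A rough sketch of the surviving call shape (illustrative; `jsonSchemaClient` is the client built in the earlier sketch, and the schema is a made-up example):

import { z } from "zod";
import { createZodLlmClient } from "llm-fns";

const { promptZod } = createZodLlmClient({ jsonSchemaClient });

const Landmark = z.object({ city: z.string(), country: z.string() });
// Single-string overload: instruction plus Zod schema; the result is typed as z.infer<typeof Landmark>.
const where = await promptZod("Where is the Eiffel Tower?", Landmark);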
package/dist/index.d.ts
CHANGED
package/dist/index.js
CHANGED
@@ -20,3 +20,4 @@ __exportStar(require("./createZodLlmClient.js"), exports);
 __exportStar(require("./createJsonSchemaLlmClient.js"), exports);
 __exportStar(require("./llmFactory.js"), exports);
 __exportStar(require("./retryUtils.js"), exports);
+__exportStar(require("./createCachedFetcher.js"), exports);
package/dist/llmFactory.d.ts
CHANGED
@@ -8,14 +8,7 @@ export declare function createLlm(params: CreateLlmFactoryParams): {
        <T extends import("zod").ZodType>(prompt: string, schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<import("zod").infer<T>>;
        <T extends import("zod").ZodType>(mainInstruction: string, userMessagePayload: string | import("openai/resources/index.js").ChatCompletionContentPart[], dataExtractionSchema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<import("zod").infer<T>>;
    };
-    isPromptZodCached: {
-        <T extends import("zod").ZodType>(schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
-        <T extends import("zod").ZodType>(messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
-        <T extends import("zod").ZodType>(prompt: string, schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
-        <T extends import("zod").ZodType>(mainInstruction: string, userMessagePayload: string | import("openai/resources/index.js").ChatCompletionContentPart[], dataExtractionSchema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
-    };
    promptJson: <T>(messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: Record<string, any>, options?: import("./createJsonSchemaLlmClient.js").JsonSchemaLlmClientOptions) => Promise<T>;
-    isPromptJsonCached: (messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: Record<string, any>, options?: import("./createJsonSchemaLlmClient.js").JsonSchemaLlmClientOptions) => Promise<boolean>;
    promptRetry: {
        <T = import("openai/resources/index.js").ChatCompletion>(content: string, options?: Omit<import("./createLlmRetryClient.js").LlmRetryOptions<T>, "messages">): Promise<T>;
        <T = import("openai/resources/index.js").ChatCompletion>(options: import("./createLlmRetryClient.js").LlmRetryOptions<T>): Promise<T>;
@@ -32,10 +25,6 @@ export declare function createLlm(params: CreateLlmFactoryParams): {
        (content: string, options?: Omit<import("./createLlmClient.js").LlmPromptOptions, "messages">): Promise<import("openai/resources/index.js").ChatCompletion>;
        (options: import("./createLlmClient.js").LlmPromptOptions): Promise<import("openai/resources/index.js").ChatCompletion>;
    };
-    isPromptCached: {
-        (content: string, options?: Omit<import("./createLlmClient.js").LlmPromptOptions, "messages">): Promise<boolean>;
-        (options: import("./createLlmClient.js").LlmPromptOptions): Promise<boolean>;
-    };
    promptText: {
        (content: string, options?: Omit<import("./createLlmClient.js").LlmPromptOptions, "messages">): Promise<string>;
        (options: import("./createLlmClient.js").LlmPromptOptions): Promise<string>;
package/dist/llmFactory.js
CHANGED
@@ -11,8 +11,7 @@ function createLlm(params) {
        prompt: baseClient.prompt
    });
    const jsonSchemaClient = (0, createJsonSchemaLlmClient_js_1.createJsonSchemaLlmClient)({
-        prompt: baseClient.prompt
-        isPromptCached: baseClient.isPromptCached
+        prompt: baseClient.prompt
    });
    const zodClient = (0, createZodLlmClient_js_1.createZodLlmClient)({
        jsonSchemaClient
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "llm-fns",
-  "version": "1.0.11",
+  "version": "1.0.16",
   "description": "",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -13,6 +13,7 @@
   "dependencies": {
     "ajv": "^8.17.1",
     "openai": "^6.9.1",
+    "undici": "^7.16.0",
     "zod": "^4.1.13"
   },
   "devDependencies": {
@@ -20,6 +21,7 @@
     "@types/node": "^20.11.0",
     "cache-manager": "^7.2.5",
     "dotenv": "^16.6.1",
+    "keyv": "^5.5.5",
     "p-queue": "^9.0.1",
     "typescript": "^5.9.3",
     "vitest": "^1.2.1"