llm-fns 1.0.11 → 1.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/createCachedFetcher.d.ts ADDED
@@ -0,0 +1,34 @@
+ export interface CacheLike {
+     get<T>(key: string): Promise<T | undefined | null>;
+     set(key: string, value: any, ttl?: number): Promise<any>;
+ }
+ export type FetcherOptions = RequestInit & {
+     /** Optional TTL override for this specific request, in milliseconds. */
+     ttl?: number;
+ };
+ export type Fetcher = (url: string | URL | Request, options?: FetcherOptions) => Promise<Response>;
+ export interface CreateFetcherDependencies {
+     /** The cache instance (e.g., from cache-manager). */
+     cache?: CacheLike;
+     /** A prefix for all cache keys to avoid collisions. Defaults to 'http-cache'. */
+     prefix?: string;
+     /** Time-to-live for cache entries, in milliseconds. */
+     ttl?: number;
+     /** Request timeout in milliseconds. If not provided, no timeout is applied. */
+     timeout?: number;
+     /** User-Agent string for requests. */
+     userAgent?: string;
+     /** Optional custom fetch implementation. Defaults to global fetch. */
+     fetch?: (url: string | URL | Request, init?: RequestInit) => Promise<Response>;
+ }
+ export declare class CachedResponse extends Response {
+     #private;
+     constructor(body: BodyInit | null, init: ResponseInit, finalUrl: string);
+     get url(): string;
+ }
+ /**
+  * Factory function that creates a `fetch` replacement with a caching layer.
+  * @param deps - Dependencies including the cache instance, prefix, TTL, and timeout.
+  * @returns A function with the same signature as native `fetch`.
+  */
+ export declare function createCachedFetcher(deps: CreateFetcherDependencies): Fetcher;
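
A minimal usage sketch for the new `createCachedFetcher` export, assuming cache-manager v7 (present in devDependencies) provides the backing `CacheLike`; the URL and values below are illustrative:

import { createCache } from 'cache-manager';
import { createCachedFetcher } from 'llm-fns';

// cache-manager v7 exposes async get/set(key, value, ttl), which matches CacheLike.
const cache = createCache();

const cachedFetch = createCachedFetcher({
    cache,
    prefix: 'http-cache',
    ttl: 60_000,     // default TTL for cached entries, in milliseconds
    timeout: 10_000, // abort requests after 10 seconds
    userAgent: 'my-app/1.0',
});

// First call hits the network; an identical call within the TTL is served from the cache.
const res = await cachedFetch('https://example.com/api/data', { ttl: 5 * 60_000 });
console.log(res.status, await res.text());
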
package/dist/createCachedFetcher.js ADDED
@@ -0,0 +1,145 @@
+ "use strict";
+ // src/createCachedFetcher.ts
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.CachedResponse = void 0;
+ exports.createCachedFetcher = createCachedFetcher;
+ const crypto_1 = __importDefault(require("crypto"));
+ // A custom Response class to correctly handle the `.url` property on cache HITs.
+ // This is an implementation detail and doesn't need to be exported.
+ class CachedResponse extends Response {
+     #finalUrl;
+     constructor(body, init, finalUrl) {
+         super(body, init);
+         this.#finalUrl = finalUrl;
+     }
+     // Override the read-only `url` property
+     get url() {
+         return this.#finalUrl;
+     }
+ }
+ exports.CachedResponse = CachedResponse;
+ /**
+  * Factory function that creates a `fetch` replacement with a caching layer.
+  * @param deps - Dependencies including the cache instance, prefix, TTL, and timeout.
+  * @returns A function with the same signature as native `fetch`.
+  */
+ function createCachedFetcher(deps) {
+     const { cache, prefix = 'http-cache', ttl, timeout, userAgent, fetch: customFetch } = deps;
+     const fetchImpl = customFetch ?? fetch;
+     const fetchWithTimeout = async (url, options) => {
+         // Correctly merge headers using Headers API to handle various input formats (plain object, Headers instance, array)
+         // and avoid issues with spreading Headers objects which can lead to lost headers or Symbol errors.
+         const headers = new Headers(options?.headers);
+         if (userAgent) {
+             headers.set('User-Agent', userAgent);
+         }
+         const finalOptions = {
+             ...options,
+             headers,
+         };
+         if (!timeout) {
+             try {
+                 return await fetchImpl(url, finalOptions);
+             }
+             catch (error) {
+                 throw error;
+             }
+         }
+         const controller = new AbortController();
+         const timeoutId = setTimeout(() => {
+             const urlString = typeof url === 'string' ? url : url.toString();
+             console.log(`[Fetch Timeout] Request timed out after ${timeout}ms for: ${urlString}`);
+             controller.abort();
+         }, timeout);
+         finalOptions.signal = controller.signal;
+         try {
+             const response = await fetchImpl(url, finalOptions);
+             return response;
+         }
+         catch (error) {
+             if (error instanceof Error && error.name === 'AbortError') {
+                 const urlString = typeof url === 'string' ? url : url.toString();
+                 throw new Error(`Request to ${urlString} timed out after ${timeout}ms`);
+             }
+             throw error;
+         }
+         finally {
+             clearTimeout(timeoutId);
+         }
+     };
+     // This is the actual fetcher implementation, returned by the factory.
+     // It "closes over" the dependencies provided to the factory.
+     return async (url, options) => {
+         // Determine the request method. Default to GET for fetch.
+         let method = 'GET';
+         if (options?.method) {
+             method = options.method;
+         }
+         else if (url instanceof Request) {
+             method = url.method;
+         }
+         const urlString = typeof url === 'string' ? url : url.toString();
+         if (!cache) {
+             console.log(`[Cache SKIP] Cache not configured for request to: ${urlString}`);
+             return fetchWithTimeout(url, options);
+         }
+         let cacheKey = `${prefix}:${urlString}`;
+         // If POST (or others with body), append hash of body to cache key
+         if (method.toUpperCase() === 'POST' && options?.body) {
+             let bodyStr = '';
+             if (typeof options.body === 'string') {
+                 bodyStr = options.body;
+             }
+             else if (options.body instanceof URLSearchParams) {
+                 bodyStr = options.body.toString();
+             }
+             else {
+                 // Fallback for other types, though mostly we expect string/JSON here
+                 try {
+                     bodyStr = JSON.stringify(options.body);
+                 }
+                 catch (e) {
+                     bodyStr = 'unserializable';
+                 }
+             }
+             const hash = crypto_1.default.createHash('md5').update(bodyStr).digest('hex');
+             cacheKey += `:${hash}`;
+         }
+         // 1. Check the cache
+         const cachedItem = await cache.get(cacheKey);
+         if (cachedItem) {
+             // Decode the base64 body back into a Buffer.
+             const body = Buffer.from(cachedItem.bodyBase64, 'base64');
+             return new CachedResponse(body, {
+                 status: cachedItem.status,
+                 headers: cachedItem.headers,
+             }, cachedItem.finalUrl);
+         }
+         // 2. Perform the actual fetch if not in cache
+         const fetchAndCache = async () => {
+             const response = await fetchWithTimeout(url, options);
+             // 3. Store in cache on success
+             if (response.ok) {
+                 const responseClone = response.clone();
+                 const bodyBuffer = await responseClone.arrayBuffer();
+                 // Convert ArrayBuffer to a base64 string for safe JSON serialization.
+                 const bodyBase64 = Buffer.from(bodyBuffer).toString('base64');
+                 const headers = Object.fromEntries(response.headers.entries());
+                 const itemToCache = {
+                     bodyBase64,
+                     headers,
+                     status: response.status,
+                     finalUrl: response.url,
+                 };
+                 await cache.set(cacheKey, itemToCache, options?.ttl ?? ttl);
+                 console.log(`[Cache SET] for: ${cacheKey}`);
+             }
+             // 4. Return the original response
+             return response;
+         };
+         return fetchAndCache();
+     };
+ }
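
Since the `CacheLike` interface declared above only requires async `get`/`set`, a throwaway in-memory store is enough for tests. A minimal sketch against that interface (illustrative, not part of the package; TTL is treated as milliseconds, matching the `ttl` docs):

import type { CacheLike } from 'llm-fns';

// In-memory CacheLike backed by a Map; entries expire lazily on read.
export function createMemoryCache(): CacheLike {
    const store = new Map<string, { value: any; expiresAt?: number }>();
    return {
        async get<T>(key: string): Promise<T | undefined> {
            const entry = store.get(key);
            if (!entry) return undefined;
            if (entry.expiresAt !== undefined && Date.now() > entry.expiresAt) {
                store.delete(key);
                return undefined;
            }
            return entry.value as T;
        },
        async set(key: string, value: any, ttl?: number): Promise<any> {
            store.set(key, { value, expiresAt: ttl ? Date.now() + ttl : undefined });
            return value;
        },
    };
}

Such a store can be passed as `cache` to `createCachedFetcher` in place of a cache-manager instance.
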
package/dist/createJsonSchemaLlmClient.d.ts CHANGED
@@ -1,5 +1,5 @@
  import OpenAI from 'openai';
- import { PromptFunction, LlmPromptOptions, IsPromptCachedFunction } from "./createLlmClient.js";
+ import { PromptFunction, LlmPromptOptions } from "./createLlmClient.js";
  export type JsonSchemaLlmClientOptions = Omit<LlmPromptOptions, 'messages' | 'response_format'> & {
  maxRetries?: number;
  /**
@@ -25,13 +25,11 @@ export type JsonSchemaLlmClientOptions = Omit<LlmPromptOptions, 'messages' | 're
  };
  export interface CreateJsonSchemaLlmClientParams {
  prompt: PromptFunction;
- isPromptCached: IsPromptCachedFunction;
  fallbackPrompt?: PromptFunction;
  disableJsonFixer?: boolean;
  }
  export declare function createJsonSchemaLlmClient(params: CreateJsonSchemaLlmClientParams): {
  promptJson: <T>(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: Record<string, any>, options?: JsonSchemaLlmClientOptions) => Promise<T>;
- isPromptJsonCached: (messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: Record<string, any>, options?: JsonSchemaLlmClientOptions) => Promise<boolean>;
  };
  export type JsonSchemaClient = ReturnType<typeof createJsonSchemaLlmClient>;
  export type PromptJsonFunction = JsonSchemaClient['promptJson'];
package/dist/createJsonSchemaLlmClient.js CHANGED
@@ -7,7 +7,7 @@ exports.createJsonSchemaLlmClient = createJsonSchemaLlmClient;
  const ajv_1 = __importDefault(require("ajv"));
  const createLlmRetryClient_js_1 = require("./createLlmRetryClient.js");
  function createJsonSchemaLlmClient(params) {
- const { prompt, isPromptCached, fallbackPrompt, disableJsonFixer = false } = params;
+ const { prompt, fallbackPrompt, disableJsonFixer = false } = params;
  const llmRetryClient = (0, createLlmRetryClient_js_1.createLlmRetryClient)({ prompt, fallbackPrompt });
  const ajv = new ajv_1.default({ strict: false }); // Initialize AJV
  async function _tryToFixJson(brokenResponse, schemaJsonString, errorDetails, options) {
@@ -198,14 +198,5 @@ The response was valid JSON but did not conform to the required schema. Please r
  };
  return llmRetryClient.promptTextRetry(retryOptions);
  }
- async function isPromptJsonCached(messages, schema, options) {
- const { finalMessages, response_format } = _getJsonPromptConfig(messages, schema, options);
- const { maxRetries, useResponseFormat: _u, beforeValidation, validator, ...restOptions } = options || {};
- return isPromptCached({
- messages: finalMessages,
- response_format,
- ...restOptions
- });
- }
- return { promptJson, isPromptJsonCached };
+ return { promptJson };
  }
package/dist/createLlmClient.d.ts CHANGED
@@ -1,5 +1,4 @@
  import OpenAI from "openai";
- import type { Cache } from 'cache-manager';
  import type PQueue from 'p-queue';
  export declare function countChars(message: OpenAI.Chat.Completions.ChatCompletionMessageParam): number;
  export declare function truncateSingleMessage(message: OpenAI.Chat.Completions.ChatCompletionMessageParam, charLimit: number): OpenAI.Chat.Completions.ChatCompletionMessageParam;
@@ -29,7 +28,6 @@ export type OpenRouterResponseFormat = {
  export interface LlmPromptOptions extends Omit<OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming, 'model' | 'response_format' | 'modalities' | 'messages'> {
  messages: string | OpenAI.Chat.Completions.ChatCompletionMessageParam[];
  model?: ModelConfig;
- ttl?: number;
  retries?: number;
  /** @deprecated Use `reasoning` object instead. */
  response_format?: OpenRouterResponseFormat;
@@ -44,26 +42,21 @@ export interface LlmPromptOptions extends Omit<OpenAI.Chat.Completions.ChatCompl
  */
  export interface CreateLlmClientParams {
  openai: OpenAI;
- cache?: Cache;
  defaultModel: ModelConfig;
  maxConversationChars?: number;
  queue?: PQueue;
  }
  export declare function normalizeOptions(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): LlmPromptOptions;
  /**
- * Factory function that creates a GPT "prompt" function, with optional caching.
- * @param params - The core dependencies (API key, base URL, default model, and optional cache instance).
- * @returns An async function `prompt` ready to make OpenAI calls, with caching if configured.
+ * Factory function that creates a GPT "prompt" function.
+ * @param params - The core dependencies (API key, base URL, default model).
+ * @returns An async function `prompt` ready to make OpenAI calls.
  */
  export declare function createLlmClient(params: CreateLlmClientParams): {
  prompt: {
  (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<OpenAI.Chat.Completions.ChatCompletion>;
  (options: LlmPromptOptions): Promise<OpenAI.Chat.Completions.ChatCompletion>;
  };
- isPromptCached: {
- (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<boolean>;
- (options: LlmPromptOptions): Promise<boolean>;
- };
  promptText: {
  (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<string>;
  (options: LlmPromptOptions): Promise<string>;
@@ -74,6 +67,5 @@ export declare function createLlmClient(params: CreateLlmClientParams): {
  };
  };
  export type PromptFunction = ReturnType<typeof createLlmClient>['prompt'];
- export type IsPromptCachedFunction = ReturnType<typeof createLlmClient>['isPromptCached'];
  export type PromptTextFunction = ReturnType<typeof createLlmClient>['promptText'];
  export type PromptImageFunction = ReturnType<typeof createLlmClient>['promptImage'];
package/dist/createLlmClient.js CHANGED
@@ -1,14 +1,10 @@
  "use strict";
- var __importDefault = (this && this.__importDefault) || function (mod) {
- return (mod && mod.__esModule) ? mod : { "default": mod };
- };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.countChars = countChars;
  exports.truncateSingleMessage = truncateSingleMessage;
  exports.truncateMessages = truncateMessages;
  exports.normalizeOptions = normalizeOptions;
  exports.createLlmClient = createLlmClient;
- const crypto_1 = __importDefault(require("crypto"));
  const retryUtils_js_1 = require("./retryUtils.js");
  function countChars(message) {
  if (!message.content)
@@ -170,14 +166,14 @@ function normalizeOptions(arg1, arg2) {
  return options;
  }
  /**
- * Factory function that creates a GPT "prompt" function, with optional caching.
- * @param params - The core dependencies (API key, base URL, default model, and optional cache instance).
- * @returns An async function `prompt` ready to make OpenAI calls, with caching if configured.
+ * Factory function that creates a GPT "prompt" function.
+ * @param params - The core dependencies (API key, base URL, default model).
+ * @returns An async function `prompt` ready to make OpenAI calls.
  */
  function createLlmClient(params) {
- const { openai, cache: cacheInstance, defaultModel: factoryDefaultModel, maxConversationChars, queue } = params;
- const getCompletionParamsAndCacheKey = (options) => {
- const { ttl, model: callSpecificModel, messages, reasoning_effort, retries, ...restApiOptions } = options;
+ const { openai, defaultModel: factoryDefaultModel, maxConversationChars, queue } = params;
+ const getCompletionParams = (options) => {
+ const { model: callSpecificModel, messages, reasoning_effort, retries, ...restApiOptions } = options;
  // Ensure messages is an array (it should be if normalized, but for safety/types)
  const messagesArray = typeof messages === 'string'
  ? [{ role: 'user', content: messages }]
@@ -200,29 +196,13 @@ function createLlmClient(params) {
  messages: finalMessages,
  ...restApiOptions,
  };
- let cacheKey;
- if (cacheInstance) {
- const cacheKeyString = JSON.stringify(completionParams);
- cacheKey = `gptask:${crypto_1.default.createHash('md5').update(cacheKeyString).digest('hex')}`;
- }
- return { completionParams, cacheKey, ttl, modelToUse, finalMessages, retries };
+ return { completionParams, modelToUse, finalMessages, retries };
  };
  async function prompt(arg1, arg2) {
  const options = normalizeOptions(arg1, arg2);
- const { completionParams, cacheKey, ttl, modelToUse, finalMessages, retries } = getCompletionParamsAndCacheKey(options);
- if (cacheInstance && cacheKey) {
- try {
- const cachedResponse = await cacheInstance.get(cacheKey);
- if (cachedResponse !== undefined && cachedResponse !== null) {
- return JSON.parse(cachedResponse);
- }
- }
- catch (error) {
- console.warn("Cache get error:", error);
- }
- }
+ const { completionParams, finalMessages, retries } = getCompletionParams(options);
  const promptSummary = getPromptSummary(finalMessages);
- const apiCallAndCache = async () => {
+ const apiCall = async () => {
  const task = () => (0, retryUtils_js_1.executeWithRetry)(async () => {
  return openai.chat.completions.create(completionParams);
  }, async (completion) => {
@@ -235,32 +215,9 @@ function createLlmClient(params) {
  return true;
  });
  const response = (await (queue ? queue.add(task, { id: promptSummary, messages: finalMessages }) : task()));
- if (cacheInstance && response && cacheKey) {
- try {
- await cacheInstance.set(cacheKey, JSON.stringify(response), ttl);
- }
- catch (error) {
- console.warn("Cache set error:", error);
- }
- }
  return response;
  };
- return apiCallAndCache();
- }
- async function isPromptCached(arg1, arg2) {
- const options = normalizeOptions(arg1, arg2);
- const { cacheKey } = getCompletionParamsAndCacheKey(options);
- if (!cacheInstance || !cacheKey) {
- return false;
- }
- try {
- const cachedResponse = await cacheInstance.get(cacheKey);
- return cachedResponse !== undefined && cachedResponse !== null;
- }
- catch (error) {
- console.warn("Cache get error:", error);
- return false;
- }
+ return apiCall();
  }
  async function promptText(arg1, arg2) {
  const options = normalizeOptions(arg1, arg2);
@@ -291,5 +248,5 @@ function createLlmClient(params) {
  }
  throw new Error("LLM returned no image content.");
  }
- return { prompt, isPromptCached, promptText, promptImage };
+ return { prompt, promptText, promptImage };
  }
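
With `cache` and `ttl` gone from `CreateLlmClientParams`, response caching no longer happens inside the client. A hedged migration sketch, assuming the intent is to cache at the HTTP layer via the new `createCachedFetcher`, that cache-manager v7's `createCache` backs the store, and that the OpenAI SDK accepts a custom fetch implementation through its `fetch` client option:

import OpenAI from 'openai';
import { createCache } from 'cache-manager';
import { createCachedFetcher, createLlmClient } from 'llm-fns';

// HTTP-level cache; POST bodies are hashed into the cache key, so identical
// chat-completion requests can be served from the cache.
const cachedFetch = createCachedFetcher({ cache: createCache(), ttl: 10 * 60_000 });

// Assumption: the OpenAI client accepts a custom fetch implementation via `fetch`.
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY, fetch: cachedFetch });

const llm = createLlmClient({
    openai,
    defaultModel: 'gpt-4o-mini', // illustrative; the ModelConfig shape is not shown in this diff
});

const text = await llm.promptText('Say hello in one word.');
console.log(text);
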
package/dist/createZodLlmClient.d.ts CHANGED
@@ -19,12 +19,6 @@ export declare function createZodLlmClient(params: CreateZodLlmClientParams): {
  <T extends ZodTypeAny>(prompt: string, schema: T, options?: ZodLlmClientOptions): Promise<z.infer<T>>;
  <T extends ZodTypeAny>(mainInstruction: string, userMessagePayload: string | OpenAI.Chat.Completions.ChatCompletionContentPart[], dataExtractionSchema: T, options?: ZodLlmClientOptions): Promise<z.infer<T>>;
  };
- isPromptZodCached: {
- <T extends ZodTypeAny>(schema: T, options?: ZodLlmClientOptions): Promise<boolean>;
- <T extends ZodTypeAny>(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], schema: T, options?: ZodLlmClientOptions): Promise<boolean>;
- <T extends ZodTypeAny>(prompt: string, schema: T, options?: ZodLlmClientOptions): Promise<boolean>;
- <T extends ZodTypeAny>(mainInstruction: string, userMessagePayload: string | OpenAI.Chat.Completions.ChatCompletionContentPart[], dataExtractionSchema: T, options?: ZodLlmClientOptions): Promise<boolean>;
- };
  };
  export type ZodLlmClient = ReturnType<typeof createZodLlmClient>;
  export type PromptZodFunction = ZodLlmClient['promptZod'];
package/dist/createZodLlmClient.js CHANGED
@@ -102,12 +102,5 @@ function createZodLlmClient(params) {
  });
  return result;
  }
- async function isPromptZodCached(arg1, arg2, arg3, arg4) {
- const { messages, dataExtractionSchema, options } = normalizeZodArgs(arg1, arg2, arg3, arg4);
- const schema = z.toJSONSchema(dataExtractionSchema, {
- unrepresentable: 'any'
- });
- return jsonSchemaClient.isPromptJsonCached(messages, schema, options);
- }
- return { promptZod, isPromptZodCached };
+ return { promptZod };
  }
package/dist/index.d.ts CHANGED
@@ -4,3 +4,4 @@ export * from './createZodLlmClient.js';
  export * from './createJsonSchemaLlmClient.js';
  export * from './llmFactory.js';
  export * from './retryUtils.js';
+ export * from './createCachedFetcher.js';
package/dist/index.js CHANGED
@@ -20,3 +20,4 @@ __exportStar(require("./createZodLlmClient.js"), exports);
  __exportStar(require("./createJsonSchemaLlmClient.js"), exports);
  __exportStar(require("./llmFactory.js"), exports);
  __exportStar(require("./retryUtils.js"), exports);
+ __exportStar(require("./createCachedFetcher.js"), exports);
package/dist/llmFactory.d.ts CHANGED
@@ -8,14 +8,7 @@ export declare function createLlm(params: CreateLlmFactoryParams): {
  <T extends import("zod").ZodType>(prompt: string, schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<import("zod").infer<T>>;
  <T extends import("zod").ZodType>(mainInstruction: string, userMessagePayload: string | import("openai/resources/index.js").ChatCompletionContentPart[], dataExtractionSchema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<import("zod").infer<T>>;
  };
- isPromptZodCached: {
- <T extends import("zod").ZodType>(schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
- <T extends import("zod").ZodType>(messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
- <T extends import("zod").ZodType>(prompt: string, schema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
- <T extends import("zod").ZodType>(mainInstruction: string, userMessagePayload: string | import("openai/resources/index.js").ChatCompletionContentPart[], dataExtractionSchema: T, options?: import("./createZodLlmClient.js").ZodLlmClientOptions): Promise<boolean>;
- };
  promptJson: <T>(messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: Record<string, any>, options?: import("./createJsonSchemaLlmClient.js").JsonSchemaLlmClientOptions) => Promise<T>;
- isPromptJsonCached: (messages: import("openai/resources/index.js").ChatCompletionMessageParam[], schema: Record<string, any>, options?: import("./createJsonSchemaLlmClient.js").JsonSchemaLlmClientOptions) => Promise<boolean>;
  promptRetry: {
  <T = import("openai/resources/index.js").ChatCompletion>(content: string, options?: Omit<import("./createLlmRetryClient.js").LlmRetryOptions<T>, "messages">): Promise<T>;
  <T = import("openai/resources/index.js").ChatCompletion>(options: import("./createLlmRetryClient.js").LlmRetryOptions<T>): Promise<T>;
@@ -32,10 +25,6 @@ export declare function createLlm(params: CreateLlmFactoryParams): {
  (content: string, options?: Omit<import("./createLlmClient.js").LlmPromptOptions, "messages">): Promise<import("openai/resources/index.js").ChatCompletion>;
  (options: import("./createLlmClient.js").LlmPromptOptions): Promise<import("openai/resources/index.js").ChatCompletion>;
  };
- isPromptCached: {
- (content: string, options?: Omit<import("./createLlmClient.js").LlmPromptOptions, "messages">): Promise<boolean>;
- (options: import("./createLlmClient.js").LlmPromptOptions): Promise<boolean>;
- };
  promptText: {
  (content: string, options?: Omit<import("./createLlmClient.js").LlmPromptOptions, "messages">): Promise<string>;
  (options: import("./createLlmClient.js").LlmPromptOptions): Promise<string>;
package/dist/llmFactory.js CHANGED
@@ -11,8 +11,7 @@ function createLlm(params) {
  prompt: baseClient.prompt
  });
  const jsonSchemaClient = (0, createJsonSchemaLlmClient_js_1.createJsonSchemaLlmClient)({
- prompt: baseClient.prompt,
- isPromptCached: baseClient.isPromptCached
+ prompt: baseClient.prompt
  });
  const zodClient = (0, createZodLlmClient_js_1.createZodLlmClient)({
  jsonSchemaClient
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "llm-fns",
- "version": "1.0.11",
+ "version": "1.0.12",
  "description": "",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
@@ -13,6 +13,7 @@
  "dependencies": {
  "ajv": "^8.17.1",
  "openai": "^6.9.1",
+ "undici": "^7.16.0",
  "zod": "^4.1.13"
  },
  "devDependencies": {
@@ -20,6 +21,7 @@
  "@types/node": "^20.11.0",
  "cache-manager": "^7.2.5",
  "dotenv": "^16.6.1",
+ "keyv": "^5.5.5",
  "p-queue": "^9.0.1",
  "typescript": "^5.9.3",
  "vitest": "^1.2.1"