llm-fns 1.0.2 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,79 @@
+ import OpenAI from "openai";
+ import type { Cache } from 'cache-manager';
+ import type PQueue from 'p-queue';
+ export declare function countChars(message: OpenAI.Chat.Completions.ChatCompletionMessageParam): number;
+ export declare function truncateSingleMessage(message: OpenAI.Chat.Completions.ChatCompletionMessageParam, charLimit: number): OpenAI.Chat.Completions.ChatCompletionMessageParam;
+ export declare function truncateMessages(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], limit: number): OpenAI.Chat.Completions.ChatCompletionMessageParam[];
+ /**
+ * The response format for OpenAI and OpenRouter.
+ * OpenRouter extends this with 'json_schema'.
+ */
+ export type ModelConfig = string | ({
+ model?: string;
+ } & Record<string, any>);
+ export type OpenRouterResponseFormat = {
+ type: 'text' | 'json_object';
+ } | {
+ type: 'json_schema';
+ json_schema: {
+ name: string;
+ strict?: boolean;
+ schema: object;
+ };
+ };
+ /**
+ * Options for the individual "prompt" function calls.
+ * These can override defaults or add call-specific parameters.
+ * 'messages' is a required property, inherited from OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming.
+ */
+ export interface LlmPromptOptions extends Omit<OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming, 'model' | 'response_format' | 'modalities' | 'messages'> {
+ messages: string | OpenAI.Chat.Completions.ChatCompletionMessageParam[];
+ model?: ModelConfig;
+ ttl?: number;
+ retries?: number;
+ /** @deprecated Use `reasoning` object instead. */
+ response_format?: OpenRouterResponseFormat;
+ modalities?: string[];
+ image_config?: {
+ aspect_ratio?: string;
+ };
+ }
+ /**
+ * Options required to create an instance of the LlmClient.
+ * These are the core dependencies.
+ */
+ export interface CreateLlmClientParams {
+ openai: OpenAI;
+ cache?: Cache;
+ defaultModel: ModelConfig;
+ maxConversationChars?: number;
+ queue?: PQueue;
+ }
+ export declare function normalizeOptions(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): LlmPromptOptions;
+ /**
+ * Factory function that creates a GPT "prompt" function, with optional caching.
+ * @param params - The core dependencies (API key, base URL, default model, and optional cache instance).
+ * @returns An async function `prompt` ready to make OpenAI calls, with caching if configured.
+ */
+ export declare function createLlmClient(params: CreateLlmClientParams): {
+ prompt: {
+ (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<OpenAI.Chat.Completions.ChatCompletion>;
+ (options: LlmPromptOptions): Promise<OpenAI.Chat.Completions.ChatCompletion>;
+ };
+ isPromptCached: {
+ (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<boolean>;
+ (options: LlmPromptOptions): Promise<boolean>;
+ };
+ promptText: {
+ (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<string>;
+ (options: LlmPromptOptions): Promise<string>;
+ };
+ promptImage: {
+ (content: string, options?: Omit<LlmPromptOptions, "messages">): Promise<Buffer>;
+ (options: LlmPromptOptions): Promise<Buffer>;
+ };
+ };
+ export type PromptFunction = ReturnType<typeof createLlmClient>['prompt'];
+ export type IsPromptCachedFunction = ReturnType<typeof createLlmClient>['isPromptCached'];
+ export type PromptTextFunction = ReturnType<typeof createLlmClient>['promptText'];
+ export type PromptImageFunction = ReturnType<typeof createLlmClient>['promptImage'];
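
For context, a minimal usage sketch of the client declared above. The import path, endpoint, and model id are assumptions (the package root is assumed to re-export createLlmClient); substitute your own values:

import OpenAI from "openai";
import { createLlmClient } from "llm-fns"; // assumption: root re-export

async function main() {
  // Any OpenAI-compatible endpoint works; OpenRouter is shown as an example setup.
  const openai = new OpenAI({
    apiKey: process.env.OPENROUTER_API_KEY, // hypothetical env var
    baseURL: "https://openrouter.ai/api/v1",
  });

  const { prompt, promptText } = createLlmClient({
    openai,
    defaultModel: "openai/gpt-4o-mini", // placeholder model id
    maxConversationChars: 20_000, // optional: truncate long conversations
  });

  // String form: the string becomes a single user message.
  const answer = await promptText("Summarize Hamlet in one sentence.");

  // Options form: full chat-completion params plus llm-fns extras.
  const completion = await prompt({
    messages: [{ role: "user", content: "Reply with a JSON object containing a `joke` field." }],
    response_format: { type: "json_object" },
    retries: 2,
  });

  console.log(answer, completion.choices[0]?.message?.content);
}

main().catch(console.error);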
@@ -1,11 +1,18 @@
- import crypto from 'crypto';
- import OpenAI from "openai";
- import type { Cache } from 'cache-manager'; // Using Cache from cache-manager
- import type PQueue from 'p-queue';
- import { executeWithRetry } from './retryUtils.js';
-
- export function countChars(message: OpenAI.Chat.Completions.ChatCompletionMessageParam): number {
- if (!message.content) return 0;
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+ return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.countChars = countChars;
+ exports.truncateSingleMessage = truncateSingleMessage;
+ exports.truncateMessages = truncateMessages;
+ exports.normalizeOptions = normalizeOptions;
+ exports.createLlmClient = createLlmClient;
+ const crypto_1 = __importDefault(require("crypto"));
+ const retryUtils_js_1 = require("./retryUtils.js");
+ function countChars(message) {
+ if (!message.content)
+ return 0;
  if (typeof message.content === 'string') {
  return message.content.length;
  }
@@ -22,97 +29,81 @@ export function countChars(message: OpenAI.Chat.Completions.ChatCompletionMessag
  }
  return 0;
  }
-
- export function truncateSingleMessage(message: OpenAI.Chat.Completions.ChatCompletionMessageParam, charLimit: number): OpenAI.Chat.Completions.ChatCompletionMessageParam {
+ function truncateSingleMessage(message, charLimit) {
  const TRUNCATION_SUFFIX = '...[truncated]';
  const messageCopy = JSON.parse(JSON.stringify(message));
-
  if (charLimit <= 0) {
  messageCopy.content = null;
  return messageCopy;
  }
-
  if (!messageCopy.content || countChars(messageCopy) <= charLimit) {
  return messageCopy;
  }
-
  if (typeof messageCopy.content === 'string') {
  let newContent = messageCopy.content;
  if (newContent.length > charLimit) {
  if (charLimit > TRUNCATION_SUFFIX.length) {
  newContent = newContent.substring(0, charLimit - TRUNCATION_SUFFIX.length) + TRUNCATION_SUFFIX;
- } else {
+ }
+ else {
  newContent = newContent.substring(0, charLimit);
  }
  }
  messageCopy.content = newContent;
  return messageCopy;
  }
-
  if (Array.isArray(messageCopy.content)) {
  // Complex case: multipart message.
  // Strategy: consolidate text, remove images if needed, then truncate text.
- const textParts = messageCopy.content.filter((p: any) => p.type === 'text');
- const imageParts = messageCopy.content.filter((p: any) => p.type === 'image_url');
- let combinedText = textParts.map((p: any) => p.text).join('\n');
+ const textParts = messageCopy.content.filter((p) => p.type === 'text');
+ const imageParts = messageCopy.content.filter((p) => p.type === 'image_url');
+ let combinedText = textParts.map((p) => p.text).join('\n');
  let keptImages = [...imageParts];
-
  while (combinedText.length + (keptImages.length * 2500) > charLimit && keptImages.length > 0) {
  keptImages.pop(); // remove images from the end
  }
-
  const imageChars = keptImages.length * 2500;
  const textCharLimit = charLimit - imageChars;
-
  if (combinedText.length > textCharLimit) {
  if (textCharLimit > TRUNCATION_SUFFIX.length) {
  combinedText = combinedText.substring(0, textCharLimit - TRUNCATION_SUFFIX.length) + TRUNCATION_SUFFIX;
- } else if (textCharLimit >= 0) {
+ }
+ else if (textCharLimit >= 0) {
  combinedText = combinedText.substring(0, textCharLimit);
- } else {
+ }
+ else {
  combinedText = "";
  }
  }
-
- const newContent: OpenAI.Chat.Completions.ChatCompletionContentPart[] = [];
+ const newContent = [];
  if (combinedText) {
  newContent.push({ type: 'text', text: combinedText });
  }
  newContent.push(...keptImages);
  messageCopy.content = newContent;
  }
-
  return messageCopy;
  }
-
-
- export function truncateMessages(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], limit: number): OpenAI.Chat.Completions.ChatCompletionMessageParam[] {
+ function truncateMessages(messages, limit) {
  const systemMessage = messages.find(m => m.role === 'system');
  const otherMessages = messages.filter(m => m.role !== 'system');
-
- let totalChars = otherMessages.reduce((sum: number, msg) => sum + countChars(msg), 0);
-
+ let totalChars = otherMessages.reduce((sum, msg) => sum + countChars(msg), 0);
  if (totalChars <= limit) {
  return messages;
  }
-
- const mutableOtherMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = JSON.parse(JSON.stringify(otherMessages));
+ const mutableOtherMessages = JSON.parse(JSON.stringify(otherMessages));
  let excessChars = totalChars - limit;
-
  // Truncate messages starting from the second one.
  for (let i = 1; i < mutableOtherMessages.length; i++) {
- if (excessChars <= 0) break;
-
+ if (excessChars <= 0)
+ break;
  const message = mutableOtherMessages[i];
  const messageChars = countChars(message);
  const charsToCut = Math.min(excessChars, messageChars);
-
  const newCharCount = messageChars - charsToCut;
  mutableOtherMessages[i] = truncateSingleMessage(message, newCharCount);
-
  excessChars -= charsToCut;
  }
-
  // If still over limit, truncate the first message.
  if (excessChars > 0) {
  const firstMessage = mutableOtherMessages[0];
@@ -121,24 +112,23 @@ export function truncateMessages(messages: OpenAI.Chat.Completions.ChatCompletio
  const newCharCount = firstMessageChars - charsToCut;
  mutableOtherMessages[0] = truncateSingleMessage(firstMessage, newCharCount);
  }
-
  // Filter out empty messages (char count is 0)
  const finalMessages = mutableOtherMessages.filter(msg => countChars(msg) > 0);
-
  return systemMessage ? [systemMessage, ...finalMessages] : finalMessages;
  }
-
- function concatMessageText(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]): string {
- const textParts: string[] = [];
+ function concatMessageText(messages) {
+ const textParts = [];
  for (const message of messages) {
  if (message.content) {
  if (typeof message.content === 'string') {
  textParts.push(message.content);
- } else if (Array.isArray(message.content)) {
+ }
+ else if (Array.isArray(message.content)) {
  for (const part of message.content) {
  if (part.type === 'text') {
  textParts.push(part.text);
- } else if (part.type === 'image_url') {
+ }
+ else if (part.type === 'image_url') {
  textParts.push('[IMAGE]');
  }
  }
@@ -147,8 +137,7 @@ function concatMessageText(messages: OpenAI.Chat.Completions.ChatCompletionMessa
  }
  return textParts.join(' ');
  }
-
- function getPromptSummary(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]): string {
+ function getPromptSummary(messages) {
  const fullText = concatMessageText(messages);
  // Replace multiple whitespace chars with a single space and trim.
  const cleanedText = fullText.replace(/\s+/g, ' ').trim();
@@ -159,56 +148,7 @@ function getPromptSummary(messages: OpenAI.Chat.Completions.ChatCompletionMessag
  }
  return cleanedText;
  }
-
- /**
- * The response format for OpenAI and OpenRouter.
- * OpenRouter extends this with 'json_schema'.
- */
- export type ModelConfig = string | ({ model?: string } & Record<string, any>);
-
- export type OpenRouterResponseFormat =
- | { type: 'text' | 'json_object' }
- | {
- type: 'json_schema';
- json_schema: {
- name: string;
- strict?: boolean;
- schema: object;
- };
- };
-
-
- /**
- * Options for the individual "prompt" function calls.
- * These can override defaults or add call-specific parameters.
- * 'messages' is a required property, inherited from OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming.
- */
- export interface LlmPromptOptions extends Omit<OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming, 'model' | 'response_format' | 'modalities' | 'messages'> {
- messages: string | OpenAI.Chat.Completions.ChatCompletionMessageParam[];
- model?: ModelConfig; // Allow overriding the default model for a specific call
- ttl?: number; // Cache TTL in *MILLISECONDS* for this specific call, used if cache is enabled
- retries?: number; // Number of retries for the API call.
- /** @deprecated Use `reasoning` object instead. */
- response_format?: OpenRouterResponseFormat;
- modalities?: string[];
- image_config?: {
- aspect_ratio?: string;
- };
- }
-
- /**
- * Options required to create an instance of the LlmClient.
- * These are the core dependencies.
- */
- export interface CreateLlmClientParams {
- openai: OpenAI;
- cache?: Cache; // Cache instance is now optional. Expect a cache-manager compatible instance if provided.
- defaultModel: ModelConfig; // The default OpenAI model to use if not overridden in LlmPromptOptions
- maxConversationChars?: number;
- queue?: PQueue;
- }
-
- export function normalizeOptions(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): LlmPromptOptions {
+ function normalizeOptions(arg1, arg2) {
  if (typeof arg1 === 'string') {
  return {
  messages: [{ role: 'user', content: arg1 }],
@@ -224,130 +164,100 @@ export function normalizeOptions(arg1: string | LlmPromptOptions, arg2?: Omit<Ll
  }
  return options;
  }
-
  /**
  * Factory function that creates a GPT "prompt" function, with optional caching.
  * @param params - The core dependencies (API key, base URL, default model, and optional cache instance).
  * @returns An async function `prompt` ready to make OpenAI calls, with caching if configured.
  */
- export function createLlmClient(params: CreateLlmClientParams) {
+ function createLlmClient(params) {
  const { openai, cache: cacheInstance, defaultModel: factoryDefaultModel, maxConversationChars, queue } = params;
-
- const getCompletionParamsAndCacheKey = (options: LlmPromptOptions) => {
+ const getCompletionParamsAndCacheKey = (options) => {
  const { ttl, model: callSpecificModel, messages, reasoning_effort, retries, ...restApiOptions } = options;
-
  // Ensure messages is an array (it should be if normalized, but for safety/types)
- const messagesArray = typeof messages === 'string'
- ? [{ role: 'user', content: messages }] as OpenAI.Chat.Completions.ChatCompletionMessageParam[]
+ const messagesArray = typeof messages === 'string'
+ ? [{ role: 'user', content: messages }]
  : messages;
-
  const finalMessages = maxConversationChars ? truncateMessages(messagesArray, maxConversationChars) : messagesArray;
-
  const baseConfig = typeof factoryDefaultModel === 'object' && factoryDefaultModel !== null
  ? factoryDefaultModel
  : (typeof factoryDefaultModel === 'string' ? { model: factoryDefaultModel } : {});
-
  const overrideConfig = typeof callSpecificModel === 'object' && callSpecificModel !== null
  ? callSpecificModel
  : (typeof callSpecificModel === 'string' ? { model: callSpecificModel } : {});
-
  const modelConfig = { ...baseConfig, ...overrideConfig };
-
  const { model: modelToUse, ...modelParams } = modelConfig;
-
  if (typeof modelToUse !== 'string' || !modelToUse) {
  throw new Error('A model must be specified either in the default configuration or in the prompt options.');
  }
-
  const completionParams = {
  ...modelParams,
  model: modelToUse,
  messages: finalMessages,
  ...restApiOptions,
  };
-
- let cacheKey: string | undefined;
+ let cacheKey;
  if (cacheInstance) {
  const cacheKeyString = JSON.stringify(completionParams);
- cacheKey = `gptask:${crypto.createHash('md5').update(cacheKeyString).digest('hex')}`;
+ cacheKey = `gptask:${crypto_1.default.createHash('md5').update(cacheKeyString).digest('hex')}`;
  }
-
  return { completionParams, cacheKey, ttl, modelToUse, finalMessages, retries };
  };
-
- async function prompt(content: string, options?: Omit<LlmPromptOptions, 'messages'>): Promise<OpenAI.Chat.Completions.ChatCompletion>;
- async function prompt(options: LlmPromptOptions): Promise<OpenAI.Chat.Completions.ChatCompletion>;
- async function prompt(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): Promise<OpenAI.Chat.Completions.ChatCompletion> {
+ async function prompt(arg1, arg2) {
  const options = normalizeOptions(arg1, arg2);
  const { completionParams, cacheKey, ttl, modelToUse, finalMessages, retries } = getCompletionParamsAndCacheKey(options);
-
  if (cacheInstance && cacheKey) {
  try {
- const cachedResponse = await cacheInstance.get<string>(cacheKey);
+ const cachedResponse = await cacheInstance.get(cacheKey);
  if (cachedResponse !== undefined && cachedResponse !== null) {
  return JSON.parse(cachedResponse);
  }
- } catch (error) {
+ }
+ catch (error) {
  console.warn("Cache get error:", error);
  }
  }
-
  const promptSummary = getPromptSummary(finalMessages);
-
- const apiCallAndCache = async (): Promise<OpenAI.Chat.Completions.ChatCompletion> => {
- const task = () => executeWithRetry<OpenAI.Chat.Completions.ChatCompletion, OpenAI.Chat.Completions.ChatCompletion>(
- async () => {
- return openai.chat.completions.create(completionParams as any);
- },
- async (completion) => {
- return { isValid: true, data: completion };
- },
- retries ?? 3,
- undefined,
- (error: any) => {
- // Do not retry if the API key is invalid (401) or if the error code explicitly states it.
- if (error?.status === 401 || error?.code === 'invalid_api_key') {
- return false;
- }
- return true;
+ const apiCallAndCache = async () => {
+ const task = () => (0, retryUtils_js_1.executeWithRetry)(async () => {
+ return openai.chat.completions.create(completionParams);
+ }, async (completion) => {
+ return { isValid: true, data: completion };
+ }, retries ?? 3, undefined, (error) => {
+ // Do not retry if the API key is invalid (401) or if the error code explicitly states it.
+ if (error?.status === 401 || error?.code === 'invalid_api_key') {
+ return false;
  }
- );
-
- const response = (await (queue ? queue.add(task, { id: promptSummary } as any) : task())) as OpenAI.Chat.Completions.ChatCompletion;
-
+ return true;
+ });
+ const response = (await (queue ? queue.add(task, { id: promptSummary }) : task()));
  if (cacheInstance && response && cacheKey) {
  try {
  await cacheInstance.set(cacheKey, JSON.stringify(response), ttl);
- } catch (error) {
+ }
+ catch (error) {
  console.warn("Cache set error:", error);
  }
  }
  return response;
  };
-
  return apiCallAndCache();
  }
-
- async function isPromptCached(content: string, options?: Omit<LlmPromptOptions, 'messages'>): Promise<boolean>;
- async function isPromptCached(options: LlmPromptOptions): Promise<boolean>;
- async function isPromptCached(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): Promise<boolean> {
+ async function isPromptCached(arg1, arg2) {
  const options = normalizeOptions(arg1, arg2);
  const { cacheKey } = getCompletionParamsAndCacheKey(options);
  if (!cacheInstance || !cacheKey) {
  return false;
  }
  try {
- const cachedResponse = await cacheInstance.get<string>(cacheKey);
+ const cachedResponse = await cacheInstance.get(cacheKey);
  return cachedResponse !== undefined && cachedResponse !== null;
- } catch (error) {
+ }
+ catch (error) {
  console.warn("Cache get error:", error);
  return false;
  }
  }
-
- async function promptText(content: string, options?: Omit<LlmPromptOptions, 'messages'>): Promise<string>;
- async function promptText(options: LlmPromptOptions): Promise<string>;
- async function promptText(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): Promise<string> {
+ async function promptText(arg1, arg2) {
  const options = normalizeOptions(arg1, arg2);
  const response = await prompt(options);
  const content = response.choices[0]?.message?.content;
@@ -356,14 +266,10 @@ export function createLlmClient(params: CreateLlmClientParams) {
  }
  return content;
  }
-
- async function promptImage(content: string, options?: Omit<LlmPromptOptions, 'messages'>): Promise<Buffer>;
- async function promptImage(options: LlmPromptOptions): Promise<Buffer>;
- async function promptImage(arg1: string | LlmPromptOptions, arg2?: Omit<LlmPromptOptions, 'messages'>): Promise<Buffer> {
+ async function promptImage(arg1, arg2) {
  const options = normalizeOptions(arg1, arg2);
  const response = await prompt(options);
- const message = response.choices[0]?.message as any;
-
+ const message = response.choices[0]?.message;
  if (message.images && Array.isArray(message.images) && message.images.length > 0) {
  const imageUrl = message.images[0].image_url.url;
  if (typeof imageUrl === 'string') {
@@ -371,7 +277,8 @@ export function createLlmClient(params: CreateLlmClientParams) {
  const imgRes = await fetch(imageUrl);
  const arrayBuffer = await imgRes.arrayBuffer();
  return Buffer.from(arrayBuffer);
- } else {
+ }
+ else {
  const base64Data = imageUrl.replace(/^data:image\/\w+;base64,/, "");
  return Buffer.from(base64Data, 'base64');
  }
@@ -379,11 +286,5 @@ export function createLlmClient(params: CreateLlmClientParams) {
  }
  throw new Error("LLM returned no image content.");
  }
-
  return { prompt, isPromptCached, promptText, promptImage };
  }
-
- export type PromptFunction = ReturnType<typeof createLlmClient>['prompt'];
- export type IsPromptCachedFunction = ReturnType<typeof createLlmClient>['isPromptCached'];
- export type PromptTextFunction = ReturnType<typeof createLlmClient>['promptText'];
- export type PromptImageFunction = ReturnType<typeof createLlmClient>['promptImage'];
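
The compiled implementation above hashes the full completion params (md5) into a `gptask:`-prefixed key and stores the serialized response via cache-manager, so an identical call can be answered from cache. A sketch of wiring this up, assuming cache-manager v6+ (`createCache`) and a root re-export of `createLlmClient`; adapt to your cache-manager version:

import OpenAI from "openai";
import { createCache } from "cache-manager"; // v6+ API; older versions use caching('memory')
import { createLlmClient } from "llm-fns"; // assumption: root re-export

async function main() {
  const cache = createCache(); // in-memory store by default
  const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

  const { promptText, isPromptCached } = createLlmClient({
    openai,
    cache,
    defaultModel: "gpt-4o-mini", // placeholder model id
  });

  const question = "What is the capital of France?";
  console.log(await isPromptCached(question)); // false before the first call

  // ttl is in milliseconds (per the source comments) and is excluded from the cache key.
  await promptText(question, { ttl: 60_000 });
  console.log(await isPromptCached(question)); // true while the cached entry is alive
}

main().catch(console.error);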
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,40 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const vitest_1 = require("vitest");
+ const createLlmClient_js_1 = require("./createLlmClient.js");
+ (0, vitest_1.describe)('normalizeOptions', () => {
+ (0, vitest_1.it)('should normalize a simple string prompt', () => {
+ const result = (0, createLlmClient_js_1.normalizeOptions)('Hello world');
+ (0, vitest_1.expect)(result).toEqual({
+ messages: [{ role: 'user', content: 'Hello world' }]
+ });
+ });
+ (0, vitest_1.it)('should normalize a string prompt with options', () => {
+ const result = (0, createLlmClient_js_1.normalizeOptions)('Hello world', { temperature: 0.5 });
+ (0, vitest_1.expect)(result).toEqual({
+ messages: [{ role: 'user', content: 'Hello world' }],
+ temperature: 0.5
+ });
+ });
+ (0, vitest_1.it)('should normalize an options object with string messages', () => {
+ const result = (0, createLlmClient_js_1.normalizeOptions)({
+ messages: 'Hello world',
+ temperature: 0.7
+ });
+ (0, vitest_1.expect)(result).toEqual({
+ messages: [{ role: 'user', content: 'Hello world' }],
+ temperature: 0.7
+ });
+ });
+ (0, vitest_1.it)('should pass through an options object with array messages', () => {
+ const messages = [{ role: 'user', content: 'Hello' }];
+ const result = (0, createLlmClient_js_1.normalizeOptions)({
+ messages,
+ temperature: 0.7
+ });
+ (0, vitest_1.expect)(result).toEqual({
+ messages,
+ temperature: 0.7
+ });
+ });
+ });
@@ -0,0 +1,47 @@
+ import OpenAI from 'openai';
+ import { PromptFunction, LlmPromptOptions } from "./createLlmClient.js";
+ export declare class LlmRetryError extends Error {
+ readonly message: string;
+ readonly type: 'JSON_PARSE_ERROR' | 'CUSTOM_ERROR';
+ readonly details?: any | undefined;
+ readonly rawResponse?: string | null | undefined;
+ constructor(message: string, type: 'JSON_PARSE_ERROR' | 'CUSTOM_ERROR', details?: any | undefined, rawResponse?: string | null | undefined);
+ }
+ export declare class LlmRetryExhaustedError extends Error {
+ readonly message: string;
+ constructor(message: string, options?: ErrorOptions);
+ }
+ export declare class LlmRetryAttemptError extends Error {
+ readonly message: string;
+ readonly mode: 'main' | 'fallback';
+ readonly conversation: OpenAI.Chat.Completions.ChatCompletionMessageParam[];
+ readonly attemptNumber: number;
+ constructor(message: string, mode: 'main' | 'fallback', conversation: OpenAI.Chat.Completions.ChatCompletionMessageParam[], attemptNumber: number, options?: ErrorOptions);
+ }
+ export interface LlmRetryResponseInfo {
+ mode: 'main' | 'fallback';
+ conversation: OpenAI.Chat.Completions.ChatCompletionMessageParam[];
+ attemptNumber: number;
+ }
+ export type LlmRetryOptions<T = any> = LlmPromptOptions & {
+ maxRetries?: number;
+ validate?: (response: any, info: LlmRetryResponseInfo) => Promise<T>;
+ };
+ export interface CreateLlmRetryClientParams {
+ prompt: PromptFunction;
+ fallbackPrompt?: PromptFunction;
+ }
+ export declare function createLlmRetryClient(params: CreateLlmRetryClientParams): {
+ promptRetry: {
+ <T = OpenAI.Chat.Completions.ChatCompletion>(content: string, options?: Omit<LlmRetryOptions<T>, "messages">): Promise<T>;
+ <T = OpenAI.Chat.Completions.ChatCompletion>(options: LlmRetryOptions<T>): Promise<T>;
+ };
+ promptTextRetry: {
+ <T = string>(content: string, options?: Omit<LlmRetryOptions<T>, "messages">): Promise<T>;
+ <T = string>(options: LlmRetryOptions<T>): Promise<T>;
+ };
+ promptImageRetry: {
+ <T = Buffer<ArrayBufferLike>>(content: string, options?: Omit<LlmRetryOptions<T>, "messages">): Promise<T>;
+ <T = Buffer<ArrayBufferLike>>(options: LlmRetryOptions<T>): Promise<T>;
+ };
+ };
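
Based only on the declarations above, a hedged sketch of pairing the retry client with a base prompt function. Whether `validate` receives the raw completion or the extracted text for `promptTextRetry`, and whether throwing `LlmRetryError` triggers another attempt (or the `fallbackPrompt`), are assumptions here rather than documented behavior; the root re-exports are also assumed:

import OpenAI from "openai";
import { createLlmClient, createLlmRetryClient, LlmRetryError } from "llm-fns"; // assumption: root re-exports

async function main() {
  const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const { prompt } = createLlmClient({ openai, defaultModel: "gpt-4o-mini" }); // placeholder model id

  const { promptTextRetry } = createLlmRetryClient({ prompt });

  const recipe = await promptTextRetry<{ title: string; steps: string[] }>({
    messages: "Return a pancake recipe as JSON with `title` and `steps` fields.",
    response_format: { type: "json_object" },
    maxRetries: 2,
    // Assumption: for promptTextRetry the validator receives the model's output text.
    validate: async (text) => {
      try {
        return JSON.parse(text);
      } catch (err) {
        // Assumption: throwing LlmRetryError asks the client to retry this prompt.
        throw new LlmRetryError("Response was not valid JSON", "JSON_PARSE_ERROR", err, String(text));
      }
    },
  });

  console.log(recipe.title, recipe.steps.length);
}

main().catch(console.error);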