@threaded/ai 1.0.6 → 1.0.8
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/index.cjs +127 -8
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +21 -2
- package/dist/index.d.ts +21 -2
- package/dist/index.js +126 -8
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.d.cts
CHANGED

@@ -138,6 +138,7 @@ interface ProviderConfig {
     model: string;
     instructions?: string;
     schema?: JsonSchema;
+    apiKey?: string;
 }
 interface ParsedModel {
     provider: string;
@@ -162,6 +163,21 @@ interface Thread {
 interface RetryOptions {
     times?: number;
 }
+interface ImageConfig {
+    n?: number;
+    size?: string;
+    quality?: "standard" | "hd" | "low" | "medium" | "high" | "auto";
+    style?: "vivid" | "natural";
+    responseFormat?: "url" | "b64_json";
+    aspectRatio?: string;
+    outputFormat?: "png" | "jpeg" | "webp";
+    outputCompression?: number;
+    background?: "transparent" | "opaque" | "auto";
+}
+interface ImageResult {
+    data: string;
+    revisedPrompt?: string;
+}
 
 declare const createMCPTools: (client: Client) => Promise<ToolConfig[]>;
 
@@ -185,6 +201,8 @@ declare const embed: (model: string, text: string, config?: {
     dimensions?: number;
 }) => Promise<number[]>;
 
+declare const generateImage: (model: string, prompt: string, config?: ImageConfig) => Promise<ImageResult>;
+
 /**
  * @example
  * // in-memory (default)
@@ -252,10 +270,11 @@ declare const tap: (fn: (ctx: ConversationContext) => Promise<void> | void) => S
 
 declare const when: (condition: (ctx: ConversationContext) => boolean, action: StepFunction) => StepFunction;
 
-declare const model: ({ model, schema, system, }?: {
+declare const model: ({ model, schema, system, apiKey, }?: {
     model?: string;
     schema?: JsonSchema | StandardSchema;
     system?: string | ((ctx: ConversationContext) => string);
+    apiKey?: string;
 }) => ComposedFunction;
 
 /**
@@ -293,4 +312,4 @@ declare const rateLimited: (config: RateLimitConfig) => <T extends (...args: any
 
 declare const compose: (...steps: StepFunction[]) => ComposedFunction;
 
-export { type ApiKeys, type ApprovalRequest, type ApprovalResponse, type ComposedFunction, type ConversationContext, Inherit, type JsonSchema, type Message, type ParsedModel, type ProviderConfig, type RetryOptions, type SchemaProperty, type ScopeConfig, type StandardSchema, type StepFunction, type StreamEvent, type Thread, type ThreadStore, type ToolCall, type ToolCallResult, type ToolConfig, type ToolDefinition, type ToolExecutionConfig, appendToLastRequest, compose, convertMCPSchemaToToolSchema, convertStandardSchemaToJsonSchema, convertStandardSchemaToSchemaProperties, createMCPTools, embed, everyNMessages, everyNTokens, generateApprovalToken, getKey, getOrCreateThread, isStandardSchema, maxCalls, model, noToolsCalled, normalizeSchema, onApprovalRequested, onApprovalResolved, parseModelName, rateLimited, removeApprovalListener, requestApproval, resolveApproval, retry, scope, setKeys, tap, toolConfigToToolDefinition, toolNotUsedInNTurns, toolWasCalled, when };
+export { type ApiKeys, type ApprovalRequest, type ApprovalResponse, type ComposedFunction, type ConversationContext, type ImageConfig, type ImageResult, Inherit, type JsonSchema, type Message, type ParsedModel, type ProviderConfig, type RetryOptions, type SchemaProperty, type ScopeConfig, type StandardSchema, type StepFunction, type StreamEvent, type Thread, type ThreadStore, type ToolCall, type ToolCallResult, type ToolConfig, type ToolDefinition, type ToolExecutionConfig, appendToLastRequest, compose, convertMCPSchemaToToolSchema, convertStandardSchemaToJsonSchema, convertStandardSchemaToSchemaProperties, createMCPTools, embed, everyNMessages, everyNTokens, generateApprovalToken, generateImage, getKey, getOrCreateThread, isStandardSchema, maxCalls, model, noToolsCalled, normalizeSchema, onApprovalRequested, onApprovalResolved, parseModelName, rateLimited, removeApprovalListener, requestApproval, resolveApproval, retry, scope, setKeys, tap, toolConfigToToolDefinition, toolNotUsedInNTurns, toolWasCalled, when };
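For orientation, a minimal usage sketch of the new generateImage export, based only on the declarations above; the model id "openai/gpt-image-1" and the output file name are illustrative assumptions, not taken from the package:

import { generateImage } from "@threaded/ai";
import { writeFile } from "node:fs/promises";

// generateImage(model, prompt, config?) resolves to { data, revisedPrompt? };
// data is a base64 payload by default, or a URL when responseFormat is "url".
const image = await generateImage("openai/gpt-image-1", "a watercolor fox", {
  size: "1024x1024",
  quality: "high",
  outputFormat: "png",
});
await writeFile("fox.png", Buffer.from(image.data, "base64"));
if (image.revisedPrompt) console.log("revised prompt:", image.revisedPrompt);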
package/dist/index.d.ts
CHANGED

@@ -138,6 +138,7 @@ interface ProviderConfig {
     model: string;
     instructions?: string;
     schema?: JsonSchema;
+    apiKey?: string;
 }
 interface ParsedModel {
     provider: string;
@@ -162,6 +163,21 @@ interface Thread {
 interface RetryOptions {
     times?: number;
 }
+interface ImageConfig {
+    n?: number;
+    size?: string;
+    quality?: "standard" | "hd" | "low" | "medium" | "high" | "auto";
+    style?: "vivid" | "natural";
+    responseFormat?: "url" | "b64_json";
+    aspectRatio?: string;
+    outputFormat?: "png" | "jpeg" | "webp";
+    outputCompression?: number;
+    background?: "transparent" | "opaque" | "auto";
+}
+interface ImageResult {
+    data: string;
+    revisedPrompt?: string;
+}
 
 declare const createMCPTools: (client: Client) => Promise<ToolConfig[]>;
 
@@ -185,6 +201,8 @@ declare const embed: (model: string, text: string, config?: {
     dimensions?: number;
 }) => Promise<number[]>;
 
+declare const generateImage: (model: string, prompt: string, config?: ImageConfig) => Promise<ImageResult>;
+
 /**
  * @example
  * // in-memory (default)
@@ -252,10 +270,11 @@ declare const tap: (fn: (ctx: ConversationContext) => Promise<void> | void) => S
 
 declare const when: (condition: (ctx: ConversationContext) => boolean, action: StepFunction) => StepFunction;
 
-declare const model: ({ model, schema, system, }?: {
+declare const model: ({ model, schema, system, apiKey, }?: {
     model?: string;
     schema?: JsonSchema | StandardSchema;
     system?: string | ((ctx: ConversationContext) => string);
+    apiKey?: string;
 }) => ComposedFunction;
 
 /**
@@ -293,4 +312,4 @@ declare const rateLimited: (config: RateLimitConfig) => <T extends (...args: any
 
 declare const compose: (...steps: StepFunction[]) => ComposedFunction;
 
-export { type ApiKeys, type ApprovalRequest, type ApprovalResponse, type ComposedFunction, type ConversationContext, Inherit, type JsonSchema, type Message, type ParsedModel, type ProviderConfig, type RetryOptions, type SchemaProperty, type ScopeConfig, type StandardSchema, type StepFunction, type StreamEvent, type Thread, type ThreadStore, type ToolCall, type ToolCallResult, type ToolConfig, type ToolDefinition, type ToolExecutionConfig, appendToLastRequest, compose, convertMCPSchemaToToolSchema, convertStandardSchemaToJsonSchema, convertStandardSchemaToSchemaProperties, createMCPTools, embed, everyNMessages, everyNTokens, generateApprovalToken, getKey, getOrCreateThread, isStandardSchema, maxCalls, model, noToolsCalled, normalizeSchema, onApprovalRequested, onApprovalResolved, parseModelName, rateLimited, removeApprovalListener, requestApproval, resolveApproval, retry, scope, setKeys, tap, toolConfigToToolDefinition, toolNotUsedInNTurns, toolWasCalled, when };
+export { type ApiKeys, type ApprovalRequest, type ApprovalResponse, type ComposedFunction, type ConversationContext, type ImageConfig, type ImageResult, Inherit, type JsonSchema, type Message, type ParsedModel, type ProviderConfig, type RetryOptions, type SchemaProperty, type ScopeConfig, type StandardSchema, type StepFunction, type StreamEvent, type Thread, type ThreadStore, type ToolCall, type ToolCallResult, type ToolConfig, type ToolDefinition, type ToolExecutionConfig, appendToLastRequest, compose, convertMCPSchemaToToolSchema, convertStandardSchemaToJsonSchema, convertStandardSchemaToSchemaProperties, createMCPTools, embed, everyNMessages, everyNTokens, generateApprovalToken, generateImage, getKey, getOrCreateThread, isStandardSchema, maxCalls, model, noToolsCalled, normalizeSchema, onApprovalRequested, onApprovalResolved, parseModelName, rateLimited, removeApprovalListener, requestApproval, resolveApproval, retry, scope, setKeys, tap, toolConfigToToolDefinition, toolNotUsedInNTurns, toolWasCalled, when };
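The model() step now also accepts a per-call apiKey, which takes precedence over a key registered with setKeys and over the provider environment variable (see the provider hunks in index.js below). A hedged sketch of how that option might be used; the prompt text and the MY_OPENAI_KEY variable name are illustrative assumptions:

import { model } from "@threaded/ai";

// The apiKey below overrides getKey("openai") and process.env.OPENAI_API_KEY
// for this composed step only.
const ask = model({
  model: "openai/gpt-4o-mini",
  system: "Answer in one sentence.",
  apiKey: process.env.MY_OPENAI_KEY,
});

const ctx = await ask("Summarize what a unified diff is.");
// ctx.lastResponse holds the provider response (see the tool-call check in index.js).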
package/dist/index.js
CHANGED

@@ -190,6 +190,122 @@ var embed = async (model2, text, config) => {
   }
 };
 
+// src/image.ts
+var providerKeyEnvVars = {
+  openai: "OPENAI_API_KEY",
+  xai: "XAI_API_KEY",
+  google: "GEMINI_API_KEY"
+};
+var getApiKey = (provider) => {
+  try {
+    return getKey(provider);
+  } catch {
+    const envVar = providerKeyEnvVars[provider];
+    const key = envVar ? process.env[envVar] || "" : "";
+    if (!key) throw new Error(`No API key found for provider: ${provider}`);
+    return key;
+  }
+};
+var generateOpenAICompatible = async (endpoint, modelName, prompt, apiKey, config) => {
+  const isGptImage = modelName.startsWith("gpt-image");
+  const body = {
+    model: modelName,
+    prompt
+  };
+  if (!isGptImage) {
+    body.response_format = config?.responseFormat || "b64_json";
+  }
+  if (config?.n) body.n = config.n;
+  if (config?.size) body.size = config.size;
+  if (config?.quality) body.quality = config.quality;
+  if (config?.style && !isGptImage) body.style = config.style;
+  if (isGptImage) {
+    if (config?.outputFormat) body.output_format = config.outputFormat;
+    if (config?.outputCompression != null) body.output_compression = config.outputCompression;
+    if (config?.background) body.background = config.background;
+  }
+  const response = await fetch(endpoint, {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${apiKey}`
+    },
+    body: JSON.stringify(body)
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`API error: ${error}`);
+  }
+  const data = await response.json();
+  const image = data.data[0];
+  return {
+    data: image.b64_json || image.url,
+    revisedPrompt: image.revised_prompt
+  };
+};
+var generateGoogle = async (modelName, prompt, apiKey, config) => {
+  const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelName}:generateContent`;
+  const body = {
+    contents: [{ parts: [{ text: prompt }] }],
+    generationConfig: {
+      responseModalities: ["TEXT", "IMAGE"]
+    }
+  };
+  if (config?.aspectRatio) {
+    body.generationConfig.aspectRatio = config.aspectRatio;
+  }
+  const response = await fetch(endpoint, {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      "x-goog-api-key": apiKey
+    },
+    body: JSON.stringify(body)
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Google API error: ${error}`);
+  }
+  const data = await response.json();
+  const parts = data.candidates?.[0]?.content?.parts || [];
+  const imagePart = parts.find((p) => p.inlineData);
+  const textPart = parts.find((p) => p.text);
+  if (!imagePart?.inlineData?.data) {
+    throw new Error("No image data in response");
+  }
+  return {
+    data: imagePart.inlineData.data,
+    revisedPrompt: textPart?.text
+  };
+};
+var generateImage = async (model2, prompt, config) => {
+  const { provider, model: modelName } = parseModelName(model2);
+  const providerLower = provider.toLowerCase();
+  const apiKey = getApiKey(providerLower);
+  switch (providerLower) {
+    case "openai":
+      return generateOpenAICompatible(
+        "https://api.openai.com/v1/images/generations",
+        modelName,
+        prompt,
+        apiKey,
+        config
+      );
+    case "xai":
+      return generateOpenAICompatible(
+        "https://api.x.ai/v1/images/generations",
+        modelName,
+        prompt,
+        apiKey,
+        config
+      );
+    case "google":
+      return generateGoogle(modelName, prompt, apiKey, config);
+    default:
+      throw new Error(`Unsupported image generation provider: ${provider}`);
+  }
+};
+
 // src/providers/openai.ts
 var appendToolCalls = (toolCalls, tcchunklist) => {
   for (const tcchunk of tcchunklist) {
@@ -208,8 +324,8 @@ var appendToolCalls = (toolCalls, tcchunklist) => {
   return toolCalls;
 };
 var callOpenAI = async (config, ctx) => {
-  const { model: model2, instructions, schema } = config;
-  const apiKey = getKey("openai") || process.env.OPENAI_API_KEY;
+  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
+  const apiKey = configApiKey || getKey("openai") || process.env.OPENAI_API_KEY;
   if (!apiKey) {
     throw new Error("OpenAI API key not found");
   }
@@ -375,8 +491,8 @@ var convertToAnthropicFormat = (messages) => {
   return result;
 };
 var callAnthropic = async (config, ctx) => {
-  const { model: model2, instructions, schema } = config;
-  const apiKey = getKey("anthropic") || process.env.ANTHROPIC_API_KEY;
+  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
+  const apiKey = configApiKey || getKey("anthropic") || process.env.ANTHROPIC_API_KEY;
   if (!apiKey) {
     throw new Error("Anthropic API key not found");
   }
@@ -525,8 +641,8 @@ var handleAnthropicStream = async (response, ctx) => {
 
 // src/providers/google.ts
 var callGoogle = async (config, ctx) => {
-  const { model: model2, instructions } = config;
-  const apiKey = getKey("google") || process.env.GOOGLE_AI_API_KEY;
+  const { model: model2, instructions, apiKey: configApiKey } = config;
+  const apiKey = configApiKey || getKey("google") || process.env.GOOGLE_AI_API_KEY;
   if (!apiKey) {
     throw new Error("Google API key not found");
   }
@@ -731,7 +847,8 @@ var removeApprovalListener = (event, listener) => {
 var model = ({
   model: model2 = "openai/gpt-4o-mini",
   schema,
-  system
+  system,
+  apiKey
 } = {}) => {
   return async (ctxOrMessage) => {
     const ctx = typeof ctxOrMessage === "string" ? (
@@ -768,7 +885,7 @@ var model = ({
         break;
       }
       currentCtx = await callProvider(
-        { model: model2, instructions, schema: normalizedSchema },
+        { model: model2, instructions, schema: normalizedSchema, apiKey },
        currentCtx
      );
      if (currentCtx.lastResponse?.tool_calls && currentCtx.tools?.length) {
@@ -1295,6 +1412,7 @@ export {
   everyNMessages,
   everyNTokens,
   generateApprovalToken,
+  generateImage,
   getKey,
   getOrCreateThread,
   isStandardSchema,