@threaded/ai 1.0.7 → 1.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +294 -12
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +18 -1
- package/dist/index.d.ts +18 -1
- package/dist/index.js +293 -12
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
|
@@ -41,6 +41,7 @@ __export(index_exports, {
|
|
|
41
41
|
everyNMessages: () => everyNMessages,
|
|
42
42
|
everyNTokens: () => everyNTokens,
|
|
43
43
|
generateApprovalToken: () => generateApprovalToken,
|
|
44
|
+
generateImage: () => generateImage,
|
|
44
45
|
getKey: () => getKey,
|
|
45
46
|
getOrCreateThread: () => getOrCreateThread,
|
|
46
47
|
isStandardSchema: () => isStandardSchema,
|
|
@@ -258,7 +259,133 @@ var embed = async (model2, text, config) => {
|
|
|
258
259
|
}
|
|
259
260
|
};
|
|
260
261
|
|
|
262
|
+
// src/image.ts
// Conventional environment-variable names for each image-capable provider.
var providerKeyEnvVars = {
  openai: "OPENAI_API_KEY",
  xai: "XAI_API_KEY",
  google: "GEMINI_API_KEY"
};
// Resolve the API key for an image provider: prefer the library's key store
// (`getKey`); if that fails for any reason, fall back to the provider's
// environment variable. Throws when no key can be found.
var getApiKey = (provider) => {
  try {
    return getKey(provider);
  } catch {
    const envVar = providerKeyEnvVars[provider];
    const key = envVar ? process.env[envVar] || "" : "";
    if (key) return key;
    throw new Error(`No API key found for provider: ${provider}`);
  }
};
|
|
278
|
+
/**
 * Generate an image via an OpenAI-compatible /images/generations endpoint
 * (shared by the OpenAI and xAI providers).
 *
 * @param endpoint  Full URL of the images/generations endpoint.
 * @param modelName Provider model id (e.g. "dall-e-3", "gpt-image-1").
 * @param prompt    Text prompt describing the desired image.
 * @param apiKey    Bearer token for the provider.
 * @param config    Optional settings (n, size, quality, style, responseFormat,
 *                  outputFormat, outputCompression, background).
 * @returns object with `data` (base64 string or URL, whichever the provider
 *          returned) and `revisedPrompt` (if the provider rewrote the prompt).
 * @throws  Error on a non-2xx HTTP response or when the payload has no image.
 */
var generateOpenAICompatible = async (endpoint, modelName, prompt, apiKey, config) => {
  // gpt-image-* models reject `response_format`/`style` and use their own
  // output_* parameters instead, so the request body is built conditionally.
  const isGptImage = modelName.startsWith("gpt-image");
  const body = {
    model: modelName,
    prompt
  };
  if (!isGptImage) {
    body.response_format = config?.responseFormat || "b64_json";
  }
  if (config?.n) body.n = config.n;
  if (config?.size) body.size = config.size;
  if (config?.quality) body.quality = config.quality;
  if (config?.style && !isGptImage) body.style = config.style;
  if (isGptImage) {
    if (config?.outputFormat) body.output_format = config.outputFormat;
    // `!= null` (not truthiness) so a compression level of 0 is forwarded.
    if (config?.outputCompression != null) body.output_compression = config.outputCompression;
    if (config?.background) body.background = config.background;
  }
  const response = await fetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify(body)
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`API error: ${error}`);
  }
  const data = await response.json();
  // Guard against an empty/malformed payload instead of crashing with a
  // TypeError on `data.data[0]`; message matches the Google provider's.
  const image = data?.data?.[0];
  if (!image) {
    throw new Error("No image data in response");
  }
  return {
    data: image.b64_json || image.url,
    revisedPrompt: image.revised_prompt
  };
};
|
|
315
|
+
/**
 * Generate an image with Google's Gemini generateContent API.
 * Returns `data` (base64 image bytes from the inlineData part) and
 * `revisedPrompt` (text part, when present). Throws on HTTP errors or
 * when the response contains no image part.
 */
var generateGoogle = async (modelName, prompt, apiKey, config) => {
  const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelName}:generateContent`;
  // Request both text and image modalities; aspect ratio is optional.
  const generationConfig = { responseModalities: ["TEXT", "IMAGE"] };
  if (config?.aspectRatio) {
    generationConfig.aspectRatio = config.aspectRatio;
  }
  const response = await fetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-goog-api-key": apiKey
    },
    body: JSON.stringify({
      contents: [{ parts: [{ text: prompt }] }],
      generationConfig
    })
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Google API error: ${error}`);
  }
  const data = await response.json();
  const parts = data.candidates?.[0]?.content?.parts || [];
  const imagePart = parts.find((part) => part.inlineData);
  if (!imagePart?.inlineData?.data) {
    throw new Error("No image data in response");
  }
  return {
    data: imagePart.inlineData.data,
    revisedPrompt: parts.find((part) => part.text)?.text
  };
};
|
|
350
|
+
/**
 * Dispatch image generation to the right provider backend based on the
 * "provider/model" string. The API key is resolved BEFORE dispatch, so a
 * missing key surfaces first even for unsupported providers (matching the
 * original control flow).
 */
var generateImage = async (model2, prompt, config) => {
  const { provider, model: modelName } = parseModelName(model2);
  const providerLower = provider.toLowerCase();
  const apiKey = getApiKey(providerLower);
  // OpenAI and xAI share the same request/response shape; only the URL differs.
  const openAICompatibleEndpoints = {
    openai: "https://api.openai.com/v1/images/generations",
    xai: "https://api.x.ai/v1/images/generations"
  };
  const endpoint = openAICompatibleEndpoints[providerLower];
  if (endpoint) {
    return generateOpenAICompatible(endpoint, modelName, prompt, apiKey, config);
  }
  if (providerLower === "google") {
    return generateGoogle(modelName, prompt, apiKey, config);
  }
  throw new Error(`Unsupported image generation provider: ${provider}`);
};
|
|
377
|
+
|
|
261
378
|
// src/providers/openai.ts
|
|
379
|
+
// src/providers/openai.ts
// Resolve the OpenAI API key: an explicit config value wins, then the
// library key store, then the OPENAI_API_KEY environment variable.
var getApiKey2 = (configApiKey) => {
  if (configApiKey) return configApiKey;
  try {
    return getKey("openai");
  } catch {
    const envKey = process.env.OPENAI_API_KEY;
    if (!envKey) throw new Error("OpenAI API key not found");
    return envKey;
  }
};
|
|
262
389
|
var appendToolCalls = (toolCalls, tcchunklist) => {
|
|
263
390
|
for (const tcchunk of tcchunklist) {
|
|
264
391
|
while (toolCalls.length <= tcchunk.index) {
|
|
@@ -277,10 +404,7 @@ var appendToolCalls = (toolCalls, tcchunklist) => {
|
|
|
277
404
|
};
|
|
278
405
|
var callOpenAI = async (config, ctx) => {
|
|
279
406
|
const { model: model2, instructions, schema, apiKey: configApiKey } = config;
|
|
280
|
-
const apiKey = configApiKey
|
|
281
|
-
if (!apiKey) {
|
|
282
|
-
throw new Error("OpenAI API key not found");
|
|
283
|
-
}
|
|
407
|
+
const apiKey = getApiKey2(configApiKey);
|
|
284
408
|
const messages = [];
|
|
285
409
|
if (instructions) {
|
|
286
410
|
messages.push({ role: "system", content: instructions });
|
|
@@ -393,6 +517,16 @@ var handleOpenAIStream = async (response, ctx) => {
|
|
|
393
517
|
};
|
|
394
518
|
|
|
395
519
|
// src/providers/anthropic.ts
|
|
520
|
+
// src/providers/anthropic.ts
// Resolve the Anthropic API key: an explicit config value wins, then the
// library key store, then the ANTHROPIC_API_KEY environment variable.
var getApiKey3 = (configApiKey) => {
  if (configApiKey) return configApiKey;
  try {
    return getKey("anthropic");
  } catch {
    const envKey = process.env.ANTHROPIC_API_KEY;
    if (!envKey) throw new Error("Anthropic API key not found");
    return envKey;
  }
};
|
|
396
530
|
var convertToAnthropicFormat = (messages) => {
|
|
397
531
|
const result = [];
|
|
398
532
|
let i = 0;
|
|
@@ -444,10 +578,7 @@ var convertToAnthropicFormat = (messages) => {
|
|
|
444
578
|
};
|
|
445
579
|
var callAnthropic = async (config, ctx) => {
|
|
446
580
|
const { model: model2, instructions, schema, apiKey: configApiKey } = config;
|
|
447
|
-
const apiKey = configApiKey
|
|
448
|
-
if (!apiKey) {
|
|
449
|
-
throw new Error("Anthropic API key not found");
|
|
450
|
-
}
|
|
581
|
+
const apiKey = getApiKey3(configApiKey);
|
|
451
582
|
let system = instructions;
|
|
452
583
|
if (ctx.history[0]?.role === "system") {
|
|
453
584
|
system = ctx.history[0].content;
|
|
@@ -592,12 +723,19 @@ var handleAnthropicStream = async (response, ctx) => {
|
|
|
592
723
|
};
|
|
593
724
|
|
|
594
725
|
// src/providers/google.ts
|
|
726
|
+
// src/providers/google.ts
// Resolve the Google API key: an explicit config value wins, then the
// library key store, then GEMINI_API_KEY with GOOGLE_AI_API_KEY as a
// secondary environment fallback.
var getApiKey4 = (configApiKey) => {
  if (configApiKey) return configApiKey;
  try {
    return getKey("google");
  } catch {
    const envKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY;
    if (!envKey) throw new Error("Google API key not found");
    return envKey;
  }
};
|
|
595
736
|
var callGoogle = async (config, ctx) => {
|
|
596
737
|
const { model: model2, instructions, apiKey: configApiKey } = config;
|
|
597
|
-
const apiKey = configApiKey
|
|
598
|
-
if (!apiKey) {
|
|
599
|
-
throw new Error("Google API key not found");
|
|
600
|
-
}
|
|
738
|
+
const apiKey = getApiKey4(configApiKey);
|
|
601
739
|
const contents = [];
|
|
602
740
|
if (instructions) {
|
|
603
741
|
contents.push({
|
|
@@ -743,6 +881,147 @@ var callHuggingFace = async (config, ctx) => {
|
|
|
743
881
|
);
|
|
744
882
|
};
|
|
745
883
|
|
|
884
|
+
// src/providers/xai.ts
// Merge streamed tool-call delta chunks into the accumulated tool-call list.
// Chunks address entries by `index`; id/name/arguments arrive as string
// fragments and are concatenated. Mutates `toolCalls` in place and returns it.
var appendToolCalls2 = (toolCalls, tcchunklist) => {
  for (const chunk of tcchunklist) {
    // Grow the list with empty placeholders until the target index exists
    // (deltas may arrive for index N before indexes < N are populated).
    while (toolCalls.length <= chunk.index) {
      toolCalls.push({
        id: "",
        type: "function",
        function: { name: "", arguments: "" }
      });
    }
    const entry = toolCalls[chunk.index];
    entry.id += chunk.id || "";
    entry.function.name += chunk.function?.name || "";
    entry.function.arguments += chunk.function?.arguments || "";
  }
  return toolCalls;
};
|
|
901
|
+
var getApiKey5 = (configApiKey) => {
|
|
902
|
+
if (configApiKey) return configApiKey;
|
|
903
|
+
try {
|
|
904
|
+
return getKey("xai");
|
|
905
|
+
} catch {
|
|
906
|
+
const key = process.env.XAI_API_KEY || "";
|
|
907
|
+
if (!key) throw new Error("xAI API key not found");
|
|
908
|
+
return key;
|
|
909
|
+
}
|
|
910
|
+
};
|
|
911
|
+
/**
 * Send the current conversation to the xAI chat-completions endpoint.
 * Supports structured output (JSON schema via response_format), tool
 * calling, and streaming (delegated to handleXAIStream). Returns a new
 * context with the assistant reply appended to history.
 */
var callXAI = async (config, ctx) => {
  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
  const apiKey = getApiKey5(configApiKey);
  // Instructions become a leading system message ahead of the history.
  const messages = instructions
    ? [{ role: "system", content: instructions }, ...ctx.history]
    : [...ctx.history];
  const body = {
    model: model2,
    messages,
    stream: !!ctx.stream
  };
  if (schema) {
    body.response_format = {
      type: "json_schema",
      json_schema: {
        name: schema.name,
        // additionalProperties must be false for strict schema mode.
        schema: { ...schema.schema, additionalProperties: false },
        strict: true
      }
    };
  }
  if (ctx.tools?.length) {
    body.tools = ctx.tools;
    body.tool_choice = "auto";
  }
  const response = await fetch("https://api.x.ai/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify(body),
    signal: ctx.abortSignal
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`xAI API error: ${error}`);
  }
  if (ctx.stream) {
    return handleXAIStream(response, ctx);
  }
  const data = await response.json();
  const { message } = data.choices[0];
  const msg = {
    role: "assistant",
    content: message.content || ""
  };
  if (message.tool_calls) {
    msg.tool_calls = message.tool_calls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg]
  };
};
|
|
970
|
+
/**
 * Consume an xAI SSE stream: accumulate assistant content, forward content
 * deltas to ctx.stream, and merge tool-call deltas. Stops early when
 * ctx.abortSignal fires. Returns a new context with the assembled assistant
 * message appended to history.
 */
var handleXAIStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  let toolCalls = [];
  let buffer = "";
  try {
    // Abort is checked before each read, matching a check-then-read loop.
    while (!ctx.abortSignal?.aborted) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      // Keep any trailing partial line in the buffer for the next chunk.
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (!line.startsWith("data: ")) continue;
        const payload = line.slice(6).trim();
        if (!payload || payload === "[DONE]") continue;
        try {
          const parsed = JSON.parse(payload);
          const delta = parsed.choices?.[0]?.delta;
          if (delta?.content) {
            fullContent += delta.content;
            if (ctx.stream) {
              ctx.stream({ type: "content", content: delta.content });
            }
          }
          if (delta?.tool_calls) {
            toolCalls = appendToolCalls2(toolCalls, delta.tool_calls);
          }
        } catch (e) {
          // Malformed SSE fragments are skipped (best-effort parsing).
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg]
  };
};
|
|
1024
|
+
|
|
746
1025
|
// src/providers/index.ts
|
|
747
1026
|
var callProvider = async (config, ctx) => {
|
|
748
1027
|
const { provider, model: model2 } = parseModelName(config.model);
|
|
@@ -754,6 +1033,8 @@ var callProvider = async (config, ctx) => {
|
|
|
754
1033
|
return callAnthropic(providerConfig, ctx);
|
|
755
1034
|
case "google":
|
|
756
1035
|
return callGoogle(providerConfig, ctx);
|
|
1036
|
+
case "xai":
|
|
1037
|
+
return callXAI(providerConfig, ctx);
|
|
757
1038
|
case "huggingface":
|
|
758
1039
|
default:
|
|
759
1040
|
return callHuggingFace(providerConfig, ctx);
|
|
@@ -1365,6 +1646,7 @@ var rateLimited = (config) => (fn) => {
|
|
|
1365
1646
|
everyNMessages,
|
|
1366
1647
|
everyNTokens,
|
|
1367
1648
|
generateApprovalToken,
|
|
1649
|
+
generateImage,
|
|
1368
1650
|
getKey,
|
|
1369
1651
|
getOrCreateThread,
|
|
1370
1652
|
isStandardSchema,
|