@threaded/ai 1.0.7 → 1.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +294 -12
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +18 -1
- package/dist/index.d.ts +18 -1
- package/dist/index.js +293 -12
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
|
@@ -190,7 +190,133 @@ var embed = async (model2, text, config) => {
|
|
|
190
190
|
}
|
|
191
191
|
};
|
|
192
192
|
|
|
193
|
+
// src/image.ts
|
|
194
|
+
// Maps each image-generation provider id to the environment variable
// expected to hold its API key; used as the fallback lookup in getApiKey
// when no stored key is available.
var providerKeyEnvVars = {
  openai: "OPENAI_API_KEY",
  xai: "XAI_API_KEY",
  google: "GEMINI_API_KEY"
};
|
|
199
|
+
// Resolve the API key for an image-generation provider. The stored key
// from getKey wins; if that lookup throws, fall back to the provider's
// environment variable from providerKeyEnvVars. Throws when neither
// source yields a key.
var getApiKey = (provider) => {
  try {
    return getKey(provider);
  } catch {
    const envName = providerKeyEnvVars[provider];
    let fallback = "";
    if (envName) {
      fallback = process.env[envName] || "";
    }
    if (fallback) {
      return fallback;
    }
    throw new Error(`No API key found for provider: ${provider}`);
  }
};
|
|
209
|
+
// POST an image-generation request to an OpenAI-compatible endpoint
// (OpenAI or xAI) and normalize the response to { data, revisedPrompt }.
// gpt-image-* models take a different parameter set: they accept
// output_format / output_compression / background but reject
// response_format and style, so those are gated on isGptImage.
var generateOpenAICompatible = async (endpoint, modelName, prompt, apiKey, config) => {
  const isGptImage = modelName.startsWith("gpt-image");
  const body = { model: modelName, prompt };
  if (!isGptImage) {
    // Non-gpt-image models let callers choose url vs b64_json delivery.
    body.response_format = config?.responseFormat || "b64_json";
  }
  if (config?.n) body.n = config.n;
  if (config?.size) body.size = config.size;
  if (config?.quality) body.quality = config.quality;
  if (config?.style && !isGptImage) body.style = config.style;
  if (isGptImage) {
    if (config?.outputFormat) body.output_format = config.outputFormat;
    // 0 is a valid compression level, so only null/undefined are skipped.
    if (config?.outputCompression != null) body.output_compression = config.outputCompression;
    if (config?.background) body.background = config.background;
  }
  const response = await fetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify(body)
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`API error: ${error}`);
  }
  const payload = await response.json();
  const [image] = payload.data;
  return {
    data: image.b64_json || image.url,
    revisedPrompt: image.revised_prompt
  };
};
|
|
246
|
+
// Generate an image via Google's Gemini generateContent endpoint and
// normalize the result to { data, revisedPrompt }. The base64 image
// bytes arrive as an inlineData part; any text part is surfaced as the
// revised prompt. Throws when the response carries no image data.
var generateGoogle = async (modelName, prompt, apiKey, config) => {
  const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelName}:generateContent`;
  const generationConfig = {
    responseModalities: ["TEXT", "IMAGE"]
  };
  if (config?.aspectRatio) {
    generationConfig.aspectRatio = config.aspectRatio;
  }
  const body = {
    contents: [{ parts: [{ text: prompt }] }],
    generationConfig
  };
  const response = await fetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-goog-api-key": apiKey
    },
    body: JSON.stringify(body)
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Google API error: ${error}`);
  }
  const data = await response.json();
  const parts = data.candidates?.[0]?.content?.parts || [];
  const imagePart = parts.find((part) => part.inlineData);
  if (!imagePart?.inlineData?.data) {
    throw new Error("No image data in response");
  }
  const textPart = parts.find((part) => part.text);
  return {
    data: imagePart.inlineData.data,
    revisedPrompt: textPart?.text
  };
};
|
|
281
|
+
// Public entry point for image generation. Parses "provider/model",
// resolves the provider's API key, and dispatches to the matching
// backend. OpenAI and xAI share the OpenAI-compatible request shape;
// Google uses generateContent. Unknown providers throw.
var generateImage = async (model2, prompt, config) => {
  const { provider, model: modelName } = parseModelName(model2);
  const providerLower = provider.toLowerCase();
  const apiKey = getApiKey(providerLower);
  if (providerLower === "openai") {
    return generateOpenAICompatible(
      "https://api.openai.com/v1/images/generations",
      modelName,
      prompt,
      apiKey,
      config
    );
  }
  if (providerLower === "xai") {
    return generateOpenAICompatible(
      "https://api.x.ai/v1/images/generations",
      modelName,
      prompt,
      apiKey,
      config
    );
  }
  if (providerLower === "google") {
    return generateGoogle(modelName, prompt, apiKey, config);
  }
  // Preserve the caller's original casing in the error message.
  throw new Error(`Unsupported image generation provider: ${provider}`);
};
|
|
308
|
+
|
|
193
309
|
// src/providers/openai.ts
|
|
310
|
+
// Resolve the OpenAI API key: an explicit config value wins, then the
// stored key from getKey, finally the OPENAI_API_KEY env variable.
// Throws when no source yields a key.
var getApiKey2 = (configApiKey) => {
  if (configApiKey) {
    return configApiKey;
  }
  try {
    return getKey("openai");
  } catch {
    const envKey = process.env.OPENAI_API_KEY;
    if (envKey) {
      return envKey;
    }
    throw new Error("OpenAI API key not found");
  }
};
|
|
194
320
|
var appendToolCalls = (toolCalls, tcchunklist) => {
|
|
195
321
|
for (const tcchunk of tcchunklist) {
|
|
196
322
|
while (toolCalls.length <= tcchunk.index) {
|
|
@@ -209,10 +335,7 @@ var appendToolCalls = (toolCalls, tcchunklist) => {
|
|
|
209
335
|
};
|
|
210
336
|
var callOpenAI = async (config, ctx) => {
|
|
211
337
|
const { model: model2, instructions, schema, apiKey: configApiKey } = config;
|
|
212
|
-
const apiKey = configApiKey
|
|
213
|
-
if (!apiKey) {
|
|
214
|
-
throw new Error("OpenAI API key not found");
|
|
215
|
-
}
|
|
338
|
+
const apiKey = getApiKey2(configApiKey);
|
|
216
339
|
const messages = [];
|
|
217
340
|
if (instructions) {
|
|
218
341
|
messages.push({ role: "system", content: instructions });
|
|
@@ -325,6 +448,16 @@ var handleOpenAIStream = async (response, ctx) => {
|
|
|
325
448
|
};
|
|
326
449
|
|
|
327
450
|
// src/providers/anthropic.ts
|
|
451
|
+
// Resolve the Anthropic API key: an explicit config value wins, then
// the stored key from getKey, finally the ANTHROPIC_API_KEY env
// variable. Throws when no source yields a key.
var getApiKey3 = (configApiKey) => {
  if (configApiKey) {
    return configApiKey;
  }
  try {
    return getKey("anthropic");
  } catch {
    const envKey = process.env.ANTHROPIC_API_KEY;
    if (envKey) {
      return envKey;
    }
    throw new Error("Anthropic API key not found");
  }
};
|
|
328
461
|
var convertToAnthropicFormat = (messages) => {
|
|
329
462
|
const result = [];
|
|
330
463
|
let i = 0;
|
|
@@ -376,10 +509,7 @@ var convertToAnthropicFormat = (messages) => {
|
|
|
376
509
|
};
|
|
377
510
|
var callAnthropic = async (config, ctx) => {
|
|
378
511
|
const { model: model2, instructions, schema, apiKey: configApiKey } = config;
|
|
379
|
-
const apiKey = configApiKey
|
|
380
|
-
if (!apiKey) {
|
|
381
|
-
throw new Error("Anthropic API key not found");
|
|
382
|
-
}
|
|
512
|
+
const apiKey = getApiKey3(configApiKey);
|
|
383
513
|
let system = instructions;
|
|
384
514
|
if (ctx.history[0]?.role === "system") {
|
|
385
515
|
system = ctx.history[0].content;
|
|
@@ -524,12 +654,19 @@ var handleAnthropicStream = async (response, ctx) => {
|
|
|
524
654
|
};
|
|
525
655
|
|
|
526
656
|
// src/providers/google.ts
|
|
657
|
+
// Resolve the Google API key: an explicit config value wins, then the
// stored key from getKey, finally GEMINI_API_KEY with GOOGLE_AI_API_KEY
// as a secondary env fallback. Throws when no source yields a key.
var getApiKey4 = (configApiKey) => {
  if (configApiKey) {
    return configApiKey;
  }
  try {
    return getKey("google");
  } catch {
    const envKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY;
    if (envKey) {
      return envKey;
    }
    throw new Error("Google API key not found");
  }
};
|
|
527
667
|
var callGoogle = async (config, ctx) => {
|
|
528
668
|
const { model: model2, instructions, apiKey: configApiKey } = config;
|
|
529
|
-
const apiKey = configApiKey
|
|
530
|
-
if (!apiKey) {
|
|
531
|
-
throw new Error("Google API key not found");
|
|
532
|
-
}
|
|
669
|
+
const apiKey = getApiKey4(configApiKey);
|
|
533
670
|
const contents = [];
|
|
534
671
|
if (instructions) {
|
|
535
672
|
contents.push({
|
|
@@ -675,6 +812,147 @@ var callHuggingFace = async (config, ctx) => {
|
|
|
675
812
|
);
|
|
676
813
|
};
|
|
677
814
|
|
|
815
|
+
// src/providers/xai.ts
|
|
816
|
+
// Merge streamed tool-call delta chunks into the accumulated toolCalls
// array (mutated in place and also returned). Each chunk addresses a
// slot by `index`; missing slots are padded with empty entries, and the
// id / name / arguments fragments are concatenated onto that slot.
var appendToolCalls2 = (toolCalls, tcchunklist) => {
  for (const chunk of tcchunklist) {
    // Grow the array until the addressed slot exists.
    while (toolCalls.length <= chunk.index) {
      toolCalls.push({
        id: "",
        type: "function",
        function: { name: "", arguments: "" }
      });
    }
    const target = toolCalls[chunk.index];
    target.id += chunk.id || "";
    target.function.name += chunk.function?.name || "";
    target.function.arguments += chunk.function?.arguments || "";
  }
  return toolCalls;
};
|
|
832
|
+
// Resolve the xAI API key: an explicit config value wins, then the
// stored key from getKey, finally the XAI_API_KEY env variable.
// Throws when no source yields a key.
var getApiKey5 = (configApiKey) => {
  if (configApiKey) {
    return configApiKey;
  }
  try {
    return getKey("xai");
  } catch {
    const envKey = process.env.XAI_API_KEY;
    if (envKey) {
      return envKey;
    }
    throw new Error("xAI API key not found");
  }
};
|
|
842
|
+
// Call xAI's OpenAI-compatible chat-completions endpoint. Prepends the
// optional system instructions to the conversation, wires up structured
// output (json_schema) and tool definitions when present, and delegates
// to handleXAIStream when ctx.stream is set. Returns an updated ctx with
// the assistant message appended to history.
var callXAI = async (config, ctx) => {
  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
  const apiKey = getApiKey5(configApiKey);
  const messages = instructions
    ? [{ role: "system", content: instructions }, ...ctx.history]
    : [...ctx.history];
  const body = {
    model: model2,
    messages,
    stream: !!ctx.stream
  };
  if (schema) {
    // xAI follows OpenAI's strict json_schema response-format shape.
    body.response_format = {
      type: "json_schema",
      json_schema: {
        name: schema.name,
        schema: { ...schema.schema, additionalProperties: false },
        strict: true
      }
    };
  }
  if (ctx.tools?.length) {
    body.tools = ctx.tools;
    body.tool_choice = "auto";
  }
  const response = await fetch("https://api.x.ai/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify(body),
    signal: ctx.abortSignal
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`xAI API error: ${error}`);
  }
  if (ctx.stream) {
    return handleXAIStream(response, ctx);
  }
  const data = await response.json();
  const { message } = data.choices[0];
  const msg = {
    role: "assistant",
    content: message.content || ""
  };
  if (message.tool_calls) {
    msg.tool_calls = message.tool_calls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg]
  };
};
|
|
901
|
+
// Consume an xAI server-sent-event stream. Content deltas are forwarded
// to ctx.stream as they arrive and accumulated into the final message;
// tool-call deltas are merged via appendToolCalls2. Honors
// ctx.abortSignal, always releases the reader lock, and returns an
// updated ctx with the assistant message appended to history.
var handleXAIStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let content = "";
  let toolCalls = [];
  let pending = "";
  try {
    for (;;) {
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      pending += decoder.decode(value, { stream: true });
      const lines = pending.split("\n");
      // The last element may be a partial line; carry it to the next chunk.
      pending = lines.pop() || "";
      for (const line of lines) {
        if (!line.startsWith("data: ")) continue;
        const payload = line.slice(6).trim();
        if (!payload || payload === "[DONE]") continue;
        try {
          const event = JSON.parse(payload);
          const delta = event.choices?.[0]?.delta;
          if (delta?.content) {
            content += delta.content;
            if (ctx.stream) {
              ctx.stream({ type: "content", content: delta.content });
            }
          }
          if (delta?.tool_calls) {
            toolCalls = appendToolCalls2(toolCalls, delta.tool_calls);
          }
        } catch (e) {
          // Best-effort parsing: skip malformed SSE payloads silently,
          // matching the non-streaming path's tolerance.
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg]
  };
};
|
|
955
|
+
|
|
678
956
|
// src/providers/index.ts
|
|
679
957
|
var callProvider = async (config, ctx) => {
|
|
680
958
|
const { provider, model: model2 } = parseModelName(config.model);
|
|
@@ -686,6 +964,8 @@ var callProvider = async (config, ctx) => {
|
|
|
686
964
|
return callAnthropic(providerConfig, ctx);
|
|
687
965
|
case "google":
|
|
688
966
|
return callGoogle(providerConfig, ctx);
|
|
967
|
+
case "xai":
|
|
968
|
+
return callXAI(providerConfig, ctx);
|
|
689
969
|
case "huggingface":
|
|
690
970
|
default:
|
|
691
971
|
return callHuggingFace(providerConfig, ctx);
|
|
@@ -1296,6 +1576,7 @@ export {
|
|
|
1296
1576
|
everyNMessages,
|
|
1297
1577
|
everyNTokens,
|
|
1298
1578
|
generateApprovalToken,
|
|
1579
|
+
generateImage,
|
|
1299
1580
|
getKey,
|
|
1300
1581
|
getOrCreateThread,
|
|
1301
1582
|
isStandardSchema,
|