@threaded/ai 1.0.8 → 1.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +190 -17
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +190 -17
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -12,14 +12,23 @@ var convertStandardSchemaToJsonSchema = (standardSchema, name = "Schema") => {
 };
 var convertMCPSchemaToToolSchema = (mcpSchema) => {
   if (!mcpSchema?.properties) return {};
+  const convertProperty = (prop) => ({
+    type: prop.type || "string",
+    description: prop.description || "",
+    ...prop.enum && { enum: prop.enum },
+    ...prop.items && { items: convertProperty(prop.items) },
+    ...prop.properties && {
+      properties: Object.fromEntries(
+        Object.entries(prop.properties).map(([k, v]) => [k, convertProperty(v)])
+      )
+    }
+  });
   const result = {};
   for (const [key, value] of Object.entries(mcpSchema.properties)) {
     const prop = value;
     result[key] = {
-      type: prop.type || "string",
-      description: prop.description || "",
-      optional: !mcpSchema.required?.includes(key),
-      ...prop.enum && { enum: prop.enum }
+      ...convertProperty(prop),
+      optional: !mcpSchema.required?.includes(key)
     };
   }
   return result;
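Note: the new recursive convertProperty helper means nested items and properties are now converted instead of being dropped. A minimal sketch of the new behavior, using a hypothetical MCP-style schema (input and expected output are inferred from the code above, not taken from package docs):

    // Hypothetical input; output shape follows convertMCPSchemaToToolSchema above.
    const toolSchema = convertMCPSchemaToToolSchema({
      properties: {
        tags: { type: "array", items: { type: "string", description: "a tag" } },
        author: { type: "object", properties: { name: { type: "string" } } }
      },
      required: ["tags"]
    });
    // toolSchema.tags.items -> { type: "string", description: "a tag" }  (new in 1.0.10)
    // toolSchema.author     -> keeps nested properties; optional: true (not in required)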
@@ -307,6 +316,16 @@ var generateImage = async (model2, prompt, config) => {
 };
 
 // src/providers/openai.ts
+var getApiKey2 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("openai");
+  } catch {
+    const key = process.env.OPENAI_API_KEY || "";
+    if (!key) throw new Error("OpenAI API key not found");
+    return key;
+  }
+};
 var appendToolCalls = (toolCalls, tcchunklist) => {
   for (const tcchunk of tcchunklist) {
     while (toolCalls.length <= tcchunk.index) {
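Note: the same key-resolution helper is stamped out once per provider (getApiKey2 through getApiKey5 below), and the inline "if (!apiKey) throw" checks in each call function are replaced with calls to it. The fallback order, as written:

    // 1. an explicit config.apiKey, if provided
    // 2. getKey("<provider>") -- a shared key store whose definition is outside this diff
    // 3. the provider's environment variable (here OPENAI_API_KEY), else throw
    const apiKey = getApiKey2(config.apiKey);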
@@ -325,10 +344,7 @@ var appendToolCalls = (toolCalls, tcchunklist) => {
 };
 var callOpenAI = async (config, ctx) => {
   const { model: model2, instructions, schema, apiKey: configApiKey } = config;
-  const apiKey = configApiKey
-  if (!apiKey) {
-    throw new Error("OpenAI API key not found");
-  }
+  const apiKey = getApiKey2(configApiKey);
   const messages = [];
   if (instructions) {
     messages.push({ role: "system", content: instructions });
@@ -441,6 +457,16 @@ var handleOpenAIStream = async (response, ctx) => {
 };
 
 // src/providers/anthropic.ts
+var getApiKey3 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("anthropic");
+  } catch {
+    const key = process.env.ANTHROPIC_API_KEY || "";
+    if (!key) throw new Error("Anthropic API key not found");
+    return key;
+  }
+};
 var convertToAnthropicFormat = (messages) => {
   const result = [];
   let i = 0;
@@ -492,10 +518,7 @@ var convertToAnthropicFormat = (messages) => {
 };
 var callAnthropic = async (config, ctx) => {
   const { model: model2, instructions, schema, apiKey: configApiKey } = config;
-  const apiKey = configApiKey
-  if (!apiKey) {
-    throw new Error("Anthropic API key not found");
-  }
+  const apiKey = getApiKey3(configApiKey);
   let system = instructions;
   if (ctx.history[0]?.role === "system") {
     system = ctx.history[0].content;
@@ -640,12 +663,19 @@ var handleAnthropicStream = async (response, ctx) => {
 };
 
 // src/providers/google.ts
+var getApiKey4 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("google");
+  } catch {
+    const key = process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY || "";
+    if (!key) throw new Error("Google API key not found");
+    return key;
+  }
+};
 var callGoogle = async (config, ctx) => {
   const { model: model2, instructions, apiKey: configApiKey } = config;
-  const apiKey = configApiKey
-  if (!apiKey) {
-    throw new Error("Google API key not found");
-  }
+  const apiKey = getApiKey4(configApiKey);
   const contents = [];
   if (instructions) {
     contents.push({
@@ -680,7 +710,7 @@ var callGoogle = async (config, ctx) => {
   }
   const endpoint = ctx.stream ? "streamGenerateContent" : "generateContent";
   const response = await fetch(
-    `https://generativelanguage.googleapis.com/v1beta/models/${model2}:${endpoint}?key=${apiKey}`,
+    `https://generativelanguage.googleapis.com/v1beta/models/${model2}:${endpoint}?key=${apiKey}${ctx.stream ? "&alt=sse" : ""}`,
     {
       method: "POST",
       headers: {
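Note: the alt=sse parameter is only appended when streaming. Without it, streamGenerateContent on the Generative Language API returns a chunked JSON array rather than server-sent events; with it, the response arrives as "data:"-prefixed SSE lines that a line-oriented stream parser can consume. Illustrative framing only, not captured output:

    // default framing:         with alt=sse:
    // [{"candidates":[...]},   data: {"candidates":[...]}
    //  {"candidates":[...]}]   data: {"candidates":[...]}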
@@ -791,6 +821,147 @@ var callHuggingFace = async (config, ctx) => {
   );
 };
 
+// src/providers/xai.ts
+var appendToolCalls2 = (toolCalls, tcchunklist) => {
+  for (const tcchunk of tcchunklist) {
+    while (toolCalls.length <= tcchunk.index) {
+      toolCalls.push({
+        id: "",
+        type: "function",
+        function: { name: "", arguments: "" }
+      });
+    }
+    const tc = toolCalls[tcchunk.index];
+    tc.id += tcchunk.id || "";
+    tc.function.name += tcchunk.function?.name || "";
+    tc.function.arguments += tcchunk.function?.arguments || "";
+  }
+  return toolCalls;
+};
+var getApiKey5 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("xai");
+  } catch {
+    const key = process.env.XAI_API_KEY || "";
+    if (!key) throw new Error("xAI API key not found");
+    return key;
+  }
+};
+var callXAI = async (config, ctx) => {
+  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
+  const apiKey = getApiKey5(configApiKey);
+  const messages = [];
+  if (instructions) {
+    messages.push({ role: "system", content: instructions });
+  }
+  messages.push(...ctx.history);
+  const body = {
+    model: model2,
+    messages,
+    stream: !!ctx.stream
+  };
+  if (schema) {
+    body.response_format = {
+      type: "json_schema",
+      json_schema: {
+        name: schema.name,
+        schema: { ...schema.schema, additionalProperties: false },
+        strict: true
+      }
+    };
+  }
+  if (ctx.tools && ctx.tools.length > 0) {
+    body.tools = ctx.tools;
+    body.tool_choice = "auto";
+  }
+  const response = await fetch("https://api.x.ai/v1/chat/completions", {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${apiKey}`
+    },
+    body: JSON.stringify(body),
+    signal: ctx.abortSignal
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`xAI API error: ${error}`);
+  }
+  if (ctx.stream) {
+    return handleXAIStream(response, ctx);
+  }
+  const data = await response.json();
+  const choice = data.choices[0];
+  const { message } = choice;
+  const msg = {
+    role: "assistant",
+    content: message.content || ""
+  };
+  if (message.tool_calls) {
+    msg.tool_calls = message.tool_calls;
+  }
+  return {
+    ...ctx,
+    lastResponse: msg,
+    history: [...ctx.history, msg]
+  };
+};
+var handleXAIStream = async (response, ctx) => {
+  const reader = response.body.getReader();
+  const decoder = new TextDecoder();
+  let fullContent = "";
+  let toolCalls = [];
+  let buffer = "";
+  try {
+    while (true) {
+      if (ctx.abortSignal?.aborted) {
+        break;
+      }
+      const { done, value } = await reader.read();
+      if (done) break;
+      buffer += decoder.decode(value, { stream: true });
+      const lines = buffer.split("\n");
+      buffer = lines.pop() || "";
+      for (const line of lines) {
+        if (line.startsWith("data: ")) {
+          const data = line.slice(6).trim();
+          if (data === "[DONE]") continue;
+          if (!data) continue;
+          try {
+            const parsed = JSON.parse(data);
+            const delta = parsed.choices?.[0]?.delta;
+            if (delta?.content) {
+              fullContent += delta.content;
+              if (ctx.stream) {
+                ctx.stream({ type: "content", content: delta.content });
+              }
+            }
+            if (delta?.tool_calls) {
+              toolCalls = appendToolCalls2(toolCalls, delta.tool_calls);
+            }
+          } catch (e) {
+          }
+        }
+      }
+    }
+  } finally {
+    reader.releaseLock();
+  }
+  const msg = {
+    role: "assistant",
+    content: fullContent
+  };
+  if (toolCalls.length > 0) {
+    msg.tool_calls = toolCalls;
+  }
+  return {
+    ...ctx,
+    lastResponse: msg,
+    history: [...ctx.history, msg]
+  };
+};
+
 // src/providers/index.ts
 var callProvider = async (config, ctx) => {
   const { provider, model: model2 } = parseModelName(config.model);
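Note: the new xAI provider mirrors the OpenAI code path: a Chat Completions-style request body, and an SSE parser that splits on newlines while carrying the trailing partial line in buffer between reads. A hypothetical direct call, assuming a ctx shape matching how callXAI reads it above (the model name and field values are illustrative, not from package docs):

    // Hypothetical usage sketch; "grok-beta" and the ctx fields are assumptions.
    const result = await callXAI(
      { model: "grok-beta", instructions: "Answer briefly." },
      { history: [{ role: "user", content: "ping" }], stream: null }
    );
    console.log(result.lastResponse.content);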
@@ -802,6 +973,8 @@ var callProvider = async (config, ctx) => {
       return callAnthropic(providerConfig, ctx);
     case "google":
       return callGoogle(providerConfig, ctx);
+    case "xai":
+      return callXAI(providerConfig, ctx);
     case "huggingface":
     default:
       return callHuggingFace(providerConfig, ctx);