@threaded/ai 1.0.8 → 1.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +190 -17
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +190 -17
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -81,14 +81,23 @@ var convertStandardSchemaToJsonSchema = (standardSchema, name = "Schema") => {
 };
 var convertMCPSchemaToToolSchema = (mcpSchema) => {
   if (!mcpSchema?.properties) return {};
+  const convertProperty = (prop) => ({
+    type: prop.type || "string",
+    description: prop.description || "",
+    ...prop.enum && { enum: prop.enum },
+    ...prop.items && { items: convertProperty(prop.items) },
+    ...prop.properties && {
+      properties: Object.fromEntries(
+        Object.entries(prop.properties).map(([k, v]) => [k, convertProperty(v)])
+      )
+    }
+  });
   const result = {};
   for (const [key, value] of Object.entries(mcpSchema.properties)) {
     const prop = value;
     result[key] = {
-      type: prop.type || "string",
-      description: prop.description || "",
-      optional: !mcpSchema.required?.includes(key),
-      ...prop.enum && { enum: prop.enum }
+      ...convertProperty(prop),
+      optional: !mcpSchema.required?.includes(key)
     };
   }
   return result;
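The new convertProperty helper makes the MCP-to-tool-schema conversion recursive: enum values, array items, and nested object properties now survive the conversion, where 1.0.8 flattened every property to { type, description, optional, enum? }. A minimal sketch of the 1.0.10 behavior, using a hypothetical nested schema (not taken from the package):

    // Hypothetical MCP input: an array property with typed items.
    const mcpSchema = {
      properties: {
        tags: {
          type: "array",
          description: "labels to apply",
          items: { type: "string", enum: ["red", "green"] }
        }
      },
      required: ["tags"]
    };
    // 1.0.10 keeps the items subschema (dropped in 1.0.8):
    // { tags: { type: "array", description: "labels to apply",
    //           items: { type: "string", enum: ["red", "green"] },
    //           optional: false } }
    const toolSchema = convertMCPSchemaToToolSchema(mcpSchema);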
@@ -376,6 +385,16 @@ var generateImage = async (model2, prompt, config) => {
 };

 // src/providers/openai.ts
+var getApiKey2 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("openai");
+  } catch {
+    const key = process.env.OPENAI_API_KEY || "";
+    if (!key) throw new Error("OpenAI API key not found");
+    return key;
+  }
+};
 var appendToolCalls = (toolCalls, tcchunklist) => {
   for (const tcchunk of tcchunklist) {
     while (toolCalls.length <= tcchunk.index) {
@@ -394,10 +413,7 @@ var appendToolCalls = (toolCalls, tcchunklist) => {
 };
 var callOpenAI = async (config, ctx) => {
   const { model: model2, instructions, schema, apiKey: configApiKey } = config;
-  const apiKey = configApiKey
-  if (!apiKey) {
-    throw new Error("OpenAI API key not found");
-  }
+  const apiKey = getApiKey2(configApiKey);
   const messages = [];
   if (instructions) {
     messages.push({ role: "system", content: instructions });
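Key lookup is now centralized per provider: an explicit config.apiKey wins, then the package's getKey store, then the provider's environment variable, and only then does it throw. A sketch of the resolution order, using the getApiKey2 shown above:

    // Resolution order in 1.0.10, identical in shape for every provider:
    // 1. config.apiKey passed by the caller
    // 2. getKey("openai")            -- the package's internal key store
    // 3. process.env.OPENAI_API_KEY  -- new fallback in this release
    // 4. throw new Error("OpenAI API key not found")
    const apiKey = getApiKey2(undefined); // exercises steps 2-4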
@@ -510,6 +526,16 @@ var handleOpenAIStream = async (response, ctx) => {
 };

 // src/providers/anthropic.ts
+var getApiKey3 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("anthropic");
+  } catch {
+    const key = process.env.ANTHROPIC_API_KEY || "";
+    if (!key) throw new Error("Anthropic API key not found");
+    return key;
+  }
+};
 var convertToAnthropicFormat = (messages) => {
   const result = [];
   let i = 0;
@@ -561,10 +587,7 @@ var convertToAnthropicFormat = (messages) => {
 };
 var callAnthropic = async (config, ctx) => {
   const { model: model2, instructions, schema, apiKey: configApiKey } = config;
-  const apiKey = configApiKey
-  if (!apiKey) {
-    throw new Error("Anthropic API key not found");
-  }
+  const apiKey = getApiKey3(configApiKey);
   let system = instructions;
   if (ctx.history[0]?.role === "system") {
     system = ctx.history[0].content;
@@ -709,12 +732,19 @@ var handleAnthropicStream = async (response, ctx) => {
 };

 // src/providers/google.ts
+var getApiKey4 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("google");
+  } catch {
+    const key = process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY || "";
+    if (!key) throw new Error("Google API key not found");
+    return key;
+  }
+};
 var callGoogle = async (config, ctx) => {
   const { model: model2, instructions, apiKey: configApiKey } = config;
-  const apiKey = configApiKey
-  if (!apiKey) {
-    throw new Error("Google API key not found");
-  }
+  const apiKey = getApiKey4(configApiKey);
   const contents = [];
   if (instructions) {
     contents.push({
@@ -749,7 +779,7 @@ var callGoogle = async (config, ctx) => {
   }
   const endpoint = ctx.stream ? "streamGenerateContent" : "generateContent";
   const response = await fetch(
-    `https://generativelanguage.googleapis.com/v1beta/models/${model2}:${endpoint}?key=${apiKey}`,
+    `https://generativelanguage.googleapis.com/v1beta/models/${model2}:${endpoint}?key=${apiKey}${ctx.stream ? "&alt=sse" : ""}`,
     {
       method: "POST",
       headers: {
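Appending &alt=sse switches Gemini's streamGenerateContent from its default framing (one JSON array of chunks) to Server-Sent Events, so each chunk arrives on its own data: line and can be parsed incrementally, like the other providers' streams. Illustrative payloads, not captured output:

    // default framing:  [{"candidates":[...]},{"candidates":[...]}]
    // with alt=sse:
    //   data: {"candidates":[...]}
    //   data: {"candidates":[...]}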
@@ -860,6 +890,147 @@ var callHuggingFace = async (config, ctx) => {
   );
 };

+// src/providers/xai.ts
+var appendToolCalls2 = (toolCalls, tcchunklist) => {
+  for (const tcchunk of tcchunklist) {
+    while (toolCalls.length <= tcchunk.index) {
+      toolCalls.push({
+        id: "",
+        type: "function",
+        function: { name: "", arguments: "" }
+      });
+    }
+    const tc = toolCalls[tcchunk.index];
+    tc.id += tcchunk.id || "";
+    tc.function.name += tcchunk.function?.name || "";
+    tc.function.arguments += tcchunk.function?.arguments || "";
+  }
+  return toolCalls;
+};
+var getApiKey5 = (configApiKey) => {
+  if (configApiKey) return configApiKey;
+  try {
+    return getKey("xai");
+  } catch {
+    const key = process.env.XAI_API_KEY || "";
+    if (!key) throw new Error("xAI API key not found");
+    return key;
+  }
+};
+var callXAI = async (config, ctx) => {
+  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
+  const apiKey = getApiKey5(configApiKey);
+  const messages = [];
+  if (instructions) {
+    messages.push({ role: "system", content: instructions });
+  }
+  messages.push(...ctx.history);
+  const body = {
+    model: model2,
+    messages,
+    stream: !!ctx.stream
+  };
+  if (schema) {
+    body.response_format = {
+      type: "json_schema",
+      json_schema: {
+        name: schema.name,
+        schema: { ...schema.schema, additionalProperties: false },
+        strict: true
+      }
+    };
+  }
+  if (ctx.tools && ctx.tools.length > 0) {
+    body.tools = ctx.tools;
+    body.tool_choice = "auto";
+  }
+  const response = await fetch("https://api.x.ai/v1/chat/completions", {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${apiKey}`
+    },
+    body: JSON.stringify(body),
+    signal: ctx.abortSignal
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`xAI API error: ${error}`);
+  }
+  if (ctx.stream) {
+    return handleXAIStream(response, ctx);
+  }
+  const data = await response.json();
+  const choice = data.choices[0];
+  const { message } = choice;
+  const msg = {
+    role: "assistant",
+    content: message.content || ""
+  };
+  if (message.tool_calls) {
+    msg.tool_calls = message.tool_calls;
+  }
+  return {
+    ...ctx,
+    lastResponse: msg,
+    history: [...ctx.history, msg]
+  };
+};
+var handleXAIStream = async (response, ctx) => {
+  const reader = response.body.getReader();
+  const decoder = new TextDecoder();
+  let fullContent = "";
+  let toolCalls = [];
+  let buffer = "";
+  try {
+    while (true) {
+      if (ctx.abortSignal?.aborted) {
+        break;
+      }
+      const { done, value } = await reader.read();
+      if (done) break;
+      buffer += decoder.decode(value, { stream: true });
+      const lines = buffer.split("\n");
+      buffer = lines.pop() || "";
+      for (const line of lines) {
+        if (line.startsWith("data: ")) {
+          const data = line.slice(6).trim();
+          if (data === "[DONE]") continue;
+          if (!data) continue;
+          try {
+            const parsed = JSON.parse(data);
+            const delta = parsed.choices?.[0]?.delta;
+            if (delta?.content) {
+              fullContent += delta.content;
+              if (ctx.stream) {
+                ctx.stream({ type: "content", content: delta.content });
+              }
+            }
+            if (delta?.tool_calls) {
+              toolCalls = appendToolCalls2(toolCalls, delta.tool_calls);
+            }
+          } catch (e) {
+          }
+        }
+      }
+    }
+  } finally {
+    reader.releaseLock();
+  }
+  const msg = {
+    role: "assistant",
+    content: fullContent
+  };
+  if (toolCalls.length > 0) {
+    msg.tool_calls = toolCalls;
+  }
+  return {
+    ...ctx,
+    lastResponse: msg,
+    history: [...ctx.history, msg]
+  };
+};
+
 // src/providers/index.ts
 var callProvider = async (config, ctx) => {
   const { provider, model: model2 } = parseModelName(config.model);
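The new xAI provider mirrors the OpenAI chat-completions wire format against https://api.x.ai/v1/chat/completions, including json_schema structured output and incremental tool-call assembly during streaming. A minimal sketch of calling it directly; the model name is illustrative and the ctx shape is inferred from this diff, not from package docs:

    const result = await callXAI(
      { model: "grok-beta", instructions: "Be terse." }, // apiKey falls back to XAI_API_KEY
      {
        history: [{ role: "user", content: "ping" }],
        tools: [],
        stream: null, // or a callback: ({ type, content }) => process.stdout.write(content)
        abortSignal: undefined
      }
    );
    console.log(result.lastResponse.content);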
@@ -871,6 +1042,8 @@ var callProvider = async (config, ctx) => {
       return callAnthropic(providerConfig, ctx);
     case "google":
       return callGoogle(providerConfig, ctx);
+    case "xai":
+      return callXAI(providerConfig, ctx);
     case "huggingface":
     default:
       return callHuggingFace(providerConfig, ctx);
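parseModelName is not part of this diff, so the routing convention is an assumption: presumably a provider prefix on the model string selects the case, with anything unrecognized still falling through to Hugging Face.

    // Assumed mapping (parseModelName is outside this diff):
    // "openai/gpt-4o"  -> { provider: "openai", model: "gpt-4o" }
    // "xai/grok-beta"  -> { provider: "xai",    model: "grok-beta" }  // new in 1.0.10
    // unrecognized     -> default case, callHuggingFace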