190proof 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +3 -32
- package/dist/index.d.ts +3 -32
- package/dist/index.js +11 -21
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +11 -18
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -27034,12 +27034,6 @@ var GroqModel = /* @__PURE__ */ ((GroqModel2) => {
   GroqModel2["LLAMA_3_70B_8192"] = "llama3-70b-8192";
   return GroqModel2;
 })(GroqModel || {});
-var Role = /* @__PURE__ */ ((Role2) => {
-  Role2["User"] = "user";
-  Role2["Assistant"] = "assistant";
-  Role2["System"] = "system";
-  return Role2;
-})(Role || {});
 
 // ../node_modules/@aws-sdk/client-bedrock-runtime/dist-es/BedrockRuntimeClient.js
 init_dist_es3();
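The Role enum is dropped from the bundle here (and from the export list at the bottom of this diff). A consumer that imported Role from 190proof 1.0.0 would need a local stand-in; a minimal sketch, assuming the string values stay "user" / "assistant" / "system" as bundled above:

// Hypothetical local replacement for the removed Role export.
const Role = {
  User: "user",
  Assistant: "assistant",
  System: "system",
} as const;
type Role = (typeof Role)[keyof typeof Role];

// Example usage mirroring the removed enum.
const message = { role: Role.User, content: "Hello" };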
@@ -31524,10 +31518,14 @@ async function callOpenAiWithRetries(identifier, openAiPayload, openAiConfig, re
 async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTimeoutMs) {
   const functionNames = openAiPayload.functions ? new Set(openAiPayload.functions.map((fn) => fn.name)) : null;
   if (!openAiConfig) {
+    const defaultOpenAIBaseUrl = (
+      // TODO: Remove this one we have per-provider configs
+      "https://gateway.ai.cloudflare.com/v1/932636fc124abb5171fd630afe668905/igpt"
+    );
     openAiConfig = {
       service: "openai",
       apiKey: process.env.OPENAI_API_KEY,
-      baseUrl:
+      baseUrl: defaultOpenAIBaseUrl
     };
   }
   let response;
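When no config is passed, 1.0.2 now falls back to the hard-coded Cloudflare AI Gateway URL above. Passing an explicit openAiConfig bypasses that fallback; a minimal sketch using the field names visible in this hunk (service, apiKey, baseUrl) and a hypothetical gateway slug of your own:

// Hypothetical caller-supplied config. The baseUrl must tolerate
// "/openai/chat/completions" being appended to it (see the next hunk).
const openAiConfig = {
  service: "openai" as const,
  apiKey: process.env.OPENAI_API_KEY,
  baseUrl: "https://gateway.ai.cloudflare.com/v1/<your-account-id>/<your-gateway>",
};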
@@ -31556,7 +31554,7 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
     });
   } else {
     console.log(identifier, "Using OpenAI service", openAiPayload.model);
-    const endpoint = `${openAiConfig
+    const endpoint = `${openAiConfig.baseUrl}/openai/chat/completions`;
     if (openAiConfig.orgId) {
       console.log(identifier, "Using orgId", openAiConfig.orgId);
     }
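Putting the previous two hunks together, the default (no-config) chat-completions URL in 1.0.2 resolves as follows; a worked sketch:

// Gateway base URL from the fallback config, plus the suffix built above.
const defaultOpenAIBaseUrl =
  "https://gateway.ai.cloudflare.com/v1/932636fc124abb5171fd630afe668905/igpt";
const endpoint = `${defaultOpenAIBaseUrl}/openai/chat/completions`;
console.log(endpoint);
// => https://gateway.ai.cloudflare.com/v1/932636fc124abb5171fd630afe668905/igpt/openai/chat/completions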
@@ -31711,10 +31709,6 @@ async function callAnthropic(identifier, AiPayload, AiConfig) {
   if ((AiConfig == null ? void 0 : AiConfig.service) === "bedrock") {
     const AWS_REGION = "us-east-1";
     const MODEL_ID = "anthropic.claude-3-haiku-20240307-v1:0";
-    const AWS_ACCESS_KEY_ID = "AKIAZI2LICXWZC3QQ4O2";
-    const AWS_SECRET_ACCESS_KEY = "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";
-    process.env.AWS_ACCESS_KEY_ID = AWS_ACCESS_KEY_ID;
-    process.env.AWS_SECRET_ACCESS_KEY = AWS_SECRET_ACCESS_KEY;
     const client = new BedrockRuntimeClient({ region: AWS_REGION });
     const payload = {
       anthropic_version: "bedrock-2023-05-31",
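With the hard-coded IAM key pair removed, the Bedrock client is constructed with no explicit credentials, so the AWS SDK v3 default credential provider chain applies (environment variables, shared config files, IAM roles, and so on). A minimal sketch of that call path; the InvokeModelCommand usage is an assumption, since this hunk only shows the client and the start of the payload:

import { BedrockRuntimeClient, InvokeModelCommand } from "@aws-sdk/client-bedrock-runtime";

// No credentials are passed here: the SDK resolves them from the environment
// (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY), shared config, or an IAM role.
const client = new BedrockRuntimeClient({ region: "us-east-1" });

// Hypothetical invocation using the model id and anthropic_version from this hunk.
const response = await client.send(
  new InvokeModelCommand({
    modelId: "anthropic.claude-3-haiku-20240307-v1:0",
    contentType: "application/json",
    body: JSON.stringify({
      anthropic_version: "bedrock-2023-05-31",
      max_tokens: 1024,
      messages: [{ role: "user", content: [{ type: "text", text: "Hello" }] }],
    }),
  })
);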
@@ -31761,7 +31755,6 @@ async function callAnthropic(identifier, AiPayload, AiConfig) {
       data = response2.data;
     }
     const answers = data.content;
-    console.log("Anthropic API answers:", JSON.stringify({ answers }));
     if (!answers[0]) {
       console.error(identifier, "Missing answer in Anthropic API:", data);
       throw new Error("Missing answer in Anthropic API");
@@ -31907,7 +31900,7 @@ function prepareOpenAIPayload(payload) {
     model: payload.model,
     messages: payload.messages.map((message) => ({
       role: message.role,
-      content: message.content
+      content: normalizeMessageContent(message.content)
       // TODO: Handle files
     })),
     functions: payload.functions
@@ -31922,7 +31915,7 @@ function prepareGroqPayload(payload) {
     model: payload.model,
     messages: payload.messages.map((message) => ({
       role: message.role,
-      content: message.content
+      content: normalizeMessageContent(message.content)
     })),
     functions: (_a3 = payload.functions) == null ? void 0 : _a3.map((fn) => ({
       type: "function",
@@ -31930,6 +31923,9 @@ function prepareGroqPayload(payload) {
     }))
   };
 }
+function normalizeMessageContent(content) {
+  return Array.isArray(content) ? content.map((c5) => c5.type === "text" ? c5.text : `[${c5.type}]`).join("\n") : content;
+}
 async function callGroq(identifier, payload) {
   const response = await axios_default.post(
     "https://api.groq.com/openai/v1/chat/completions",
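Both prepareOpenAIPayload and prepareGroqPayload now route message content through this new helper, which flattens array-style (multi-part) content into a single string and leaves plain strings untouched. A quick usage sketch of the added function; the image_url part is a hypothetical example input:

// Same logic as the added lines above, without the bundler's c5 renaming.
function normalizeMessageContent(content: unknown): unknown {
  return Array.isArray(content)
    ? content.map((c) => (c.type === "text" ? c.text : `[${c.type}]`)).join("\n")
    : content;
}

console.log(normalizeMessageContent([
  { type: "text", text: "Describe this image." },
  { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
]));
// => "Describe this image.\n[image_url]"
console.log(normalizeMessageContent("plain string")); // => "plain string"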
@@ -31993,9 +31989,6 @@ export {
   ClaudeModel,
   GPTModel,
   GroqModel,
-  Role,
-  callAnthropicWithRetries,
-  callOpenAiWithRetries,
   callWithRetries
 };
 /*! Bundled license information:
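Role, callAnthropicWithRetries, and callOpenAiWithRetries disappear from the public export list; callWithRetries remains. A hedged sketch of the import change a consumer upgrading from 1.0.0 might make (whether callWithRetries is a drop-in replacement is an assumption, since only the export names are visible in this diff):

// 1.0.0:
// import { Role, callOpenAiWithRetries, callAnthropicWithRetries } from "190proof";
// 1.0.2: only the generic retry entry point is still exported.
import { callWithRetries } from "190proof";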