netheriteai-code 0.3.1 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/cli.js +1 -1
- package/src/ollama.js +25 -15
package/package.json
CHANGED
package/src/cli.js
CHANGED
package/src/ollama.js
CHANGED
@@ -7,23 +7,33 @@ const PREFERRED_DEFAULT_MODELS = [
   "glm-5:cloud",
 ];

-async function request(pathname, body, signal) {
-  try {
-    const response = await fetch(`${BASE_URL}${pathname}`, {
-      method: "POST",
-      headers: { "content-type": "application/json" },
-      body: JSON.stringify(body),
-      signal,
-    });
+async function request(pathname, body, signal, retries = 3) {
+  for (let i = 0; i < retries; i++) {
+    try {
+      const response = await fetch(`${BASE_URL}${pathname}`, {
+        method: "POST",
+        headers: { "content-type": "application/json" },
+        body: JSON.stringify(body),
+        signal,
+      });
+
+      if (response.ok) return response;
+
+      // If 500 error, wait and retry
+      if (response.status === 500 && i < retries - 1) {
+        await new Promise(r => setTimeout(r, 1000));
+        continue;
+      }

-    if (!response.ok) {
+      throw new Error("Server down");
+    } catch (err) {
+      if (err.name === "AbortError") throw err;
+      if (i < retries - 1) {
+        await new Promise(r => setTimeout(r, 1000));
+        continue;
+      }
       throw new Error("Server down");
     }
-
-    return response;
-  } catch (err) {
-    if (err.name === "AbortError") throw err;
-    throw new Error("Server down");
   }
 }

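The hunk above makes the helper attempt the call up to three times, pausing one second after a 500 response or a thrown network error, so the worst case adds roughly two seconds before "Server down" surfaces; an AbortError from the caller's signal still propagates immediately. Below is a hedged sketch of a caller: request and "/api/chat" come from the diff, while the model name, message, and timeout are invented for illustration and assume BASE_URL points at a reachable Ollama server.

```js
// Hypothetical caller of the request() helper from the hunk above (ESM, Node 18+).
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), 30_000); // illustrative 30 s cap

try {
  const res = await request(
    "/api/chat",
    { model: "llama3", messages: [{ role: "user", content: "hi" }], stream: false },
    controller.signal
  );
  console.log(await res.json());
} catch (err) {
  // Either "Server down" after the final retry, or an AbortError if the timer fired.
  console.error(err.message);
} finally {
  clearTimeout(timer);
}
```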
@@ -132,7 +142,7 @@ function createTaggedStreamParser({ onContent, onReasoning }) {

 export async function chatStream({ model, messages, tools, onChunk, onReasoningChunk, signal }) {
   const body = { model, messages, stream: true };
-  if (tools && tools.length) body.tools = tools;
+  if (tools && tools.length && model !== "glm-5:cloud") body.tools = tools;

   const response = await request("/api/chat", body, signal);

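A minimal standalone sketch of the guard added in this second hunk, assuming the request body is built exactly as shown; the buildBody helper and the tool definition are invented for illustration. With "glm-5:cloud" the tools array is dropped even when the caller supplies one, so that model always receives a plain chat request, while every other model keeps its tools.

```js
// Standalone illustration of the model-specific tools guard (helper and tool are made up).
const tools = [
  { type: "function", function: { name: "read_file", parameters: { type: "object" } } },
];

function buildBody(model, messages) {
  const body = { model, messages, stream: true };
  // Same condition as in ollama.js 0.3.4: skip tools for glm-5:cloud.
  if (tools && tools.length && model !== "glm-5:cloud") body.tools = tools;
  return body;
}

console.log("tools" in buildBody("glm-5:cloud", [])); // false: tools are stripped
console.log("tools" in buildBody("llama3", []));      // true: other models keep them
```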