@tiens.nguyen/gonext-local-worker 1.0.9 → 1.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -94,7 +94,7 @@ if (!apiBase || !workerKey) {
 }
 
 function toOpenAIMessages(messages) {
-  return messages.map((m) => {
+  return (Array.isArray(messages) ? messages : []).map((m) => {
     if (m.role === "user" && m.attachments?.length) {
       return {
         role: m.role,
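The guard in this hunk changes the failure mode rather than the mapping: a payload without a messages array now yields an empty OpenAI message list instead of a TypeError on `messages.map`. A minimal sketch of the new behavior (inputs are hypothetical):

toOpenAIMessages(undefined);      // 1.0.9: TypeError; 1.0.10: []
toOpenAIMessages("not an array"); // 1.0.10: []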
@@ -123,6 +123,9 @@ async function workerFetch(path, init = {}) {
 
 async function runChatJob(job) {
   const { jobId, payload } = job;
+  if (!payload || !Array.isArray(payload.messages)) {
+    throw new Error("Invalid chat payload: messages array is missing.");
+  }
   const start = Date.now();
   await workerFetch(`/api/worker/jobs/${jobId}`, {
     method: "PATCH",
@@ -166,6 +169,124 @@ async function runChatJob(job) {
   }
 }
 
+function normalizeBaseUrl(raw) {
+  return typeof raw === "string" ? raw.trim().replace(/\/+$/, "") : "";
+}
+
+function normalizeOpenAiV1Root(raw) {
+  const base = normalizeBaseUrl(raw);
+  if (!base) return "";
+  return /\/v1$/i.test(base) ? base : `${base}/v1`;
+}
+
+async function checkOllamaTags(base) {
+  const endpoint = `${base}/api/tags`;
+  try {
+    const res = await fetch(endpoint, { method: "GET" });
+    if (!res.ok) return { online: false, endpoint, models: [] };
+    const j = await res.json();
+    const models = (j.models ?? []).map((m) => {
+      const name = m.name ?? m.model ?? "model";
+      return { id: name, name, value: `ollama:${name}` };
+    });
+    return { online: true, endpoint, models };
+  } catch {
+    return { online: false, endpoint, models: [] };
+  }
+}
+
+async function checkOpenAiModels(base, apiKey) {
+  const endpoint = `${base}/models`;
+  const headers = {};
+  if (apiKey?.trim()) {
+    headers.Authorization = `Bearer ${apiKey.trim()}`;
+  }
+  try {
+    const res = await fetch(endpoint, { method: "GET", headers });
+    if (!res.ok) return { online: false, endpoint, models: [] };
+    const j = await res.json();
+    const models = (j.data ?? [])
+      .map((d) => d.id)
+      .filter(Boolean)
+      .map((id) => ({ id, name: id, value: `mlx:${id}` }));
+    return { online: true, endpoint, models };
+  } catch {
+    return { online: false, endpoint, models: [] };
+  }
+}
+
+async function runLocalHealthJob(job) {
+  const { jobId, payload } = job;
+  const start = Date.now();
+  await workerFetch(`/api/worker/jobs/${jobId}`, {
+    method: "PATCH",
+    body: JSON.stringify({ jobStatus: "running" }),
+  });
+  try {
+    const ollamaBases = Array.isArray(payload?.ollamaBaseUrls)
+      ? payload.ollamaBaseUrls.map(normalizeBaseUrl).filter(Boolean)
+      : [];
+    const dedup = new Map();
+    let ollamaOnline = false;
+    let ollamaEndpoint = "";
+    for (const base of ollamaBases) {
+      const r = await checkOllamaTags(base);
+      ollamaOnline = ollamaOnline || r.online;
+      if (!ollamaEndpoint) ollamaEndpoint = r.endpoint;
+      for (const m of r.models) {
+        if (!dedup.has(m.value)) dedup.set(m.value, m);
+      }
+    }
+    const mlxRoot = normalizeOpenAiV1Root(payload?.mlxOpenAiBaseUrl);
+    const mlx = mlxRoot
+      ? await checkOpenAiModels(mlxRoot, payload?.mlxApiKey ?? "")
+      : null;
+    const result = {
+      ollama:
+        ollamaBases.length > 0
+          ? {
+              configured: dedup.size > 0,
+              online: ollamaOnline,
+              models: [...dedup.values()],
+              endpoint: ollamaEndpoint,
+            }
+          : undefined,
+      mlx: mlx
+        ? {
+            configured: mlx.models.length > 0,
+            online: mlx.online,
+            models: mlx.models,
+            endpoint: mlx.endpoint,
+          }
+        : undefined,
+    };
+    const totalTimeSeconds = (Date.now() - start) / 1000;
+    await workerFetch(`/api/worker/jobs/${jobId}`, {
+      method: "PATCH",
+      body: JSON.stringify({
+        jobStatus: "completed",
+        resultText: JSON.stringify(result),
+        tokenCount: 1,
+        totalTimeSeconds,
+      }),
+    });
+    console.log(
+      `[gonext-worker] completed local_health ${jobId} (${totalTimeSeconds.toFixed(1)}s)`
+    );
+  } catch (e) {
+    const message = e instanceof Error ? e.message : String(e);
+    await workerFetch(`/api/worker/jobs/${jobId}`, {
+      method: "PATCH",
+      body: JSON.stringify({
+        jobStatus: "failed",
+        errorMessage: message,
+        totalTimeSeconds: (Date.now() - start) / 1000,
+      }),
+    });
+    console.error(`[gonext-worker] failed local_health ${jobId}:`, message);
+  }
+}
+
 async function pollOnce() {
   const res = await workerFetch("/api/worker/jobs/next", { method: "POST" });
   if (res.status === 204) return;
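The two normalizers above keep endpoint construction predictable: Ollama base URLs lose trailing slashes before `/api/tags` is appended, and the MLX base gains a `/v1` suffix unless one is already present (matched case-insensitively). A sketch of their behavior, plus an illustrative `resultText` for one reachable Ollama host (host and model name are hypothetical; the `mlx` key is dropped by JSON.stringify when undefined):

normalizeBaseUrl("http://localhost:11434///");     // "http://localhost:11434"
normalizeOpenAiV1Root("http://127.0.0.1:8080");    // "http://127.0.0.1:8080/v1"
normalizeOpenAiV1Root("http://127.0.0.1:8080/V1"); // unchanged, /v1 suffix already present

// resultText:
// {"ollama":{"configured":true,"online":true,
//   "models":[{"id":"llama3","name":"llama3","value":"ollama:llama3"}],
//   "endpoint":"http://localhost:11434/api/tags"}}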
@@ -175,6 +296,14 @@ async function pollOnce() {
   }
   const job = await res.json();
   if (job?.jobId) {
+    const isLocalHealthByType = job.jobType === "local_health";
+    const isLocalHealthByModelKey = job.modelKey === "local_health";
+    const isLocalHealthByPayload =
+      Array.isArray(job.payload?.ollamaBaseUrls) || !!job.payload?.mlxOpenAiBaseUrl;
+    if (isLocalHealthByType || isLocalHealthByModelKey || isLocalHealthByPayload) {
+      await runLocalHealthJob(job);
+      return;
+    }
     await runChatJob(job);
   }
 }
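Dispatch falls back from the explicit jobType to modelKey and finally to the payload shape, presumably so health jobs from an API server that does not yet set `jobType: "local_health"` are still routed correctly. Hypothetical jobs hitting each branch:

{ jobId: "a", jobType: "local_health", payload: {} }                     // matched by type
{ jobId: "b", payload: { mlxOpenAiBaseUrl: "http://127.0.0.1:8080" } }   // matched by payload shape
{ jobId: "c", payload: { messages: [{ role: "user", content: "hi" }] } } // falls through to runChatJob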
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@tiens.nguyen/gonext-local-worker",
-  "version": "1.0.9",
+  "version": "1.0.10",
   "description": "Polls GoNext cloud API for async local LLM jobs and runs them against Ollama/OpenAI-compatible servers on this Mac",
   "type": "module",
   "license": "MIT",