llmjs2 1.0.0 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/README.md +39 -450
  2. package/grapes.jpg +0 -0
  3. package/index.d.ts +43 -0
  4. package/index.js +465 -0
  5. package/package.json +7 -47
  6. package/spec.txt +73 -0
  7. package/test-generate-tools-suite.js +100 -0
  8. package/test-generate-tools.js +57 -0
  9. package/test-generate.js +31 -0
  10. package/test.js +33 -0
  11. package/LICENSE +0 -21
  12. package/dist/agent.d.ts +0 -80
  13. package/dist/agent.d.ts.map +0 -1
  14. package/dist/agent.js +0 -189
  15. package/dist/agent.js.map +0 -1
  16. package/dist/index.d.ts +0 -74
  17. package/dist/index.d.ts.map +0 -1
  18. package/dist/index.js +0 -191
  19. package/dist/index.js.map +0 -1
  20. package/dist/providers/base.d.ts +0 -58
  21. package/dist/providers/base.d.ts.map +0 -1
  22. package/dist/providers/base.js +0 -149
  23. package/dist/providers/base.js.map +0 -1
  24. package/dist/providers/index.d.ts +0 -8
  25. package/dist/providers/index.d.ts.map +0 -1
  26. package/dist/providers/index.js +0 -7
  27. package/dist/providers/index.js.map +0 -1
  28. package/dist/providers/ollama.d.ts +0 -42
  29. package/dist/providers/ollama.d.ts.map +0 -1
  30. package/dist/providers/ollama.js +0 -260
  31. package/dist/providers/ollama.js.map +0 -1
  32. package/dist/providers/openai.d.ts +0 -38
  33. package/dist/providers/openai.d.ts.map +0 -1
  34. package/dist/providers/openai.js +0 -289
  35. package/dist/providers/openai.js.map +0 -1
  36. package/dist/types.d.ts +0 -182
  37. package/dist/types.d.ts.map +0 -1
  38. package/dist/types.js +0 -6
  39. package/dist/types.js.map +0 -1
  40. package/src/agent.ts +0 -285
  41. package/src/index.ts +0 -268
  42. package/src/providers/base.ts +0 -216
  43. package/src/providers/index.ts +0 -8
  44. package/src/providers/ollama.ts +0 -429
  45. package/src/providers/openai.ts +0 -485
  46. package/src/types.ts +0 -231
package/index.js ADDED
@@ -0,0 +1,465 @@
1
+ import fs from 'node:fs/promises';
2
+
3
const DEFAULT_BASE_URL = 'https://api.ollama.com';
const DEFAULT_CHAT_PATH = '/api/chat';

// True only for strings containing at least one non-whitespace character.
const isString = (value) => typeof value === 'string' && value.trim() !== '';

// Render a value for logging; anything longer than `max` characters is cut
// off with an explicit truncation marker that reports the original length.
const prettyPrint = (value, max = 3000) => {
  const text = typeof value === 'string' ? value : JSON.stringify(value, null, 2);
  if (text.length <= max) {
    return text;
  }
  return `${text.slice(0, max)}\n...TRUNCATED (${text.length} chars)...`;
};

// Resolve the base URL (explicit arg > OLLAMA_BASE_URL env var > cloud
// default), validate it, and strip any trailing slashes so paths join cleanly.
const normalizeBaseUrl = (baseUrl) => {
  const candidate = baseUrl ?? process.env.OLLAMA_BASE_URL ?? DEFAULT_BASE_URL;
  if (!isString(candidate)) {
    throw new TypeError('llmjs2: Invalid OLLAMA_BASE_URL. Provide a valid URL string.');
  }

  let parsed;
  try {
    parsed = new URL(candidate);
  } catch {
    throw new TypeError(`llmjs2: Invalid base URL "${candidate}".`);
  }
  return parsed.toString().replace(/\/+$|\/$/, '');
};

// Explicit key wins; otherwise fall back to the OLLAMA_API_KEY env var.
const getApiKey = (apiKey) => {
  if (isString(apiKey)) {
    return apiKey;
  }
  return process.env.OLLAMA_API_KEY;
};

// Full chat endpoint for a given (possibly undefined) base URL.
const buildEndpoint = (baseUrl) => normalizeBaseUrl(baseUrl) + DEFAULT_CHAT_PATH;
29
+
30
// Split a "provider/model-name" identifier at the FIRST slash, so model
// names may themselves contain slashes (e.g. "ollama/org/model").
// Throws TypeError when either side is empty or the input is malformed.
const parseModel = (model) => {
  const invalid = () => new TypeError('llmjs2: model must be provider/model-name.');

  if (typeof model !== 'string' || model.trim() === '' || !model.includes('/')) {
    throw invalid();
  }

  const slash = model.indexOf('/');
  const provider = model.slice(0, slash);
  const modelName = model.slice(slash + 1).trim();

  if (provider.trim() === '' || modelName === '') {
    throw invalid();
  }

  return { provider: provider.trim(), modelName };
};
44
+
45
// Normalize the two supported `completion` call shapes —
// (model, prompt) positional or a single options object — into one
// canonical { model, messages, baseUrl, apiKey } record.
// Throws TypeError when neither shape is satisfied.
const normalizeCompletionInput = (modelOrOptions, prompt) => {
  const hasText = (v) => typeof v === 'string' && v.trim().length > 0;

  // Positional form: completion('provider/model', 'prompt').
  if (hasText(modelOrOptions)) {
    if (!hasText(prompt)) {
      throw new TypeError('llmjs2: prompt must be a non-empty string.');
    }
    return {
      model: modelOrOptions.trim(),
      messages: [{ role: 'user', content: prompt.trim() }],
      baseUrl: undefined,
      apiKey: undefined,
    };
  }

  if (typeof modelOrOptions !== 'object' || modelOrOptions === null) {
    throw new TypeError('llmjs2: completion requires either (model, prompt) or an options object.');
  }

  const { model, messages, prompt: promptField, ollamaBaseUrl, ollamaApiKey } = modelOrOptions;

  if (!hasText(model)) {
    throw new TypeError('llmjs2: options.model must be a non-empty string.');
  }

  // An explicit messages array wins; otherwise a prompt string becomes a
  // single user message.
  let normalizedMessages;
  if (Array.isArray(messages)) {
    normalizedMessages = messages;
  } else if (hasText(promptField)) {
    normalizedMessages = [{ role: 'user', content: promptField.trim() }];
  }

  if (!Array.isArray(normalizedMessages) || normalizedMessages.length === 0) {
    throw new TypeError('llmjs2: options.messages or options.prompt must be provided.');
  }

  return {
    model: model.trim(),
    messages: normalizedMessages,
    baseUrl: ollamaBaseUrl,
    apiKey: ollamaApiKey,
  };
};
86
+
87
// Validate and normalize chat messages. Messages missing a non-empty
// content string are rejected, except assistant tool-call messages, which
// legitimately carry no content and are passed through untouched.
// Throws TypeError naming the offending index.
//
// Fix: the tool-call exemption previously read `message.role` before
// checking that `message` was an object, so a null/primitive entry crashed
// with "Cannot read properties of null" instead of the descriptive error.
const normalizeMessages = (messages) => {
  const hasText = (v) => typeof v === 'string' && v.trim().length > 0;

  return messages.map((message, index) => {
    if (!message || typeof message !== 'object' || !hasText(message.content)) {
      // Only objects can qualify for the assistant tool-call exemption.
      if (
        message &&
        typeof message === 'object' &&
        message.role === 'assistant' &&
        (message.tool_calls || message.tool_call)
      ) {
        return message;
      }
      throw new TypeError(`llmjs2: messages[${index}] must be an object with a non-empty content string.`);
    }

    return {
      role: hasText(message.role) ? message.role : 'user',
      content: message.content,
    };
  });
};
102
+
103
// True when the value is a non-blank string the WHATWG URL parser accepts
// (i.e. an absolute URL with a scheme); false for everything else.
const isUrl = (value) => {
  if (typeof value !== 'string' || !value.trim()) {
    return false;
  }
  try {
    return Boolean(new URL(value));
  } catch {
    return false;
  }
};
112
+
113
// Resolve true when `filePath` names an accessible filesystem entry.
// Non-string / blank input and any fs error (missing file, permissions)
// resolve false rather than throwing.
const fileExists = async (filePath) => {
  if (typeof filePath !== 'string' || !filePath.trim()) {
    return false;
  }
  try {
    await fs.access(filePath);
    return true;
  } catch {
    return false;
  }
};
122
+
123
// Resolve a reference to text. Buffers decode as UTF-8; URLs are fetched;
// existing file paths are read from disk. Any download/read failure falls
// back to returning the original string, so references are best-effort.
// Throws TypeError for anything that is neither a string nor a Buffer.
const loadReference = async (reference) => {
  if (Buffer.isBuffer(reference)) {
    return reference.toString('utf8');
  }

  if (!isString(reference)) {
    throw new TypeError('llmjs2: references must be strings or buffers.');
  }

  if (isUrl(reference)) {
    try {
      const response = await fetch(reference);
      if (response.ok) {
        return await response.text();
      }
    } catch {
      // Network failure — treat the URL itself as the reference text.
      return reference;
    }
    // Non-2xx response — same fallback.
    return reference;
  }

  if (await fileExists(reference)) {
    try {
      const data = await fs.readFile(reference);
      return data.toString('utf8');
    } catch {
      return reference;
    }
  }

  // Plain text reference.
  return reference;
};
154
+
155
// Resolve an image to a base64 string (the encoding Ollama expects).
// Buffers are encoded directly; URLs are downloaded; existing file paths
// are read from disk. Failures fall back to returning the input string.
// Throws TypeError for anything that is neither a string nor a Buffer.
const loadImage = async (image) => {
  if (Buffer.isBuffer(image)) {
    return image.toString('base64');
  }

  if (!isString(image)) {
    throw new TypeError('llmjs2: images must be strings or buffers.');
  }

  if (isUrl(image)) {
    try {
      const response = await fetch(image);
      if (!response.ok) {
        throw new Error(`Failed to download image: ${response.status}`);
      }
      const arrayBuffer = await response.arrayBuffer();
      return Buffer.from(arrayBuffer).toString('base64');
    } catch {
      // Download failed — pass the URL through unchanged.
      return image;
    }
  }

  if (await fileExists(image)) {
    try {
      const bytes = await fs.readFile(image);
      return bytes.toString('base64');
    } catch {
      return image;
    }
  }

  // Assume the string is already base64 (or otherwise usable as-is).
  return image;
};
188
+
189
// Compose the single user-turn prompt for `generate`: the user's prompt,
// an optional numbered reference list, and a fixed instruction line.
const buildGeneratePrompt = (userPrompt, references) => {
  const lines = [`User prompt: ${userPrompt}`];

  if (references.length > 0) {
    lines.push('References:');
    references.forEach((item, index) => {
      lines.push(` [${index + 1}] ${item}`);
    });
  }

  lines.push('Generate a response using the prompt and references above.');
  return lines.join('\n');
};
200
+
201
// Try to interpret assistant output as a tool call. Supports direct JSON
// ({tool|name, arguments}) and JSON embedded in surrounding prose.
// Returns { tool, arguments } or undefined when no call is present.
//
// Fix: OpenAI-style tool calls encode `arguments` as a JSON *string*
// (e.g. `"arguments": "{\"location\":\"SF\"}"`), which the old
// `typeof args === 'object'` check rejected — native tool calls surfaced
// by extractText were silently dropped. String arguments are now decoded,
// and a null `arguments` normalizes to {} instead of leaking through.
const parseToolCall = (text) => {
  const hasText = (v) => typeof v === 'string' && v.trim().length > 0;

  // Accept arguments as an object (Ollama style) or a JSON-encoded string
  // (OpenAI style); anything else yields undefined.
  const coerceArguments = (args) => {
    if (args && typeof args === 'object') return args;
    if (typeof args === 'string') {
      try {
        const parsed = JSON.parse(args);
        if (parsed && typeof parsed === 'object') return parsed;
      } catch {
        /* not JSON — fall through */
      }
    }
    return undefined;
  };

  if (!hasText(text)) return undefined;
  const trimmed = text.trim();

  // Direct JSON first (tool-calling models often return bare JSON).
  try {
    const data = JSON.parse(trimmed);
    const name = data.tool || data.name;
    const args = coerceArguments(data.arguments);
    if (hasText(name) && (args || data.arguments === null)) {
      return { tool: name, arguments: args || {} };
    }
  } catch {
    // Fallback: extract a JSON object wrapped in prose.
    const jsonMatch = trimmed.match(/({[\s\S]*})/);
    if (jsonMatch) {
      try {
        const data = JSON.parse(jsonMatch[1]);
        const name = data.tool || data.name;
        if (hasText(name)) {
          return { tool: name, arguments: coerceArguments(data.arguments) || {} };
        }
      } catch {
        /* ignore malformed embedded JSON */
      }
    }
  }

  return undefined;
};
228
+
229
// Execute the tool named in `toolCall` from the provided tool list and
// return its result as a string for feeding back into the conversation.
// Throws Error when the tool is unknown, TypeError when its handler is
// not callable.
//
// Fix: structured handler results previously went through String(), which
// turns objects into "[object Object]"; they are now serialized as JSON so
// the model sees real data. String() remains the fallback for values
// JSON.stringify cannot represent (e.g. undefined).
const runTool = async (toolCall, tools) => {
  const tool = Array.isArray(tools) ? tools.find((item) => item.name === toolCall.tool) : undefined;
  if (!tool) {
    throw new Error(`llmjs2: Tool not found: ${toolCall.tool}`);
  }

  if (typeof tool.handler !== 'function') {
    throw new TypeError(`llmjs2: Tool handler for ${toolCall.tool} must be a function.`);
  }

  const result = await tool.handler(toolCall.arguments || {});
  if (typeof result === 'string') {
    return result;
  }
  return JSON.stringify(result) ?? String(result);
};
242
+
243
// Pull the assistant's text out of a provider response body, probing the
// known shapes in priority order: native tool calls, Ollama message
// content, flat result/content fields, `output` arrays, then OpenAI-style
// `choices`. Returns undefined when no shape matches.
const extractText = (body) => {
  if (!body || typeof body !== 'object') return undefined;

  const hasText = (v) => typeof v === 'string' && v.trim().length > 0;
  const { message } = body;

  // Native Ollama/OpenAI tool calls: surface the first call as JSON so the
  // tool loop can parse it.
  if (message?.tool_calls && message.tool_calls.length > 0) {
    return JSON.stringify(message.tool_calls[0].function);
  }

  if (hasText(message?.content)) return message.content;
  if (hasText(body.result)) return body.result;
  if (hasText(body.content)) return body.content;
  if (Array.isArray(body.output) && hasText(body.output[0]?.content)) return body.output[0].content;

  if (Array.isArray(body.choices)) {
    const [choice] = body.choices;
    if (choice?.message?.tool_calls) return JSON.stringify(choice.message.tool_calls[0].function);
    if (hasText(choice?.message?.content)) return choice.message.content;
    if (hasText(choice?.text)) return choice.text;
  }

  return undefined;
};
263
+
264
// Pull a human-readable error message out of a provider error body,
// probing the common shapes in order. Returns undefined when none match.
const extractError = (body) => {
  if (!body || typeof body !== 'object') return undefined;

  const hasText = (v) => typeof v === 'string' && v.trim().length > 0;

  if (hasText(body.error)) return body.error;
  if (hasText(body.message)) return body.message;
  if (hasText(body.detail)) return body.detail;
  if (Array.isArray(body.errors) && hasText(body.errors[0]?.message)) return body.errors[0].message;

  return undefined;
};
271
+
272
// Convert low-level fetch/network failures into an actionable connection
// error pointing at the configured URL; anything unrecognized is rethrown
// untouched. This function ALWAYS throws — it never returns.
const wrapFetchError = (error, url) => {
  const networkCodes = ['ECONNREFUSED', 'ENOTFOUND', 'EAI_AGAIN'];
  const looksLikeNetworkFailure =
    error instanceof TypeError ||
    error?.name === 'FetchError' ||
    networkCodes.includes(error?.code) ||
    error?.message?.includes('fetch');

  if (looksLikeNetworkFailure) {
    throw new Error(`llmjs2: Could not connect to ${url}. Check your OLLAMA_BASE_URL.`);
  }
  throw error;
};
283
+
284
// POST a non-streaming chat request to the Ollama endpoint and return the
// assistant's text. Attaches images to the first user message, advertises
// tools in the OpenAI/Ollama function schema, and wraps network/HTTP
// failures in descriptive errors.
const requestOllama = async (modelName, messages, baseUrl, apiKey, images = [], tools = []) => {
  const url = buildEndpoint(baseUrl);
  const token = getApiKey(apiKey);
  const headers = token
    ? { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` }
    : { 'Content-Type': 'application/json' };

  // Ollama expects images on a message, not at the request top level.
  const payloadMessages =
    Array.isArray(images) && images.length
      ? messages.map((message, index) =>
          index === 0 && message.role === 'user' ? { ...message, images } : message
        )
      : messages;

  const payload = { model: modelName, messages: payloadMessages, stream: false };

  if (Array.isArray(tools) && tools.length > 0) {
    payload.tools = tools.map(({ name, description, parameters }) => ({
      type: 'function',
      function: { name, description, parameters },
    }));
  }

  let response;
  try {
    response = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(payload),
    });
  } catch (error) {
    wrapFetchError(error, url); // always throws
  }

  const text = await response.text();
  let body;
  try {
    body = text ? JSON.parse(text) : undefined;
  } catch {
    // Non-JSON body: keep the raw text for error reporting below.
    body = undefined;
  }

  if (!response.ok) {
    if (response.status === 404) {
      throw new Error(`llmjs2: Model "${modelName}" not found on provider "ollama".`);
    }

    const message = extractError(body) || text || response.statusText;
    throw new Error(`llmjs2: Request to ${url} failed with status ${response.status}: ${message}`);
  }

  const responseText = extractText(body);
  if (!isString(responseText)) {
    throw new Error(`llmjs2: Could not parse assistant response from ${url}.`);
  }

  return responseText;
};
355
+
356
/**
 * Send a chat completion request.
 * Overloaded: completion(model, prompt) or completion(options) where
 * options carries { model, messages|prompt, ollamaBaseUrl, ollamaApiKey }.
 * Resolves to the assistant's response string.
 */
export async function completion(modelOrOptions, prompt) {
  const normalized = normalizeCompletionInput(modelOrOptions, prompt);
  const { provider, modelName } = parseModel(normalized.model);

  // Prefix router: only the "ollama" provider is implemented.
  if (provider !== 'ollama') {
    throw new Error(`llmjs2: Unsupported provider "${provider}".`);
  }

  return requestOllama(
    modelName,
    normalizeMessages(normalized.messages),
    normalized.baseUrl,
    normalized.apiKey
  );
}
366
+
367
/**
 * High-level generation with optional images, references, and tools.
 * Overloaded: generate(options) or generate(model, userPrompt, images,
 * references, tools). Images/references given as URLs, file paths, or
 * Buffers are resolved up front; tool calls emitted by the model are
 * executed locally and fed back for up to three round trips.
 * Resolves to the assistant's final response string.
 */
export async function generate(optionsOrModel, userPrompt, images = [], references = [], tools = []) {
  const usingOptionsObject =
    typeof optionsOrModel === 'object' && optionsOrModel !== null && !isString(optionsOrModel);

  const options = usingOptionsObject
    ? optionsOrModel
    : { model: optionsOrModel, userPrompt, images, references, tools };

  const {
    model,
    userPrompt: promptText,
    messages: providedMessages,
    images: imageInput = [],
    references: referenceInput = [],
    tools: toolInput = [],
    systemPrompt,
    ollamaBaseUrl,
    ollamaApiKey,
  } = options;

  if (!isString(model)) {
    throw new TypeError('llmjs2: model must be a non-empty string.');
  }

  const { provider, modelName } = parseModel(model.trim());
  if (provider !== 'ollama') {
    throw new Error(`llmjs2: Unsupported provider "${provider}".`);
  }

  // Accept single values or arrays for images/references.
  const imageList = Array.isArray(imageInput) ? imageInput : [imageInput];
  const referenceList = Array.isArray(referenceInput) ? referenceInput : [referenceInput];
  const toolList = Array.isArray(toolInput) ? toolInput : [];

  // Resolve URLs/paths/Buffers; loaders fall back to the original string on
  // failure, and anything non-string is dropped.
  const resolvedImages = (await Promise.all(imageList.map(loadImage))).filter(isString);
  const resolvedReferences = (await Promise.all(referenceList.map(loadReference))).filter(isString);

  let messages;
  if (providedMessages) {
    messages = normalizeMessages(providedMessages);
  } else {
    if (!isString(promptText)) {
      throw new TypeError('llmjs2: userPrompt must be a non-empty string when messages are not provided.');
    }

    const draft = [];
    if (isString(systemPrompt)) {
      draft.push({ role: 'system', content: systemPrompt.trim() });
    }
    draft.push({ role: 'user', content: buildGeneratePrompt(promptText.trim(), resolvedReferences) });
    messages = normalizeMessages(draft);
  }

  const toolDefinitions = toolList.map(({ name, description, parameters }) => ({
    name,
    description,
    parameters,
  }));

  const callModel = () =>
    requestOllama(modelName, messages, ollamaBaseUrl, ollamaApiKey, resolvedImages, toolDefinitions);

  let response = await callModel();

  // Tool-execution loop: at most three round trips, then return whatever
  // the model last produced (even if it still looks like a tool call).
  for (let cycle = 0; cycle < 3; cycle += 1) {
    const toolCall = parseToolCall(response);
    if (!toolCall) {
      return response;
    }

    const toolResult = await runTool(toolCall, toolList);
    messages.push({ role: 'assistant', content: response });
    messages.push({ role: 'user', content: `Tool ${toolCall.tool} returned:\n${toolResult}` });

    response = await callModel();
  }

  return response;
}
package/package.json CHANGED
@@ -1,55 +1,15 @@
1
1
  {
2
2
  "name": "llmjs2",
3
- "version": "1.0.0",
4
- "description": "LLM abstraction layer for Node.js. Unified API for OpenAI and Ollama with error handling and retry logic.",
5
- "main": "dist/dex.js",
6
- "types": "dist/index.d.ts",
3
+ "version": "1.0.5",
4
+ "description": "Minimal zero-dependency Node.js client for Ollama and Ollama Cloud.",
7
5
  "type": "module",
6
+ "main": "index.js",
8
7
  "exports": {
9
8
  ".": {
10
- "import": "./dist/index.js",
11
- "types": "./dist/index.d.ts"
9
+ "import": "./index.js"
12
10
  }
13
11
  },
14
- "keywords": [
15
- "llm",
16
- "llmjs2",
17
- "openai",
18
- "ollama",
19
- "ai",
20
- "machine-learning",
21
- "abstraction",
22
- "unified-api"
23
- ],
24
- "author": "littlellmjs",
25
- "license": "MIT",
26
- "files": [
27
- "dist",
28
- "src",
29
- "README.md",
30
- "LICENSE"
31
- ],
32
- "engines": {
33
- "node": ">=16.0.0"
34
- },
35
- "scripts": {
36
- "build": "tsc",
37
- "build:watch": "tsc --watch",
38
- "clean": "rm -rf dist",
39
- "test": "node --test test.js",
40
- "test:watch": "node --watch test.js",
41
- "prepublishOnly": "npm run build && npm run test"
42
- },
43
- "devDependencies": {
44
- "@types/node": "^20.10.0",
45
- "typescript": "^5.9.3"
46
- },
47
- "repository": {
48
- "type": "git",
49
- "url": "https://github.com/littlellmjs/llmjs2"
50
- },
51
- "bugs": {
52
- "url": "https://github.com/littlellmjs/llmjs2/issues"
53
- },
54
- "homepage": "https://github.com/littlellmjs/llmjs2#readme"
12
+ "types": "index.d.ts",
13
+ "keywords": ["llm", "ollama", "node", "ai", "client", "llmjs", "llmjs2"],
14
+ "license": "MIT"
55
15
  }
package/spec.txt ADDED
@@ -0,0 +1,73 @@
1
+ This specification defines **llmjs2**, a minimalist Node.js library designed to provide a robust, standardized interface for interacting with LLMs. It focuses on a "zero-config" developer experience, initially targeting **Ollama** with a fallback to **Ollama Cloud**.
2
+
3
+ ---
4
+
5
+ ## 1. Project Identity
6
+ * **Name:** `llmjs2`
7
+ * **Mission:** To be the most lightweight, concise, and robust bridge between Node.js applications and AI models.
8
+ * **Core Principle:** Favor convention over configuration. Use OpenAI-compatible schemas to minimize the learning curve.
9
+
10
+ ---
11
+
12
+ ## 2. Technical Requirements
13
+ * **Runtime:** Node.js 18.0.0+ (Required for native `fetch` and `web streams`).
14
+ * **Module System:** ESM (EcmaScript Modules).
15
+ * **Dependencies:** **Zero.** The library must not depend on external packages to ensure a small footprint and security.
16
+
17
+ ---
18
+
19
+ ## 3. Configuration & Environment
20
+ The library automatically resolves connection details using the following hierarchy:
21
+ 1. **Explicit Config:** Passed during initialization (if implemented).
22
+ 2. **Environment Variables:**
+    * `OLLAMA_BASE_URL`: The target API host.
23
+ * `OLLAMA_API_KEY`: The bearer token for authenticated proxies or Cloud access.
24
+ 3. **Default Fallback:** `https://api.ollama.com` (Ollama Cloud).
25
+
26
+ ---
27
+
28
+ ## 4. API Specification
29
+
30
+ ### The `completion` Function
31
+ The library exports a single overloaded function: `completion`.
32
+
33
+ **Signatures:**
34
+ * `completion(model: string, prompt: string): Promise<string>`
35
+ * `completion(options: CompletionOptions): Promise<string>`
36
+
37
+ **Input Object (`CompletionOptions`):**
38
+ | Property | Type | Description |
39
+ | :--- | :--- | :--- |
40
+ | **model** | `string` | Format: `provider/model-name` (e.g., `ollama/llama3`). |
41
+ | **messages** | `Array` | Standard OpenAI `role`/`content` objects. |
42
+
43
+ **Output:**
+ * Returns a **Promise** that resolves to a **string** containing the assistant's response.
44
+
45
+ ---
46
+
47
+ ## 5. Internal Architecture
48
+
49
+ ### A. Provider Routing
50
+ The library utilizes a "prefix-router." It splits the `model` string at the first `/`.
51
+ * If prefix is `ollama`, the request is routed to the `OllamaProvider`.
52
+ * The prefix is stripped before the request is sent to the provider's API.
53
+
54
+ ### B. Request Normalization
55
+ To maintain simplicity, **llmjs2** ignores hyper-parameters like `temperature` or `max_tokens` in the high-level API, allowing the model's internal defaults to govern the output. This ensures the library remains "future-proof" against changing API parameters.
56
+
57
+ ### C. Error Handling
58
+ The library must catch and wrap low-level network errors into high-level, actionable messages:
59
+ * **Connection Error:** "llmjs2: Could not connect to [URL]. Check your OLLAMA_BASE_URL."
60
+ * **Model Error:** "llmjs2: Model [name] not found on provider [provider]."
61
+
62
+ ---
63
+
64
+ ## 6. Implementation Checklist
65
+
66
+ * [ ] **Env Loader:** Logic to check `process.env` and apply fallbacks.
67
+ * [ ] **URL Parser:** Logic to ensure the base URL and `/api/chat` path are joined correctly without double slashes.
68
+ * [ ] **Fetch Wrapper:** A standard `POST` implementation using `Headers` and `body`.
69
+ * [ ] **Response Extractor:** Logic to navigate the JSON response (e.g., `json.message.content`) and return the raw string.
70
+
71
+ ---
72
+
73
+ *This specification is implemented by `index.js` in this package.*
@@ -0,0 +1,100 @@
1
+ import { generate } from './index.js';
2
+
3
const MODEL = 'ollama/qwen3.5:397b-cloud';

// Single demo tool: canned weather lookup keyed by "City, ST" strings.
const tools = [
  {
    name: 'get_weather',
    description: 'Get the current weather for a location',
    parameters: {
      location: {
        type: 'string',
        required: true,
        description: 'The city and state, e.g. San Francisco, CA',
      },
      unit: {
        type: 'string',
        enum: ['celsius', 'fahrenheit'],
        description: 'Temperature unit',
      },
    },
    handler: ({ location, unit = 'fahrenheit' }) => {
      // Fixture readings; unknown locations get a generic fallback.
      const weatherData = {
        'San Francisco, CA': { temp: 72, condition: 'Sunny' },
        'New York, NY': { temp: 45, condition: 'Cloudy' },
        'London, UK': { temp: 48, condition: 'Rainy' },
      };

      const { temp: tempF, condition } = weatherData[location] ?? { temp: 70, condition: 'Unknown' };
      const useCelsius = unit === 'celsius';
      const temp = useCelsius ? Math.round((tempF - 32) * (5 / 9)) : tempF;
      return `Weather in ${location}: ${temp}°${useCelsius ? 'C' : 'F'}, ${condition}`;
    },
  },
];
34
+
35
// Test cases: each pairs a `generate` input with a regex the result is
// expected to match (mismatches only warn — see runTest).
const makeCase = (name, input, expected) => ({ name, input, expected });

const tests = [
  makeCase(
    'Tool call with default unit',
    {
      model: MODEL,
      userPrompt: 'Please use get_weather to fetch the weather for San Francisco, CA.',
      tools,
    },
    /San Francisco, CA/i
  ),
  makeCase(
    'Tool call with explicit celsius',
    {
      model: MODEL,
      userPrompt: 'Please use get_weather to fetch the weather for London, UK in celsius.',
      tools,
    },
    /London, UK/i
  ),
  makeCase(
    'No tool needed direct answer',
    {
      model: MODEL,
      userPrompt: 'What is 2 + 2?',
      tools,
    },
    /4|four/i
  ),
  makeCase(
    'Explicit messages payload with tool definitions',
    {
      model: MODEL,
      messages: [
        { role: 'system', content: 'You are a tool-aware assistant.' },
        { role: 'user', content: 'Use get_weather for New York, NY.' },
      ],
      tools,
    },
    /Weather in New York, NY/i
  ),
];
76
+
77
// Run one case, logging the result. Pattern mismatches are reported as
// warnings rather than failures because cloud model output varies.
const runTest = async (testCase) => {
  console.log(`\n=== ${testCase.name} ===`);
  try {
    const result = await generate(testCase.input);
    console.log('Result:');
    console.log(result);

    if (testCase.expected && !testCase.expected.test(result)) {
      console.warn('Warning: result did not match expected pattern.');
    }
  } catch (error) {
    console.error('Error:', error?.message ?? error);
  }
};
90
+
91
// Run cases sequentially so console output stays grouped per test.
const runAll = async () => {
  for (const testCase of tests) {
    await runTest(testCase);
  }
};

runAll().catch((error) => {
  console.error('Unexpected failure:', error);
  process.exit(1);
});