viho-llm 0.1.6 → 0.1.7

package/README.md CHANGED
@@ -4,7 +4,7 @@

  <h1 align="center">viho-llm</h1>

- <p align="center">Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions.</p>
+ <p align="center">Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions.</p>

  ## Installation

@@ -14,7 +14,9 @@ npm install viho-llm

  ## Prerequisites

- This library supports two ways to access Google Gemini AI:
+ This library supports multiple LLM providers:
+
+ ### Google Gemini AI

  1. **Google AI Studio (GeminiAPI)** - For personal development and prototyping
     - Get an API key from [Google AI Studio](https://makersuite.google.com/app/apikey)
@@ -23,6 +25,14 @@ This library supports two ways to access Google Gemini AI:
     - Requires a Google Cloud project with Vertex AI enabled
     - Supports context caching for cost optimization

+ ### OpenAI Compatible APIs
+
+ **OpenAI API (OpenAIAPI)** - For OpenAI and compatible services
+
+ - Supports official OpenAI API
+ - Compatible with OpenAI-like APIs (e.g., DeepSeek, local LLMs)
+ - Supports thinking/reasoning mode for compatible models
+
  ## Usage

  ### Basic Example with GeminiAPI
@@ -78,9 +88,35 @@ const response = await gemini.chat({
  console.log(response);
  ```

+ ### Basic Example with OpenAI API
+
+ Using OpenAI or OpenAI-compatible services:
+
+ ```javascript
+ import { OpenAIAPI } from 'viho-llm';
+
+ // Initialize OpenAI client
+ const openai = OpenAIAPI({
+   apiKey: 'your-openai-api-key',
+   baseURL: 'https://api.openai.com/v1', // or your custom endpoint
+   modelID: 'gpt-4o',
+   modelThinking: 'enabled', // 'enabled' or 'disabled' for reasoning models
+ });
+
+ // Send a chat message
+ const response = await openai.chat(
+   'You are a helpful assistant.', // system prompt
+   'Hello, how are you?', // user prompt
+ );
+
+ console.log(response);
+ ```
+
  ### Streaming Example

- Both GeminiAPI and GeminiVertex support streaming responses:
+ All providers (GeminiAPI, GeminiVertex, and OpenAIAPI) support streaming responses:
+
+ #### Gemini Streaming

  ```javascript
  // Send a chat message with streaming
@@ -113,6 +149,41 @@ await gemini.chatWithStreaming(
  );
  ```

+ #### OpenAI Streaming with Thinking Mode
+
+ OpenAI streaming supports thinking/reasoning content for compatible models:
+
+ ```javascript
+ // Send a chat message with streaming (supports thinking mode)
+ await openai.chatWithStreaming(
+   'You are a helpful assistant.', // system prompt
+   'Explain how neural networks work', // user prompt
+   {
+     beginCallback: () => {
+       console.log('Stream started...');
+     },
+     firstThinkingCallback: () => {
+       console.log('\n[Thinking...]');
+     },
+     thinkingCallback: (thinking) => {
+       process.stdout.write(thinking); // Print reasoning process
+     },
+     firstContentCallback: () => {
+       console.log('\n[Response:]');
+     },
+     contentCallback: (content) => {
+       process.stdout.write(content); // Print response content
+     },
+     endCallback: () => {
+       console.log('\nStream ended.');
+     },
+     errorCallback: (error) => {
+       console.error('Error:', error);
+     },
+   },
+ );
+ ```
+
  ### Context Caching Example (Vertex AI Only)

  GeminiVertex supports context caching to reduce costs and latency when using large contexts:
@@ -331,6 +402,88 @@ await gemini.cacheUpdate('projects/.../cachedContents/abc123', {
  });
  ```

+ ---
+
+ ### `OpenAIAPI(options)`
+
+ Creates a new OpenAI client instance supporting OpenAI and compatible APIs.
+
+ #### Parameters
+
+ - `options` (Object) - Configuration options
+   - `apiKey` (string) **required** - Your OpenAI API key or compatible service key
+   - `baseURL` (string) **required** - API base URL (e.g., 'https://api.openai.com/v1')
+   - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+   - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
+
+ #### Returns
+
+ Returns an OpenAI client object with the following methods:
+
+ ##### `client.chat(systemPrompt, userPrompt)`
+
+ Sends a chat request to the OpenAI API.
+
+ **Parameters:**
+
+ - `systemPrompt` (string) **required** - System instruction/context for the model
+ - `userPrompt` (string) **required** - User's message/question
+
+ **Returns:**
+
+ - (Promise\<Object\>) - Message object with `role` and `content` properties
+
+ **Example:**
+
+ ```javascript
+ const response = await openai.chat(
+   'You are a helpful coding assistant.',
+   'Write a Python function to reverse a string',
+ );
+ console.log(response.content);
+ ```
+
+ ##### `client.chatWithStreaming(systemPrompt, userPrompt, callbackOptions)`
+
+ Sends a chat request to the OpenAI API with streaming response and thinking support.
+
+ **Parameters:**
+
+ - `systemPrompt` (string) **required** - System instruction/context for the model
+ - `userPrompt` (string) **required** - User's message/question
+
+ - `callbackOptions` (Object) **required** - Callback functions for handling stream events
+   - `beginCallback` (Function) - Called when the stream begins
+   - `firstThinkingCallback` (Function) - Called when the first thinking chunk is received (for reasoning models)
+   - `thinkingCallback` (Function) - Called for each thinking/reasoning chunk received
+     - Parameters: `thinking` (string) - The thinking content chunk
+   - `firstContentCallback` (Function) - Called when the first response content chunk is received
+   - `contentCallback` (Function) - Called for each response content chunk received
+     - Parameters: `content` (string) - The text chunk
+   - `endCallback` (Function) - Called when the stream ends successfully
+   - `errorCallback` (Function) - Called if an error occurs
+     - Parameters: `error` (Error) - The error object
+
+ **Returns:**
+
+ - (Promise\<void\>) - Resolves when streaming completes
+
+ **Example:**
+
+ ```javascript
+ await openai.chatWithStreaming('You are a math tutor.', 'Solve: What is 15% of 240?', {
+   thinkingCallback: (thinking) => {
+     console.log('Thinking:', thinking);
+   },
+   contentCallback: (chunk) => {
+     process.stdout.write(chunk);
+   },
+   endCallback: () => {
+     console.log('\nDone!');
+   },
+ });
+ ```
+
  ## License

  MIT
package/index.js CHANGED
@@ -3,9 +3,10 @@
  var genai = require('@google/genai');
  var mime = require('mime-types');
  var qiao_log_js = require('qiao.log.js');
+ var OpenAI = require('openai');

  // gemini
- const logger$2 = qiao_log_js.Logger('gemini-util.js');
+ const logger$4 = qiao_log_js.Logger('gemini-util.js');

  /**
   * chat
@@ -14,24 +15,24 @@ const logger$2 = qiao_log_js.Logger('gemini-util.js');
   * @param {*} chatOptions
   * @returns
   */
- const chat = async (client, modelName, chatOptions) => {
+ const chat$1 = async (client, modelName, chatOptions) => {
    const methodName = 'chat';

    // check
    if (!client) {
-     logger$2.error(methodName, 'need client');
+     logger$4.error(methodName, 'need client');
      return;
    }
    if (!modelName) {
-     logger$2.error(methodName, 'need modelName');
+     logger$4.error(methodName, 'need modelName');
      return;
    }
    if (!chatOptions) {
-     logger$2.error(methodName, 'need chatOptions');
+     logger$4.error(methodName, 'need chatOptions');
      return;
    }
    if (!chatOptions.contents) {
-     logger$2.error(methodName, 'need chatOptions.contents');
+     logger$4.error(methodName, 'need chatOptions.contents');
      return;
    }

@@ -47,13 +48,13 @@ const chat = async (client, modelName, chatOptions) => {
      // gen
      const response = await client.models.generateContent(options);
      if (!response || !response.text) {
-       logger$2.error(methodName, 'invalid response');
+       logger$4.error(methodName, 'invalid response');
        return;
      }

      return response.text;
    } catch (error) {
-     logger$2.error(methodName, 'error', error);
+     logger$4.error(methodName, 'error', error);
    }
  };

@@ -65,28 +66,28 @@ const chat = async (client, modelName, chatOptions) => {
   * @param {*} callbackOptions
   * @returns
   */
- const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions) => {
+ const chatWithStreaming$1 = async (client, modelName, chatOptions, callbackOptions) => {
    const methodName = 'chatWithStreaming';

    // check
    if (!client) {
-     logger$2.error(methodName, 'need client');
+     logger$4.error(methodName, 'need client');
      return;
    }
    if (!modelName) {
-     logger$2.error(methodName, 'need modelName');
+     logger$4.error(methodName, 'need modelName');
      return;
    }
    if (!chatOptions) {
-     logger$2.error(methodName, 'need chatOptions');
+     logger$4.error(methodName, 'need chatOptions');
      return;
    }
    if (!chatOptions.contents) {
-     logger$2.error(methodName, 'need chatOptions.contents');
+     logger$4.error(methodName, 'need chatOptions.contents');
      return;
    }
    if (!callbackOptions) {
-     logger$2.error(methodName, 'need callbackOptions');
+     logger$4.error(methodName, 'need callbackOptions');
      return;
    }

@@ -98,9 +99,6 @@ const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions
    const firstContentCallback = callbackOptions.firstContentCallback;

    try {
-     // begin
-     if (beginCallback) beginCallback();
-
      // options
      const options = Object.assign(
        {
@@ -111,6 +109,7 @@ const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions

      // gen
      const response = await client.models.generateContentStream(options);
+     if (beginCallback) beginCallback();

      // go
      let firstContent = true;
@@ -146,38 +145,38 @@ const cacheAdd = async (client, modelName, cacheOptions) => {

    // check
    if (!client) {
-     logger$2.error(methodName, 'need client');
+     logger$4.error(methodName, 'need client');
      return;
    }
    if (!modelName) {
-     logger$2.error(methodName, 'need modelName');
+     logger$4.error(methodName, 'need modelName');
      return;
    }
    if (!cacheOptions) {
-     logger$2.error(methodName, 'need cacheOptions');
+     logger$4.error(methodName, 'need cacheOptions');
      return;
    }
    if (!cacheOptions.gsPath) {
-     logger$2.error(methodName, 'need cacheOptions.gsPath');
+     logger$4.error(methodName, 'need cacheOptions.gsPath');
      return;
    }
    if (!cacheOptions.systemPrompt) {
-     logger$2.error(methodName, 'need cacheOptions.systemPrompt');
+     logger$4.error(methodName, 'need cacheOptions.systemPrompt');
      return;
    }
    if (!cacheOptions.cacheName) {
-     logger$2.error(methodName, 'need cacheOptions.cacheName');
+     logger$4.error(methodName, 'need cacheOptions.cacheName');
      return;
    }
    if (!cacheOptions.cacheTTL) {
-     logger$2.error(methodName, 'need cacheOptions.cacheTTL');
+     logger$4.error(methodName, 'need cacheOptions.cacheTTL');
      return;
    }

    // const
    const mimeType = mime.lookup(cacheOptions.gsPath);
-   logger$2.info(methodName, 'cacheOptions', cacheOptions);
-   logger$2.info(methodName, 'mimeType', mimeType);
+   logger$4.info(methodName, 'cacheOptions', cacheOptions);
+   logger$4.info(methodName, 'mimeType', mimeType);

    try {
      // cache add
@@ -193,7 +192,7 @@ const cacheAdd = async (client, modelName, cacheOptions) => {

      return cache;
    } catch (error) {
-     logger$2.error(methodName, 'error', error);
+     logger$4.error(methodName, 'error', error);
    }
  };

@@ -207,7 +206,7 @@ const cacheList = async (client) => {

    // check
    if (!client) {
-     logger$2.error(methodName, 'need client');
+     logger$4.error(methodName, 'need client');
      return;
    }

@@ -221,7 +220,7 @@ const cacheList = async (client) => {

      return cacheObjs;
    } catch (error) {
-     logger$2.error(methodName, 'error', error);
+     logger$4.error(methodName, 'error', error);
    }
  };

@@ -237,15 +236,15 @@ const cacheUpdate = async (client, cacheName, cacheOptions) => {

    // check
    if (!client) {
-     logger$2.error(methodName, 'need client');
+     logger$4.error(methodName, 'need client');
      return;
    }
    if (!cacheName) {
-     logger$2.error(methodName, 'need cacheName');
+     logger$4.error(methodName, 'need cacheName');
      return;
    }
    if (!cacheOptions) {
-     logger$2.error(methodName, 'need cacheOptions');
+     logger$4.error(methodName, 'need cacheOptions');
      return;
    }

@@ -258,12 +257,12 @@ const cacheUpdate = async (client, cacheName, cacheOptions) => {

      return res;
    } catch (error) {
-     logger$2.error(methodName, 'error', error);
+     logger$4.error(methodName, 'error', error);
    }
  };

  // gemini
- const logger$1 = qiao_log_js.Logger('gemini-api.js');
+ const logger$3 = qiao_log_js.Logger('gemini-api.js');

  /**
   * GeminiAPI
@@ -275,15 +274,15 @@ const GeminiAPI = (options) => {

    // check
    if (!options) {
-     logger$1.error(methodName, 'need options');
+     logger$3.error(methodName, 'need options');
      return;
    }
    if (!options.apiKey) {
-     logger$1.error(methodName, 'need options.apiKey');
+     logger$3.error(methodName, 'need options.apiKey');
      return;
    }
    if (!options.modelName) {
-     logger$1.error(methodName, 'need options.modelName');
+     logger$3.error(methodName, 'need options.modelName');
      return;
    }

@@ -295,10 +294,10 @@ const GeminiAPI = (options) => {

    // chat
    gemini.chat = async (chatOptions) => {
-     return await chat(gemini.client, options.modelName, chatOptions);
+     return await chat$1(gemini.client, options.modelName, chatOptions);
    };
    gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-     return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+     return await chatWithStreaming$1(gemini.client, options.modelName, chatOptions, callbackOptions);
    };

    // r
@@ -306,7 +305,7 @@ const GeminiAPI = (options) => {
  };

  // gemini
- const logger = qiao_log_js.Logger('viho-llm');
+ const logger$2 = qiao_log_js.Logger('viho-llm');

  /**
   * GeminiVertex
@@ -318,19 +317,19 @@ const GeminiVertex = (options) => {

    // check
    if (!options) {
-     logger.error(methodName, 'need options');
+     logger$2.error(methodName, 'need options');
      return;
    }
    if (!options.projectId) {
-     logger.error(methodName, 'need options.projectId');
+     logger$2.error(methodName, 'need options.projectId');
      return;
    }
    if (!options.location) {
-     logger.error(methodName, 'need options.location');
+     logger$2.error(methodName, 'need options.location');
      return;
    }
    if (!options.modelName) {
-     logger.error(methodName, 'need options.modelName');
+     logger$2.error(methodName, 'need options.modelName');
      return;
    }

@@ -344,10 +343,10 @@ const GeminiVertex = (options) => {

    // chat
    gemini.chat = async (chatOptions) => {
-     return await chat(gemini.client, options.modelName, chatOptions);
+     return await chat$1(gemini.client, options.modelName, chatOptions);
    };
    gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-     return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+     return await chatWithStreaming$1(gemini.client, options.modelName, chatOptions, callbackOptions);
    };

    // cache
@@ -365,5 +364,225 @@ const GeminiVertex = (options) => {
    return gemini;
  };

+ // Logger
+ const logger$1 = qiao_log_js.Logger('openai-util.js');
+
+ /**
+  * chat
+  * @param {*} client
+  * @param {*} modelID
+  * @param {*} modelThinking
+  * @param {*} systemPrompt
+  * @param {*} userPrompt
+  * @returns
+  */
+ const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+   const methodName = 'chat';
+
+   // check
+   if (!client) {
+     logger$1.error(methodName, 'need client');
+     return;
+   }
+   if (!modelID) {
+     logger$1.error(methodName, 'need modelID');
+     return;
+   }
+   if (!modelThinking) {
+     logger$1.error(methodName, 'need modelThinking');
+     return;
+   }
+   if (!systemPrompt) {
+     logger$1.error(methodName, 'need systemPrompt');
+     return;
+   }
+   if (!userPrompt) {
+     logger$1.error(methodName, 'need userPrompt');
+     return;
+   }
+
+   // chat
+   const chatOptions = {
+     model: modelID,
+     messages: [
+       { role: 'system', content: systemPrompt },
+       { role: 'user', content: userPrompt },
+     ],
+     thinking: {
+       type: modelThinking,
+     },
+   };
+
+   // go
+   try {
+     const completion = await client.chat.completions.create(chatOptions);
+     return completion.choices[0]?.message;
+   } catch (error) {
+     logger$1.error(methodName, 'error', error);
+   }
+ };
+
+ /**
+  * chatWithStreaming
+  * @param {*} client
+  * @param {*} modelID
+  * @param {*} modelThinking
+  * @param {*} systemPrompt
+  * @param {*} userPrompt
+  * @param {*} callbackOptions
+  * @returns
+  */
+ const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+   const methodName = 'chatWithStreaming';
+
+   // check
+   if (!client) {
+     logger$1.error(methodName, 'need client');
+     return;
+   }
+   if (!modelID) {
+     logger$1.error(methodName, 'need modelID');
+     return;
+   }
+   if (!modelThinking) {
+     logger$1.error(methodName, 'need modelThinking');
+     return;
+   }
+   if (!systemPrompt) {
+     logger$1.error(methodName, 'need systemPrompt');
+     return;
+   }
+   if (!userPrompt) {
+     logger$1.error(methodName, 'need userPrompt');
+     return;
+   }
+   if (!callbackOptions) {
+     logger$1.error(methodName, 'need callbackOptions');
+     return;
+   }
+
+   // callback
+   const beginCallback = callbackOptions.beginCallback;
+   const endCallback = callbackOptions.endCallback;
+   const errorCallback = callbackOptions.errorCallback;
+   const thinkingCallback = callbackOptions.thinkingCallback;
+   const firstThinkingCallback = callbackOptions.firstThinkingCallback;
+   const contentCallback = callbackOptions.contentCallback;
+   const firstContentCallback = callbackOptions.firstContentCallback;
+
+   // chat
+   const chatOptions = {
+     model: modelID,
+     messages: [
+       { role: 'system', content: systemPrompt },
+       { role: 'user', content: userPrompt },
+     ],
+     thinking: {
+       type: modelThinking,
+     },
+   };
+
+   // go
+   try {
+     chatOptions.stream = true;
+     const stream = await client.chat.completions.create(chatOptions);
+     if (beginCallback) beginCallback();
+
+     // go
+     let firstThinking = true;
+     let firstContent = true;
+     for await (const part of stream) {
+       // thinking
+       const thinkingContent = part.choices[0]?.delta?.reasoning_content;
+       if (thinkingContent && thinkingCallback) {
+         if (firstThinking && firstThinkingCallback) {
+           firstThinking = false;
+           firstThinkingCallback();
+         }
+
+         thinkingCallback(thinkingContent);
+       }
+
+       // content
+       const content = part.choices[0]?.delta?.content;
+       if (content && contentCallback) {
+         if (firstContent && firstContentCallback) {
+           firstContent = false;
+           firstContentCallback();
+         }
+
+         contentCallback(content);
+       }
+     }
+
+     // end
+     if (endCallback) endCallback();
+   } catch (error) {
+     if (errorCallback) errorCallback(error);
+   }
+ };
+
+ // openai
+ const logger = qiao_log_js.Logger('openai.js');
+
+ /**
+  * OpenAI
+  * @param {*} options
+  * @returns
+  */
+ const OpenAIAPI = (options) => {
+   const methodName = 'OpenAI';
+
+   // check
+   if (!options) {
+     logger.error(methodName, 'need options');
+     return;
+   }
+   if (!options.apiKey) {
+     logger.error(methodName, 'need options.apiKey');
+     return;
+   }
+   if (!options.baseURL) {
+     logger.error(methodName, 'need options.baseURL');
+     return;
+   }
+   if (!options.modelID) {
+     logger.error(methodName, 'need options.modelID');
+     return;
+   }
+   if (!options.modelThinking) {
+     logger.error(methodName, 'need options.modelThinking');
+     return;
+   }
+
+   // openai
+   const openai = {};
+   openai.client = new OpenAI({
+     apiKey: options.apiKey,
+     baseURL: options.baseURL,
+   });
+
+   // chat
+   openai.chat = async (systemPrompt, userPrompt) => {
+     return await chat(openai.client, options.modelID, options.modelThinking, systemPrompt, userPrompt);
+   };
+
+   // chat with streaming
+   openai.chatWithStreaming = async (systemPrompt, userPrompt, callbakOptions) => {
+     return await chatWithStreaming(
+       openai.client,
+       options.modelID,
+       options.modelThinking,
+       systemPrompt,
+       userPrompt,
+       callbakOptions,
+     );
+   };
+
+   //
+   return openai;
+ };
+
  exports.GeminiAPI = GeminiAPI;
  exports.GeminiVertex = GeminiVertex;
+ exports.OpenAIAPI = OpenAIAPI;
package/package.json CHANGED
@@ -1,17 +1,23 @@
  {
    "name": "viho-llm",
-   "version": "0.1.6",
-   "description": "Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions",
+   "version": "0.1.7",
+   "description": "Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions",
    "keywords": [
      "llm",
      "ai",
      "gemini",
+     "openai",
      "google-ai",
      "google-gemini",
      "genai",
+     "gpt",
+     "chatgpt",
      "ai-tools",
      "language-model",
      "ai-utilities",
+     "reasoning",
+     "thinking",
+     "deepseek",
      "viho"
    ],
    "author": "uikoo9 <uikoo9@qq.com>",
@@ -42,6 +48,7 @@
    "dependencies": {
      "@google/genai": "^1.34.0",
      "mime-types": "^2.1.35",
+     "openai": "^5.23.2",
      "qiao.log.js": "^3.7.5"
    },
    "nx": {
@@ -61,5 +68,5 @@
        }
      }
    },
-   "gitHead": "d77d2ba692eac3cd5262f55ade7cf74b84172385"
+   "gitHead": "21b7c541435da99dceb5dcf657ac5a528ed792ba"
  }
package/src/index.js CHANGED
@@ -1,2 +1,3 @@
  export * from './models/gemini-api.js';
  export * from './models/gemini-vertex.js';
+ export * from './models/openai.js';
package/src/models/gemini-util.js CHANGED
@@ -99,9 +99,6 @@ export const chatWithStreaming = async (client, modelName, chatOptions, callback
    const firstContentCallback = callbackOptions.firstContentCallback;

    try {
-     // begin
-     if (beginCallback) beginCallback();
-
      // options
      const options = Object.assign(
        {
@@ -112,6 +109,7 @@ export const chatWithStreaming = async (client, modelName, chatOptions, callback

      // gen
      const response = await client.models.generateContentStream(options);
+     if (beginCallback) beginCallback();

      // go
      let firstContent = true;
package/src/models/openai-util.js ADDED
@@ -0,0 +1,158 @@
+ // Logger
+ import { Logger } from 'qiao.log.js';
+ const logger = Logger('openai-util.js');
+
+ /**
+  * chat
+  * @param {*} client
+  * @param {*} modelID
+  * @param {*} modelThinking
+  * @param {*} systemPrompt
+  * @param {*} userPrompt
+  * @returns
+  */
+ export const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+   const methodName = 'chat';
+
+   // check
+   if (!client) {
+     logger.error(methodName, 'need client');
+     return;
+   }
+   if (!modelID) {
+     logger.error(methodName, 'need modelID');
+     return;
+   }
+   if (!modelThinking) {
+     logger.error(methodName, 'need modelThinking');
+     return;
+   }
+   if (!systemPrompt) {
+     logger.error(methodName, 'need systemPrompt');
+     return;
+   }
+   if (!userPrompt) {
+     logger.error(methodName, 'need userPrompt');
+     return;
+   }
+
+   // chat
+   const chatOptions = {
+     model: modelID,
+     messages: [
+       { role: 'system', content: systemPrompt },
+       { role: 'user', content: userPrompt },
+     ],
+     thinking: {
+       type: modelThinking,
+     },
+   };
+
+   // go
+   try {
+     const completion = await client.chat.completions.create(chatOptions);
+     return completion.choices[0]?.message;
+   } catch (error) {
+     logger.error(methodName, 'error', error);
+   }
+ };
+
+ /**
+  * chatWithStreaming
+  * @param {*} client
+  * @param {*} modelID
+  * @param {*} modelThinking
+  * @param {*} systemPrompt
+  * @param {*} userPrompt
+  * @param {*} callbackOptions
+  * @returns
+  */
+ export const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+   const methodName = 'chatWithStreaming';
+
+   // check
+   if (!client) {
+     logger.error(methodName, 'need client');
+     return;
+   }
+   if (!modelID) {
+     logger.error(methodName, 'need modelID');
+     return;
+   }
+   if (!modelThinking) {
+     logger.error(methodName, 'need modelThinking');
+     return;
+   }
+   if (!systemPrompt) {
+     logger.error(methodName, 'need systemPrompt');
+     return;
+   }
+   if (!userPrompt) {
+     logger.error(methodName, 'need userPrompt');
+     return;
+   }
+   if (!callbackOptions) {
+     logger.error(methodName, 'need callbackOptions');
+     return;
+   }
+
+   // callback
+   const beginCallback = callbackOptions.beginCallback;
+   const endCallback = callbackOptions.endCallback;
+   const errorCallback = callbackOptions.errorCallback;
+   const thinkingCallback = callbackOptions.thinkingCallback;
+   const firstThinkingCallback = callbackOptions.firstThinkingCallback;
+   const contentCallback = callbackOptions.contentCallback;
+   const firstContentCallback = callbackOptions.firstContentCallback;
+
+   // chat
+   const chatOptions = {
+     model: modelID,
+     messages: [
+       { role: 'system', content: systemPrompt },
+       { role: 'user', content: userPrompt },
+     ],
+     thinking: {
+       type: modelThinking,
+     },
+   };
+
+   // go
+   try {
+     chatOptions.stream = true;
+     const stream = await client.chat.completions.create(chatOptions);
+     if (beginCallback) beginCallback();
+
+     // go
+     let firstThinking = true;
+     let firstContent = true;
+     for await (const part of stream) {
+       // thinking
+       const thinkingContent = part.choices[0]?.delta?.reasoning_content;
+       if (thinkingContent && thinkingCallback) {
+         if (firstThinking && firstThinkingCallback) {
+           firstThinking = false;
+           firstThinkingCallback();
+         }
+
+         thinkingCallback(thinkingContent);
+       }
+
+       // content
+       const content = part.choices[0]?.delta?.content;
+       if (content && contentCallback) {
+         if (firstContent && firstContentCallback) {
+           firstContent = false;
+           firstContentCallback();
+         }
+
+         contentCallback(content);
+       }
+     }
+
+     // end
+     if (endCallback) endCallback();
+   } catch (error) {
+     if (errorCallback) errorCallback(error);
+   }
+ };
package/src/models/openai.js ADDED
@@ -0,0 +1,67 @@
+ // openai
+ import OpenAI from 'openai';
+
+ // util
+ import { chat, chatWithStreaming } from './openai-util.js';
+
+ // Logger
+ import { Logger } from 'qiao.log.js';
+ const logger = Logger('openai.js');
+
+ /**
+  * OpenAI
+  * @param {*} options
+  * @returns
+  */
+ export const OpenAIAPI = (options) => {
+   const methodName = 'OpenAI';
+
+   // check
+   if (!options) {
+     logger.error(methodName, 'need options');
+     return;
+   }
+   if (!options.apiKey) {
+     logger.error(methodName, 'need options.apiKey');
+     return;
+   }
+   if (!options.baseURL) {
+     logger.error(methodName, 'need options.baseURL');
+     return;
+   }
+   if (!options.modelID) {
+     logger.error(methodName, 'need options.modelID');
+     return;
+   }
+   if (!options.modelThinking) {
+     logger.error(methodName, 'need options.modelThinking');
+     return;
+   }
+
+   // openai
+   const openai = {};
+   openai.client = new OpenAI({
+     apiKey: options.apiKey,
+     baseURL: options.baseURL,
+   });
+
+   // chat
+   openai.chat = async (systemPrompt, userPrompt) => {
+     return await chat(openai.client, options.modelID, options.modelThinking, systemPrompt, userPrompt);
+   };
+
+   // chat with streaming
+   openai.chatWithStreaming = async (systemPrompt, userPrompt, callbakOptions) => {
+     return await chatWithStreaming(
+       openai.client,
+       options.modelID,
+       options.modelThinking,
+       systemPrompt,
+       userPrompt,
+       callbakOptions,
+     );
+   };
+
+   //
+   return openai;
+ };
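
The diff above adds an OpenAI-compatible provider alongside the existing Gemini clients. As a quick orientation, here is a minimal usage sketch assembled from the README examples in this release; the API key, base URL, and model ID are placeholder values, and `modelThinking` is set to 'disabled' for a non-reasoning model.

```javascript
import { OpenAIAPI } from 'viho-llm';

// Placeholder credentials, endpoint, and model; substitute your own values.
const openai = OpenAIAPI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'https://api.openai.com/v1',
  modelID: 'gpt-4o',
  modelThinking: 'disabled',
});

// Non-streaming call: resolves to a message object with `role` and `content`.
const reply = await openai.chat('You are a helpful assistant.', 'Say hello in one sentence.');
console.log(reply?.content);

// Streaming call: content chunks arrive through the callbacks as they are generated.
await openai.chatWithStreaming('You are a helpful assistant.', 'Count from 1 to 5.', {
  contentCallback: (chunk) => process.stdout.write(chunk),
  endCallback: () => console.log('\ndone'),
});
```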