viho-llm 0.1.8 → 0.2.0

package/README.md CHANGED
@@ -101,12 +101,13 @@ const openai = OpenAIAPI({
   baseURL: 'https://api.openai.com/v1', // or your custom endpoint
 });
 
-// Send a chat message
+// Send a chat message (using native OpenAI API format)
 const response = await openai.chat({
-  modelID: 'gpt-4o',
-  modelThinking: 'enabled', // 'enabled' or 'disabled' for reasoning models
-  systemPrompt: 'You are a helpful assistant.',
-  userPrompt: 'Hello, how are you?',
+  model: 'gpt-4o',
+  messages: [
+    { role: 'system', content: 'You are a helpful assistant.' },
+    { role: 'user', content: 'Hello, how are you?' },
+  ],
 });
 
 console.log(response);
@@ -157,10 +158,14 @@ OpenAI streaming supports thinking/reasoning content for compatible models:
 // Send a chat message with streaming (supports thinking mode)
 await openai.chatWithStreaming(
   {
-    modelID: 'deepseek-reasoner',
-    modelThinking: 'enabled',
-    systemPrompt: 'You are a helpful assistant.',
-    userPrompt: 'Explain how neural networks work',
+    model: 'deepseek-reasoner',
+    messages: [
+      { role: 'system', content: 'You are a helpful assistant.' },
+      { role: 'user', content: 'Explain how neural networks work' },
+    ],
+    thinking: {
+      type: 'enabled', // Enable reasoning mode
+    },
   },
   {
     beginCallback: () => {
@@ -428,11 +433,14 @@ Sends a chat request to the OpenAI API.
 
 **Parameters:**
 
-- `chatOptions` (Object) **required** - Chat configuration
-  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
-  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
-  - `systemPrompt` (string) **required** - System instruction/context for the model
-  - `userPrompt` (string) **required** - User's message/question
+- `chatOptions` (Object) **required** - Native OpenAI API chat completion options
+  - `model` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `messages` (Array) **required** - Array of message objects
+    - `role` (string) - 'system', 'user', or 'assistant'
+    - `content` (string) - Message content
+  - `thinking` (Object) - Optional thinking/reasoning configuration
+    - `type` (string) - 'enabled' or 'disabled'
+  - ...other OpenAI API parameters
 
 **Returns:**
 
@@ -442,10 +450,11 @@ Sends a chat request to the OpenAI API.
 
 ```javascript
 const response = await openai.chat({
-  modelID: 'gpt-4o',
-  modelThinking: 'disabled',
-  systemPrompt: 'You are a helpful coding assistant.',
-  userPrompt: 'Write a Python function to reverse a string',
+  model: 'gpt-4o',
+  messages: [
+    { role: 'system', content: 'You are a helpful coding assistant.' },
+    { role: 'user', content: 'Write a Python function to reverse a string' },
+  ],
 });
 console.log(response.content);
 ```
@@ -456,11 +465,14 @@ Sends a chat request to the OpenAI API with streaming response and thinking supp
 
 **Parameters:**
 
-- `chatOptions` (Object) **required** - Chat configuration
-  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
-  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
-  - `systemPrompt` (string) **required** - System instruction/context for the model
-  - `userPrompt` (string) **required** - User's message/question
+- `chatOptions` (Object) **required** - Native OpenAI API chat completion options
+  - `model` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `messages` (Array) **required** - Array of message objects
+    - `role` (string) - 'system', 'user', or 'assistant'
+    - `content` (string) - Message content
+  - `thinking` (Object) - Optional thinking/reasoning configuration
+    - `type` (string) - 'enabled' or 'disabled'
+  - ...other OpenAI API parameters (note: `stream` will be automatically set to `true`)
 
 - `callbackOptions` (Object) **required** - Callback functions for handling stream events
   - `beginCallback` (Function) - Called when the stream begins
@@ -483,10 +495,14 @@ Sends a chat request to the OpenAI API with streaming response and thinking supp
 ```javascript
 await openai.chatWithStreaming(
   {
-    modelID: 'deepseek-reasoner',
-    modelThinking: 'enabled',
-    systemPrompt: 'You are a math tutor.',
-    userPrompt: 'Solve: What is 15% of 240?',
+    model: 'deepseek-reasoner',
+    messages: [
+      { role: 'system', content: 'You are a math tutor.' },
+      { role: 'user', content: 'Solve: What is 15% of 240?' },
+    ],
+    thinking: {
+      type: 'enabled',
+    },
   },
   {
     thinkingCallback: (thinking) => {
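The old flat options (`modelID`, `modelThinking`, `systemPrompt`, `userPrompt`) map one-to-one onto the native payload; the mapping below is exactly what the removed wrapper code in `index.js` did internally. A minimal migration sketch for existing 0.1.x callers (the helper name `toNativeChatOptions` is hypothetical, not part of the package):

```javascript
// Hypothetical migration helper: converts 0.1.x-style viho-llm options into
// the native OpenAI chat-completions payload that 0.2.0 expects.
function toNativeChatOptions({ modelID, modelThinking, systemPrompt, userPrompt }) {
  return {
    model: modelID,
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: userPrompt },
    ],
    thinking: { type: modelThinking }, // 'enabled' or 'disabled'
  };
}

// Old-style arguments, new-style call:
const response = await openai.chat(
  toNativeChatOptions({
    modelID: 'gpt-4o',
    modelThinking: 'disabled',
    systemPrompt: 'You are a helpful assistant.',
    userPrompt: 'Hello, how are you?',
  }),
);
```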
package/index.js CHANGED
@@ -370,10 +370,10 @@ const logger$1 = qiao_log_js.Logger('openai-util.js');
 /**
  * chat
  * @param {*} client
- * @param {*} options
+ * @param {*} chatOptions
  * @returns
  */
-const chat = async (client, options) => {
+const chat = async (client, chatOptions) => {
   const methodName = 'chat';
 
   // check
@@ -381,39 +381,11 @@ const chat = async (client, options) => {
     logger$1.error(methodName, 'need client');
     return;
   }
-  if (!options) {
-    logger$1.error(methodName, 'need options');
-    return;
-  }
-  if (!options.modelID) {
-    logger$1.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger$1.error(methodName, 'need options.modelThinking');
-    return;
-  }
-  if (!options.systemPrompt) {
-    logger$1.error(methodName, 'need options.systemPrompt');
-    return;
-  }
-  if (!options.userPrompt) {
-    logger$1.error(methodName, 'need options.userPrompt');
+  if (!chatOptions) {
+    logger$1.error(methodName, 'need chatOptions');
     return;
   }
 
-  // chat
-  const chatOptions = {
-    model: options.modelID,
-    messages: [
-      { role: 'system', content: options.systemPrompt },
-      { role: 'user', content: options.userPrompt },
-    ],
-    thinking: {
-      type: options.modelThinking,
-    },
-  };
-
   // go
   try {
     const completion = await client.chat.completions.create(chatOptions);
@@ -426,11 +398,11 @@ const chat = async (client, options) => {
 /**
  * chatWithStreaming
  * @param {*} client
- * @param {*} options
+ * @param {*} chatOptions
  * @param {*} callbackOptions
  * @returns
  */
-const chatWithStreaming = async (client, options, callbackOptions) => {
+const chatWithStreaming = async (client, chatOptions, callbackOptions) => {
   const methodName = 'chatWithStreaming';
 
   // check
@@ -438,24 +410,8 @@ const chatWithStreaming = async (client, options, callbackOptions) => {
     logger$1.error(methodName, 'need client');
     return;
   }
-  if (!options) {
-    logger$1.error(methodName, 'need options');
-    return;
-  }
-  if (!options.modelID) {
-    logger$1.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger$1.error(methodName, 'need options.modelThinking');
-    return;
-  }
-  if (!options.systemPrompt) {
-    logger$1.error(methodName, 'need options.systemPrompt');
-    return;
-  }
-  if (!options.userPrompt) {
-    logger$1.error(methodName, 'need options.userPrompt');
+  if (!chatOptions) {
+    logger$1.error(methodName, 'need chatOptions');
     return;
   }
   if (!callbackOptions) {
@@ -472,18 +428,6 @@ const chatWithStreaming = async (client, options, callbackOptions) => {
   const contentCallback = callbackOptions.contentCallback;
   const firstContentCallback = callbackOptions.firstContentCallback;
 
-  // chat
-  const chatOptions = {
-    model: options.modelID,
-    messages: [
-      { role: 'system', content: options.systemPrompt },
-      { role: 'user', content: options.userPrompt },
-    ],
-    thinking: {
-      type: options.modelThinking,
-    },
-  };
-
   // go
   try {
     chatOptions.stream = true;
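As the diff shows, validation in both `chat` and `chatWithStreaming` is now reduced to a presence check, and `chatOptions` is forwarded verbatim to `client.chat.completions.create`. Standard OpenAI parameters therefore pass straight through; a small sketch (the `temperature` and `max_tokens` values are illustrative, not package defaults):

```javascript
// Extra OpenAI chat-completion parameters now ride along untouched.
const response = await openai.chat({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Summarize this library in one sentence.' }],
  temperature: 0.2, // standard OpenAI sampling parameter
  max_tokens: 256,  // standard OpenAI output-length cap
});
```

One side effect worth noting: `chatWithStreaming` still executes `chatOptions.stream = true;`, so the options object the caller passes in is mutated in place.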
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "viho-llm",
-  "version": "0.1.8",
+  "version": "0.2.0",
   "description": "Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions",
   "keywords": [
     "llm",
@@ -68,5 +68,5 @@
       }
     }
   },
-  "gitHead": "7d413880dd77b08d2f09ebff758510a357f0f095"
+  "gitHead": "38f9238d5c59fe322424ee30e32037b44f6be7d8"
 }
@@ -5,10 +5,10 @@ const logger = Logger('openai-util.js');
 /**
  * chat
  * @param {*} client
- * @param {*} options
+ * @param {*} chatOptions
  * @returns
  */
-export const chat = async (client, options) => {
+export const chat = async (client, chatOptions) => {
   const methodName = 'chat';
 
   // check
@@ -16,38 +16,10 @@ export const chat = async (client, options) => {
     logger.error(methodName, 'need client');
     return;
   }
-  if (!options) {
-    logger.error(methodName, 'need options');
+  if (!chatOptions) {
+    logger.error(methodName, 'need chatOptions');
     return;
   }
-  if (!options.modelID) {
-    logger.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger.error(methodName, 'need options.modelThinking');
-    return;
-  }
-  if (!options.systemPrompt) {
-    logger.error(methodName, 'need options.systemPrompt');
-    return;
-  }
-  if (!options.userPrompt) {
-    logger.error(methodName, 'need options.userPrompt');
-    return;
-  }
-
-  // chat
-  const chatOptions = {
-    model: options.modelID,
-    messages: [
-      { role: 'system', content: options.systemPrompt },
-      { role: 'user', content: options.userPrompt },
-    ],
-    thinking: {
-      type: options.modelThinking,
-    },
-  };
 
   // go
   try {
@@ -61,11 +33,11 @@ export const chat = async (client, options) => {
 /**
  * chatWithStreaming
  * @param {*} client
- * @param {*} options
+ * @param {*} chatOptions
  * @param {*} callbackOptions
  * @returns
  */
-export const chatWithStreaming = async (client, options, callbackOptions) => {
+export const chatWithStreaming = async (client, chatOptions, callbackOptions) => {
   const methodName = 'chatWithStreaming';
 
   // check
@@ -73,24 +45,8 @@ export const chatWithStreaming = async (client, options, callbackOptions) => {
     logger.error(methodName, 'need client');
     return;
   }
-  if (!options) {
-    logger.error(methodName, 'need options');
-    return;
-  }
-  if (!options.modelID) {
-    logger.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger.error(methodName, 'need options.modelThinking');
-    return;
-  }
-  if (!options.systemPrompt) {
-    logger.error(methodName, 'need options.systemPrompt');
-    return;
-  }
-  if (!options.userPrompt) {
-    logger.error(methodName, 'need options.userPrompt');
+  if (!chatOptions) {
+    logger.error(methodName, 'need chatOptions');
     return;
   }
   if (!callbackOptions) {
@@ -107,18 +63,6 @@ export const chatWithStreaming = async (client, options, callbackOptions) => {
   const contentCallback = callbackOptions.contentCallback;
   const firstContentCallback = callbackOptions.firstContentCallback;
 
-  // chat
-  const chatOptions = {
-    model: options.modelID,
-    messages: [
-      { role: 'system', content: options.systemPrompt },
-      { role: 'user', content: options.userPrompt },
-    ],
-    thinking: {
-      type: options.modelThinking,
-    },
-  };
-
   // go
   try {
     chatOptions.stream = true;
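Put together, a streaming call against the 0.2.0 surface combines the native options object with the callback set from the README (`beginCallback`, `thinkingCallback`, `contentCallback`, `firstContentCallback`). The handler bodies below are illustrative, since the diff does not show the callback payload shapes:

```javascript
// Sketch of a 0.2.0 streaming call; the option and callback names come from
// the README diff above, the handler bodies are assumptions.
let answer = '';
await openai.chatWithStreaming(
  {
    model: 'deepseek-reasoner',
    messages: [{ role: 'user', content: 'Explain how neural networks work' }],
    thinking: { type: 'enabled' },
  },
  {
    beginCallback: () => console.log('stream started'),
    thinkingCallback: (thinking) => process.stdout.write(thinking), // reasoning tokens
    contentCallback: (content) => { answer += content; },           // answer tokens
  },
);
console.log(answer);
```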