viho-llm 0.1.1 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.js +54 -0
- package/package.json +2 -2
- package/src/gemini.js +54 -0
package/index.js
CHANGED
|
@@ -40,6 +40,11 @@ const Gemini = (options) => {
|
|
|
40
40
|
return await chat(gemini.client, options.modelName, chatOptions);
|
|
41
41
|
};
|
|
42
42
|
|
|
43
|
+
// chat with streaming
|
|
44
|
+
gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
|
|
45
|
+
return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
|
|
46
|
+
};
|
|
47
|
+
|
|
43
48
|
// r
|
|
44
49
|
return gemini;
|
|
45
50
|
};
|
|
@@ -74,4 +79,53 @@ async function chat(client, modelName, chatOptions) {
|
|
|
74
79
|
}
|
|
75
80
|
}
|
|
76
81
|
|
|
82
|
+
/**
 * Streams a chat completion from Gemini, emitting lifecycle callbacks.
 *
 * @param {object} client - Gemini API client exposing `models.generateContentStream`.
 * @param {string} modelName - Model identifier to query.
 * @param {object} chatOptions - Must contain `contents` (the conversation payload).
 * @param {object} [callbackOptions] - Optional lifecycle hooks, all skipped when absent:
 *   `beginCallback()` before the request, `firstContentCallback()` on the first
 *   non-empty chunk, `contentCallback(text)` for every non-empty chunk,
 *   `endCallback()` after the stream finishes, `errorCallback(error)` on failure.
 * @returns {Promise<void>} Resolves when the stream is fully consumed or on handled error.
 */
async function chatWithStreaming(client, modelName, chatOptions, callbackOptions = {}) {
  const methodName = 'Gemini - chatWithStreaming';

  // validate required options up front
  if (!chatOptions) {
    logger.info(methodName, 'need chatOptions');
    return;
  }
  if (!chatOptions.contents) {
    logger.info(methodName, 'need chatOptions.contents');
    return;
  }

  // callbacks are all optional; the default `{}` above prevents a TypeError
  // when the caller passes no callbackOptions at all
  const {
    beginCallback,
    endCallback,
    errorCallback,
    contentCallback,
    firstContentCallback,
  } = callbackOptions;

  try {
    if (beginCallback) beginCallback();
    const response = await client.models.generateContentStream({
      model: modelName,
      contents: chatOptions.contents,
    });

    // forward each non-empty chunk; signal the first one separately so
    // callers can measure time-to-first-token
    let firstContent = true;
    for await (const chunk of response) {
      const content = chunk.text;
      if (content && contentCallback) {
        if (firstContent && firstContentCallback) {
          firstContent = false;
          firstContentCallback();
        }

        contentCallback(content);
      }
    }

    // stream fully consumed
    if (endCallback) endCallback();
  } catch (error) {
    // never swallow silently — log like the sibling `chat` function does
    logger.error(methodName, 'error', error);
    if (errorCallback) errorCallback(error);
  }
}
|
|
130
|
+
|
|
77
131
|
exports.Gemini = Gemini;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "viho-llm",
|
|
3
|
-
"version": "0.1.1",
|
|
3
|
+
"version": "0.1.2",
|
|
4
4
|
"description": "Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"llm",
|
|
@@ -60,5 +60,5 @@
|
|
|
60
60
|
}
|
|
61
61
|
}
|
|
62
62
|
},
|
|
63
|
-
"gitHead": "
|
|
63
|
+
"gitHead": "ddc9883c92c6b3bb4a0521819c79b389b438967f"
|
|
64
64
|
}
|
package/src/gemini.js
CHANGED
|
@@ -39,6 +39,11 @@ export const Gemini = (options) => {
|
|
|
39
39
|
return await chat(gemini.client, options.modelName, chatOptions);
|
|
40
40
|
};
|
|
41
41
|
|
|
42
|
+
// chat with streaming
|
|
43
|
+
gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
|
|
44
|
+
return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
|
|
45
|
+
};
|
|
46
|
+
|
|
42
47
|
// r
|
|
43
48
|
return gemini;
|
|
44
49
|
};
|
|
@@ -72,3 +77,52 @@ async function chat(client, modelName, chatOptions) {
|
|
|
72
77
|
logger.error(methodName, 'error', error);
|
|
73
78
|
}
|
|
74
79
|
}
|
|
80
|
+
|
|
81
|
+
/**
 * Streams a chat completion from Gemini, emitting lifecycle callbacks.
 *
 * @param {object} client - Gemini API client exposing `models.generateContentStream`.
 * @param {string} modelName - Model identifier to query.
 * @param {object} chatOptions - Must contain `contents` (the conversation payload).
 * @param {object} [callbackOptions] - Optional lifecycle hooks, all skipped when absent:
 *   `beginCallback()` before the request, `firstContentCallback()` on the first
 *   non-empty chunk, `contentCallback(text)` for every non-empty chunk,
 *   `endCallback()` after the stream finishes, `errorCallback(error)` on failure.
 * @returns {Promise<void>} Resolves when the stream is fully consumed or on handled error.
 */
async function chatWithStreaming(client, modelName, chatOptions, callbackOptions = {}) {
  const methodName = 'Gemini - chatWithStreaming';

  // validate required options up front
  if (!chatOptions) {
    logger.info(methodName, 'need chatOptions');
    return;
  }
  if (!chatOptions.contents) {
    logger.info(methodName, 'need chatOptions.contents');
    return;
  }

  // callbacks are all optional; the default `{}` above prevents a TypeError
  // when the caller passes no callbackOptions at all
  const {
    beginCallback,
    endCallback,
    errorCallback,
    contentCallback,
    firstContentCallback,
  } = callbackOptions;

  try {
    if (beginCallback) beginCallback();
    const response = await client.models.generateContentStream({
      model: modelName,
      contents: chatOptions.contents,
    });

    // forward each non-empty chunk; signal the first one separately so
    // callers can measure time-to-first-token
    let firstContent = true;
    for await (const chunk of response) {
      const content = chunk.text;
      if (content && contentCallback) {
        if (firstContent && firstContentCallback) {
          firstContent = false;
          firstContentCallback();
        }

        contentCallback(content);
      }
    }

    // stream fully consumed
    if (endCallback) endCallback();
  } catch (error) {
    // never swallow silently — log like the sibling `chat` function does
    logger.error(methodName, 'error', error);
    if (errorCallback) errorCallback(error);
  }
}
|