@graf-research/llm-runner 0.0.14 → 0.0.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +36 -3
- package/dist/base/llm-runner.d.ts +4 -4
- package/dist/base/llm-runner.js +8 -4
- package/dist/platform/chatgpt.d.ts +1 -1
- package/dist/platform/chatgpt.js +2 -2
- package/package.json +1 -1
package/README.md
CHANGED
@@ -8,6 +8,20 @@ Sebuah alternatif untuk mengutilisasi LLM ke programming NodeJS/Javascript. Dide
 npm install --save @graf-research/llm-runner
 ```
 
+## Supported LLM
+
+```ts
+import { ChatGPTLLM, OllamaLLM, AnthropicLLM, GeminiLLM, LLMRunner } from "@graf-research/llm-runner";
+
+const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('apikey', '<chatgpt model>');
+const ollama: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', '<ollama model>');
+const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('apikey', '<anthropic model>');
+const gemini: LLMRunner.BaseLLM = new GeminiLLM('apikey', '<gemini model>');
+
+// different platform implementation but same signature BaseLLM class
+const llm: LLMRunner.BaseLLM = ollama;
+```
+
 ## Example
 
 #### Simple
@@ -17,7 +31,15 @@ import { ChatGPTLLM } from "@graf-research/llm-runner";
 
 const chat_gpt_api_key = '<apikey>';
 const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
-
+
+// pass string
+const response: string = await chatgpt.askNoContext('Apa ibukota Indonesia?');
+
+// pass array of string
+const response: string = await chatgpt.askNoContext([
+  'Saya sedang berada di Indonesia',
+  'apa ibukota negara tersebut?'
+]);
 ```
 
 #### Simple w/ Context
@@ -66,6 +88,7 @@ import { ChatGPTLLM, GenericLLM } from "@graf-research/llm-runner";
 const chat_gpt_api_key = '<apikey>';
 const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
 
+// can pass string or array of string
 const response: GenericLLM.StreamResponse = await chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
 response.stream((chunk: string, is_complete: boolean) => {
   if (!is_complete) {
@@ -134,8 +157,9 @@ import { ChatGPTLLM, OllamaLLM, AnthropicLLM, LLMRunner } from "@graf-research/l
 const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('apikey', 'gpt-4o-mini');
 const ollama: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', 'deepseek-r1:8b');
 const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('apikey', 'claude-3-opus-latest');
+const gemini: LLMRunner.BaseLLM = new GeminiLLM('apikey', 'gemini-1.5-flash');
 
-// different platform
+// different platform implementation but same signature BaseLLM class
 const llm: LLMRunner.BaseLLM = ollama;
 ```
 
@@ -149,7 +173,15 @@ const llm: LLMRunner.BaseLLM = ollama;
 import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";
 
 const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
-
+
+// pass string
+const response: string = await llm.askNoContext('Apa ibukota Indonesia?');
+
+// pass array of string
+const response: string = await llm.askNoContext([
+  'Saya sedang berada di Indonesia',
+  'apa ibukota negara tersebut?'
+]);
 ```
 
 *With Stream*
@@ -159,6 +191,7 @@ import { ChatGPTLLM, GenericLLM, LLMRunner } from "@graf-research/llm-runner";
 
 const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
 
+// can pass string or array of string
 const response: GenericLLM.StreamResponse = await chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
 response.stream((chunk: string, is_complete: boolean) => {
 ...
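The README additions above all document the same 0.0.16 change: each runner is constructed per platform but used through the shared `LLMRunner.BaseLLM` surface, and the ask/stream methods now accept either a single string or an array of strings. A minimal consolidated sketch, assuming the 0.0.16 API; the server URL, model, and prompts are placeholders taken from the README examples:

```ts
import { OllamaLLM, LLMRunner } from "@graf-research/llm-runner";

// Placeholder server/model; any platform class could be assigned here,
// since they all share the BaseLLM signature.
const llm: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', 'deepseek-r1:8b');

async function main(): Promise<void> {
  // Single-string form, new in 0.0.16.
  const a: string = await llm.askNoContext('Apa ibukota Indonesia?');
  // Array form; per the dist diff below, each element becomes one message.
  const b: string = await llm.askNoContext([
    'Saya sedang berada di Indonesia',
    'apa ibukota negara tersebut?'
  ]);
  console.log(a, b);
}

main().catch(console.error);
```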
package/dist/base/llm-runner.d.ts
CHANGED

@@ -23,9 +23,9 @@ export declare namespace LLMRunner {
     * Abstract Base LLM Class
     */
    abstract class BaseLLM extends GenericLLM.BaseLLM<ChatSession, Message> {
-        stream(
-        streamNoContext(
-        ask(
-        askNoContext(
+        stream(message_data: string[] | string, id_session: string): Promise<GenericLLM.StreamResponse>;
+        streamNoContext(message_data: string[] | string): Promise<GenericLLM.StreamResponse>;
+        ask(message_data: string[] | string, id_session: string): Promise<string>;
+        askNoContext(message_data: string[] | string): Promise<string>;
    }
 }
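The widened `string[] | string` parameters above are the type-level half of the change; the compiled implementation below normalizes both forms at runtime. A short caller sketch against these declarations; the session id is hypothetical:

```ts
import { ChatGPTLLM, GenericLLM, LLMRunner } from "@graf-research/llm-runner";

const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');

async function demo(): Promise<void> {
  // Both forms now typecheck against the same method.
  const no_ctx: string = await llm.askNoContext('Apa ibukota Indonesia?');
  const with_ctx: string = await llm.ask(['apa ibukota negara tersebut?'], 'session-1'); // 'session-1' is a made-up id

  // stream/streamNoContext accept the same union and resolve to a StreamResponse.
  const res: GenericLLM.StreamResponse = await llm.streamNoContext('Jelaskan proses metamorfosis pada kupu-kupu');
  res.stream((chunk: string, is_complete: boolean) => {
    if (!is_complete) process.stdout.write(chunk);
  });
}

demo().catch(console.error);
```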
package/dist/base/llm-runner.js
CHANGED
@@ -74,8 +74,9 @@ var LLMRunner;
     * Abstract Base LLM Class
     */
    class BaseLLM extends generic_llm_1.GenericLLM.BaseLLM {
-        stream(
+        stream(message_data, id_session) {
            return __awaiter(this, void 0, void 0, function* () {
+                const messages = Array.isArray(message_data) ? message_data : [message_data];
                const ac = new AbortController();
                const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
                this.streamChat(messages, id_session, stream, ac);
@@ -109,8 +110,9 @@ var LLMRunner;
                };
            });
        }
-        streamNoContext(
+        streamNoContext(message_data) {
            return __awaiter(this, void 0, void 0, function* () {
+                const messages = Array.isArray(message_data) ? message_data : [message_data];
                const ac = new AbortController();
                const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
                this.streamChat(messages, null, stream, ac);
@@ -142,16 +144,18 @@ var LLMRunner;
                };
            });
        }
-        ask(
+        ask(message_data, id_session) {
            return __awaiter(this, void 0, void 0, function* () {
+                const messages = Array.isArray(message_data) ? message_data : [message_data];
                const res = yield this.chat(messages, id_session);
                yield this.chat_session_manager.saveMessage(messages, 'user', id_session);
                yield this.chat_session_manager.saveMessage([res], 'assistant', id_session);
                return res;
            });
        }
-        askNoContext(
+        askNoContext(message_data) {
            return __awaiter(this, void 0, void 0, function* () {
+                const messages = Array.isArray(message_data) ? message_data : [message_data];
                return yield this.chat(messages, null);
            });
        }
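All four methods gain the same first statement: a one-line normalization that turns a lone string into a single-element message list and leaves arrays untouched. The idiom in isolation, as a TypeScript sketch of what the compiled code does (not a library export):

```ts
// Normalization added in 0.0.16: string -> [string], string[] -> unchanged.
function toMessages(message_data: string[] | string): string[] {
  return Array.isArray(message_data) ? message_data : [message_data];
}

toMessages('halo');            // ['halo']
toMessages(['halo', 'dunia']); // ['halo', 'dunia'] (passed through unchanged)
```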
package/dist/platform/chatgpt.d.ts
CHANGED

@@ -8,7 +8,7 @@ import { ChatModel as ChatGPTModel } from "openai/resources";
 export declare class ChatGPTLLM extends LLMRunner.BaseLLM {
     private cgpt;
     private model;
-    constructor(api_key: string, model: ChatGPTModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message
+    constructor(api_key: string, model: ChatGPTModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>, base_url?: string);
     protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
     protected chat(messages: string[], id_session: string | null): Promise<string>;
 }
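The constructor's new optional `base_url` parameter lets the ChatGPT runner target any OpenAI-compatible endpoint. A construction sketch; the URL is a placeholder, and the `undefined` third argument falls back to the built-in `SessionManager` (per the implementation below):

```ts
import { ChatGPTLLM } from "@graf-research/llm-runner";

const chatgpt = new ChatGPTLLM(
  '<apikey>',
  'gpt-4o-mini',
  undefined,                          // use the default LLMRunner.SessionManager
  'https://my-proxy.example.com/v1'   // placeholder OpenAI-compatible base URL
);
```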
package/dist/platform/chatgpt.js
CHANGED
@@ -26,9 +26,9 @@ const llm_runner_1 = require("../base/llm-runner");
  * Chat GPT Implementation
  */
 class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
-    constructor(api_key, model, chat_session_manager) {
+    constructor(api_key, model, chat_session_manager, base_url) {
         super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
-        this.cgpt = new openai_1.default({ apiKey: api_key });
+        this.cgpt = new openai_1.default({ apiKey: api_key, baseURL: base_url });
         this.model = model;
     }
     streamChat(messages, id_session, stream, ac) {
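Because `base_url` is passed straight through as `baseURL`, callers that omit it hand the openai client `undefined`, which the SDK treats the same as leaving the option unset, so existing 0.0.14-style constructor calls should keep their behavior.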