@graf-research/llm-runner 0.0.12 → 0.0.14
- package/README.md +13 -12
- package/dist/example/abort-stream.d.ts +1 -0
- package/dist/example/abort-stream.js +29 -1
- package/dist/example/simple-multistep-with-stream.js +1 -1
- package/dist/example/simple-multistep.js +1 -1
- package/dist/example/simple-with-context.js +2 -2
- package/dist/example/simple.d.ts +2 -0
- package/dist/example/simple.js +46 -2
- package/dist/index.d.ts +3 -1
- package/dist/index.js +5 -1
- package/dist/multistep/modules/choose.js +2 -2
- package/dist/multistep/modules/multiple-choice-answer.js +2 -2
- package/dist/platform/anthropic.d.ts +15 -0
- package/dist/platform/anthropic.js +100 -0
- package/dist/platform/chatgpt.d.ts +3 -1
- package/dist/platform/chatgpt.js +4 -3
- package/dist/platform/gemini.d.ts +13 -0
- package/dist/platform/gemini.js +96 -0
- package/package.json +3 -1
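In short, 0.0.14 makes the target model an explicit constructor argument and adds Anthropic and Gemini backends alongside ChatGPT and Ollama. A minimal sketch of the new surface, assembled from the README and typings in this diff (API keys are placeholders; the model ids are the ones used throughout the diff):

```ts
import { ChatGPTLLM, AnthropicLLM, GeminiLLM, LLMRunner } from "@graf-research/llm-runner";

// 0.0.14: every platform constructor now takes an explicit model identifier.
const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('<openai-apikey>', 'gpt-4o-mini');
const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('<anthropic-apikey>', 'claude-3-opus-latest');
const gemini: LLMRunner.BaseLLM = new GeminiLLM('<gemini-apikey>', 'gemini-1.5-flash');

// All three share the BaseLLM signature, so they are interchangeable.
const answer: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
console.log(answer);
```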
package/README.md
CHANGED
@@ -16,7 +16,7 @@ npm install --save @graf-research/llm-runner
 import { ChatGPTLLM } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
 const response: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
 ```

@@ -26,7 +26,7 @@ const response: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
 import { ChatGPTLLM } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
 const session = await chatgpt.chat_session_manager.newSession();

 const response1: string = await chatgpt.ask(['Apa ibukota Indonesia?'], session.id);
@@ -42,7 +42,7 @@ console.log(response2);
 import { ChatGPTLLM, MSModule_Choose, MSModule_OpenListAnswer } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

 const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
 const q1_options = ['Bank BCA', 'Istana Negara', 'POM Bensin'];
@@ -64,7 +64,7 @@ if (a1 === 'Bank BCA') {
 import { ChatGPTLLM, GenericLLM } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

 const response: GenericLLM.StreamResponse = await chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
 response.stream((chunk: string, is_complete: boolean) => {
@@ -85,7 +85,7 @@ response.stream((chunk: string, is_complete: boolean) => {
 ```ts
 import { ChatGPTLLM, MultistepTypes, MSModule_Choose, MSModule_OpenListAnswer } from "@graf-research/llm-runner";

-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
 const stream = new Readable({ objectMode: true, read() {} });

 const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
@@ -129,10 +129,11 @@ console.log('<finish>');
 ### ⚡ Multiple LLM Instance Implementation

 ```ts
-import { ChatGPTLLM, OllamaLLM, LLMRunner } from "@graf-research/llm-runner";
+import { ChatGPTLLM, OllamaLLM, AnthropicLLM, LLMRunner } from "@graf-research/llm-runner";

-const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('');
+const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('apikey', 'gpt-4o-mini');
 const ollama: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', 'deepseek-r1:8b');
+const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('apikey', 'claude-3-opus-latest');

 // different platform implemented on but same signature BaseLLM class
 const llm: LLMRunner.BaseLLM = ollama;
@@ -147,7 +148,7 @@ const llm: LLMRunner.BaseLLM = ollama;
 ```ts
 import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
 const response: string = await llm.askNoContext(['Apa ibukota Indonesia?']);
 ```

@@ -156,7 +157,7 @@ const response: string = await llm.askNoContext(['Apa ibukota Indonesia?']);
 ```ts
 import { ChatGPTLLM, GenericLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');

 const response: GenericLLM.StreamResponse = await chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
 response.stream((chunk: string, is_complete: boolean) => {
@@ -171,7 +172,7 @@ response.stream((chunk: string, is_complete: boolean) => {
 ```ts
 import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
 const session = await llm.chat_session_manager.newSession();
 const response1: string = await llm.ask(['Apa ibukota Indonesia?'], session.id);
 const response2: string = await llm.ask(['Apa yang saya tanyakan sebelumnya?'], session.id);
@@ -183,7 +184,7 @@ const response2: string = await llm.ask(['Apa yang saya tanyakan sebelumnya?'],
 ```ts
 import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
 const session = await llm.chat_session_manager.newSession();

 const response1: GenericLLM.StreamResponse = await chatgpt.stream(['Jelaskan proses metamorfosis pada kupu-kupu'], session.id);
@@ -345,7 +346,7 @@ Pada umumnya stream yang terlalu panjang kadang harus diberhentikan karena suatu
 ```ts
 import { ChatGPTLLM, GenericLLM } from "@graf-research/llm-runner";

-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

 // 1. Siapkan Abort Controller
 const ac = new AbortController();
package/dist/example/abort-stream.js
CHANGED
@@ -10,11 +10,13 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.abortStream = abortStream;
+exports.abortStream2 = abortStream2;
 const chatgpt_1 = require("../platform/chatgpt");
+const gemini_1 = require("../platform/gemini");
 // Stream Mode
 function abortStream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const ac = new AbortController();
         setTimeout(() => {
             ac.abort();
@@ -37,3 +39,29 @@ function abortStream(chat_gpt_api_key) {
         });
     });
 }
+// Stream Mode
+function abortStream2(gemini_api_key) {
+    return __awaiter(this, void 0, void 0, function* () {
+        const gemini = new gemini_1.GeminiLLM(gemini_api_key, 'gemini-1.5-flash');
+        const ac = new AbortController();
+        setTimeout(() => {
+            ac.abort();
+            console.log(`<<RESPONSE STREAM ABORTED>>`);
+        }, 2000);
+        const response = yield gemini.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+        ac.signal.addEventListener('abort', () => response.cancel());
+        yield new Promise(resolve => {
+            response.stream((chunk, is_complete) => {
+                if (!is_complete) {
+                    process.stdout.write(chunk);
+                }
+                else {
+                    console.log('\n');
+                    console.log(`<selesai>`);
+                    // resolve promise
+                    resolve(null);
+                }
+            });
+        });
+    });
+}
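The new abortStream2 above repeats the existing cancellation pattern against the Gemini backend: tie an AbortController to StreamResponse.cancel() and abort after a timeout. Below is a sketch of the TypeScript this compiled example plausibly corresponds to; the package-level import is an assumption (the shipped example imports the platform module by relative path).

```ts
import { GeminiLLM } from "@graf-research/llm-runner";

export async function abortStream2(gemini_api_key: string): Promise<void> {
  const gemini = new GeminiLLM(gemini_api_key, 'gemini-1.5-flash');

  // Abort the stream two seconds after it starts.
  const ac = new AbortController();
  setTimeout(() => {
    ac.abort();
    console.log(`<<RESPONSE STREAM ABORTED>>`);
  }, 2000);

  const response = await gemini.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
  ac.signal.addEventListener('abort', () => response.cancel());

  await new Promise(resolve => {
    response.stream((chunk: string, is_complete: boolean) => {
      if (!is_complete) {
        process.stdout.write(chunk);
      } else {
        console.log('\n');
        console.log(`<selesai>`);
        resolve(null); // resolve once the stream reports completion
      }
    });
  });
}
```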
package/dist/example/simple-multistep-with-stream.js
CHANGED
@@ -25,7 +25,7 @@ const open_list_answer_1 = require("../multistep/modules/open-list-answer");
 function simpleMultistepWithStream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
         var _a, e_1, _b, _c;
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
         const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
         const q1_options = ['Bank BCA', 'Istana Negara', 'POM Bensin'];
package/dist/example/simple-multistep.js
CHANGED
@@ -16,7 +16,7 @@ const open_list_answer_1 = require("../multistep/modules/open-list-answer");
 // Waiting Mode
 function simpleMultistep(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
         const q1_options = ['Bank BCA', 'Istana Negara', 'POM Bensin'];
         const q2 = 'Saya ingin belajar LLM';
package/dist/example/simple-with-context.js
CHANGED
@@ -15,7 +15,7 @@ const chatgpt_1 = require("../platform/chatgpt");
 // Waiting Mode
 function simpleWithContext(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const session_id = 'sample-id';
         const response1 = yield chatgpt.ask(['Apa ibukota Indonesia?'], session_id);
         console.log(response1);
@@ -26,7 +26,7 @@ function simpleWithContext(chat_gpt_api_key) {
 // Stream Mode
 function simpleWithContext_stream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const session_id = 'sample-id';
         const response1 = yield chatgpt.stream(['Jelaskan proses metamorfosis pada kupu-kupu'], session_id);
         yield new Promise(resolve => {
package/dist/example/simple.d.ts
CHANGED
@@ -1,2 +1,4 @@
 export declare function simple(chat_gpt_api_key: string): Promise<void>;
 export declare function simple_stream(chat_gpt_api_key: string): Promise<void>;
+export declare function simple_stream2(anthropic_api_key: string): Promise<void>;
+export declare function simple_stream3(gemini_api_key: string): Promise<void>;
package/dist/example/simple.js
CHANGED
@@ -11,11 +11,15 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.simple = simple;
 exports.simple_stream = simple_stream;
+exports.simple_stream2 = simple_stream2;
+exports.simple_stream3 = simple_stream3;
+const anthropic_1 = require("../platform/anthropic");
 const chatgpt_1 = require("../platform/chatgpt");
+const gemini_1 = require("../platform/gemini");
 // Waiting Mode
 function simple(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const response = yield chatgpt.askNoContext(['Apa ibukota Indonesia?']);
         console.log(response);
     });
@@ -23,7 +27,47 @@ function simple(chat_gpt_api_key) {
 // Stream Mode
 function simple_stream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
+        const response = yield chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+        yield new Promise(resolve => {
+            response.stream((chunk, is_complete) => {
+                if (!is_complete) {
+                    process.stdout.write(chunk);
+                }
+                else {
+                    console.log('\n');
+                    console.log(`<selesai>`);
+                    // resolve promise
+                    resolve(null);
+                }
+            });
+        });
+    });
+}
+// Stream Mode
+function simple_stream2(anthropic_api_key) {
+    return __awaiter(this, void 0, void 0, function* () {
+        const chatgpt = new anthropic_1.AnthropicLLM(anthropic_api_key, 'claude-3-opus-latest');
+        const response = yield chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+        yield new Promise(resolve => {
+            response.stream((chunk, is_complete) => {
+                if (!is_complete) {
+                    process.stdout.write(chunk);
+                }
+                else {
+                    console.log('\n');
+                    console.log(`<selesai>`);
+                    // resolve promise
+                    resolve(null);
+                }
+            });
+        });
+    });
+}
+// Stream Mode
+function simple_stream3(gemini_api_key) {
+    return __awaiter(this, void 0, void 0, function* () {
+        const chatgpt = new gemini_1.GeminiLLM(gemini_api_key, 'gemini-1.5-flash');
         const response = yield chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
         yield new Promise(resolve => {
             response.stream((chunk, is_complete) => {
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,8 @@
 import { LLMRunner } from "./base/llm-runner";
 import { ChatGPTLLM } from "./platform/chatgpt";
 import { OllamaLLM } from "./platform/ollama";
+import { AnthropicLLM } from "./platform/anthropic";
+import { GeminiLLM } from "./platform/gemini";
 import { GenericLLM } from "./base/generic-llm";
 import { MultistepTypes } from "./multistep/types";
 import { MSModule_Choose } from "./multistep/modules/choose";
@@ -9,4 +11,4 @@ import { MSModule_Normal } from "./multistep/modules/normal";
 import { MSModule_OpenListAnswer } from "./multistep/modules/open-list-answer";
 import { MSModule_Plan } from "./multistep/modules/plan";
 import { MSModule_YesNo } from "./multistep/modules/yes-no";
-export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
+export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, AnthropicLLM, GeminiLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
package/dist/index.js
CHANGED
@@ -1,12 +1,16 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
+exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.GeminiLLM = exports.AnthropicLLM = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
 const llm_runner_1 = require("./base/llm-runner");
 Object.defineProperty(exports, "LLMRunner", { enumerable: true, get: function () { return llm_runner_1.LLMRunner; } });
 const chatgpt_1 = require("./platform/chatgpt");
 Object.defineProperty(exports, "ChatGPTLLM", { enumerable: true, get: function () { return chatgpt_1.ChatGPTLLM; } });
 const ollama_1 = require("./platform/ollama");
 Object.defineProperty(exports, "OllamaLLM", { enumerable: true, get: function () { return ollama_1.OllamaLLM; } });
+const anthropic_1 = require("./platform/anthropic");
+Object.defineProperty(exports, "AnthropicLLM", { enumerable: true, get: function () { return anthropic_1.AnthropicLLM; } });
+const gemini_1 = require("./platform/gemini");
+Object.defineProperty(exports, "GeminiLLM", { enumerable: true, get: function () { return gemini_1.GeminiLLM; } });
 const generic_llm_1 = require("./base/generic-llm");
 Object.defineProperty(exports, "GenericLLM", { enumerable: true, get: function () { return generic_llm_1.GenericLLM; } });
 const choose_1 = require("./multistep/modules/choose");
package/dist/multistep/modules/choose.js
CHANGED
@@ -24,14 +24,14 @@ var MSModule_Choose;
 }
 function ask(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available`;
+        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available, answer without quote symbol`;
         return yield wrapper_1.MultistepWrapper.ask(llm, prompt, generateChooseResolver(options), session_id);
     });
 }
 MSModule_Choose.ask = ask;
 function stream(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available`;
+        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available, answer without quote symbol`;
         return yield wrapper_1.MultistepWrapper.stream(llm, prompt, generateChooseResolver(options), session_id);
     });
 }
package/dist/multistep/modules/multiple-choice-answer.js
CHANGED
@@ -23,14 +23,14 @@ var MSModule_MultipleChoiceAnswer;
 }
 function ask(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options`;
+        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options, answer without quote symbol`;
         return yield wrapper_1.MultistepWrapper.ask(llm, prompt, generateMultipleChoiceAnswerResolver(options), session_id);
     });
 }
 MSModule_MultipleChoiceAnswer.ask = ask;
 function stream(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options`;
+        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options, answer without quote symbol`;
         return yield wrapper_1.MultistepWrapper.stream(llm, prompt, generateMultipleChoiceAnswerResolver(options), session_id);
     });
 }
package/dist/platform/anthropic.d.ts
ADDED
@@ -0,0 +1,15 @@
+import { Model as AnthropicModel } from '@anthropic-ai/sdk/resources';
+import { LLMRunner } from "../base/llm-runner";
+import { GenericLLM } from "../base/generic-llm";
+import { Readable } from 'node:stream';
+/**
+ * Ollama Implementation
+ */
+export declare class AnthropicLLM extends LLMRunner.BaseLLM {
+    private anthropic;
+    private model;
+    private max_tokens;
+    constructor(apikey: string, model: AnthropicModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>, max_tokens?: number);
+    protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+    protected chat(messages: string[], id_session: string | null): Promise<string>;
+}
package/dist/platform/anthropic.js
ADDED
@@ -0,0 +1,100 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+var __asyncValues = (this && this.__asyncValues) || function (o) {
+    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+    var m = o[Symbol.asyncIterator], i;
+    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+};
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AnthropicLLM = void 0;
+const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
+const llm_runner_1 = require("../base/llm-runner");
+/**
+ * Ollama Implementation
+ */
+class AnthropicLLM extends llm_runner_1.LLMRunner.BaseLLM {
+    constructor(apikey, model, chat_session_manager, max_tokens = 1024) {
+        super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
+        this.max_tokens = 1024;
+        this.anthropic = new sdk_1.default({ apiKey: apikey });
+        this.model = model;
+        this.max_tokens = max_tokens;
+    }
+    streamChat(messages, id_session, stream, ac) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, e_1, _b, _c;
+            var _d;
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const cgpt_stream = yield this.anthropic.messages.create({
+                model: this.model,
+                max_tokens: 1024,
+                messages: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role,
+                        content: msg.content
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user', content
+                    }))
+                ],
+                stream: true
+            });
+            ac.signal.addEventListener('abort', () => cgpt_stream.controller.abort());
+            try {
+                for (var _e = true, cgpt_stream_1 = __asyncValues(cgpt_stream), cgpt_stream_1_1; cgpt_stream_1_1 = yield cgpt_stream_1.next(), _a = cgpt_stream_1_1.done, !_a; _e = true) {
+                    _c = cgpt_stream_1_1.value;
+                    _e = false;
+                    const chunk = _c;
+                    if (chunk.type === 'content_block_delta' && chunk.delta.type === 'text_delta') {
+                        stream.push((_d = chunk.delta.text) !== null && _d !== void 0 ? _d : '');
+                    }
+                }
+            }
+            catch (e_1_1) { e_1 = { error: e_1_1 }; }
+            finally {
+                try {
+                    if (!_e && !_a && (_b = cgpt_stream_1.return)) yield _b.call(cgpt_stream_1);
+                }
+                finally { if (e_1) throw e_1.error; }
+            }
+            stream.push(null);
+        });
+    }
+    chat(messages, id_session) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a;
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const res = yield this.anthropic.messages.create({
+                model: this.model,
+                max_tokens: 1024,
+                messages: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role,
+                        content: msg.content
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user', content
+                    }))
+                ],
+            });
+            if (((_a = res.content) === null || _a === void 0 ? void 0 : _a[0].type) === 'text') {
+                return res.content[0].text;
+            }
+            return '';
+        });
+    }
+}
+exports.AnthropicLLM = AnthropicLLM;
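The class above is mostly TypeScript async/await down-leveled by tsc. Stripped of the __awaiter/__asyncValues helpers, the streaming path amounts to the sketch below: push text deltas from a streamed Messages API call into a Node Readable and forward aborts to the SDK stream's own controller. This is a standalone illustration of the technique, not the package's source file; the API key and prompt are placeholders, and note that 0.0.14 hard-codes max_tokens: 1024 even though the constructor accepts a max_tokens argument.

```ts
import Anthropic from '@anthropic-ai/sdk';
import { Readable } from 'node:stream';

// Standalone sketch of what AnthropicLLM.streamChat does internally.
async function streamIntoReadable(apikey: string, prompt: string): Promise<Readable> {
  const anthropic = new Anthropic({ apiKey: apikey });
  const out = new Readable({ objectMode: true, read() {} });
  const ac = new AbortController();

  const sdk_stream = await anthropic.messages.create({
    model: 'claude-3-opus-latest',
    max_tokens: 1024, // mirrors the hard-coded value in the compiled output
    messages: [{ role: 'user', content: prompt }],
    stream: true
  });
  // Forward cancellation from the caller's AbortController to the SDK stream.
  ac.signal.addEventListener('abort', () => sdk_stream.controller.abort());

  (async () => {
    for await (const chunk of sdk_stream) {
      if (chunk.type === 'content_block_delta' && chunk.delta.type === 'text_delta') {
        out.push(chunk.delta.text ?? '');
      }
    }
    out.push(null); // signal end of stream
  })();

  return out;
}
```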
package/dist/platform/chatgpt.d.ts
CHANGED
@@ -1,12 +1,14 @@
 import { LLMRunner } from "../base/llm-runner";
 import { GenericLLM } from "../base/generic-llm";
 import { Readable } from 'node:stream';
+import { ChatModel as ChatGPTModel } from "openai/resources";
 /**
  * Chat GPT Implementation
  */
 export declare class ChatGPTLLM extends LLMRunner.BaseLLM {
     private cgpt;
-
+    private model;
+    constructor(api_key: string, model: ChatGPTModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>);
     protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
     protected chat(messages: string[], id_session: string | null): Promise<string>;
 }
package/dist/platform/chatgpt.js
CHANGED
@@ -26,9 +26,10 @@ const llm_runner_1 = require("../base/llm-runner");
  * Chat GPT Implementation
  */
 class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
-    constructor(api_key, chat_session_manager) {
+    constructor(api_key, model, chat_session_manager) {
         super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
         this.cgpt = new openai_1.default({ apiKey: api_key });
+        this.model = model;
     }
     streamChat(messages, id_session, stream, ac) {
         return __awaiter(this, void 0, void 0, function* () {
@@ -44,7 +45,7 @@ class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
                 return;
             }
             const cgpt_stream = yield this.cgpt.chat.completions.create({
-                model:
+                model: this.model,
                 store: false,
                 stream: true,
                 n: 1,
@@ -84,7 +85,7 @@ class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
                 ...messages.map(content => ({ role: 'user', content }))
             ];
             const res = yield this.cgpt.chat.completions.create({
-                model:
+                model: this.model,
                 store: false,
                 n: 1,
                 messages: chat_messages
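This constructor change is breaking for 0.0.12 callers: ChatGPTLLM now takes a required model id as its second argument and threads it into both completion calls (the removed lines show the model value was previously set inside chatgpt.js). A minimal migration sketch, using the 'gpt-4o-mini' id that appears throughout this diff:

```ts
import { ChatGPTLLM } from "@graf-research/llm-runner";

// 0.0.12 (old): model selected inside the library
// const chatgpt = new ChatGPTLLM('<apikey>');

// 0.0.14 (new): model is an explicit, required constructor argument
const chatgpt = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
```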
package/dist/platform/gemini.d.ts
ADDED
@@ -0,0 +1,13 @@
+import { LLMRunner } from "../base/llm-runner";
+import { GenericLLM } from "../base/generic-llm";
+import { Readable } from 'node:stream';
+export type GeminiModel = 'gemini-2.0-flash' | 'gemini-2.0-flash-lite' | 'gemini-1.5-flash' | 'gemini-1.5-flash-8b' | 'gemini-1.5-pro';
+/**
+ * Gemini Implementation
+ */
+export declare class GeminiLLM extends LLMRunner.BaseLLM {
+    private gemini;
+    constructor(apikey: string, model: GeminiModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>, max_tokens?: number);
+    protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+    protected chat(messages: string[], id_session: string | null): Promise<string>;
+}
package/dist/platform/gemini.js
ADDED
@@ -0,0 +1,96 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+var __asyncValues = (this && this.__asyncValues) || function (o) {
+    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+    var m = o[Symbol.asyncIterator], i;
+    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GeminiLLM = void 0;
+const generative_ai_1 = require("@google/generative-ai");
+const llm_runner_1 = require("../base/llm-runner");
+/**
+ * Gemini Implementation
+ */
+class GeminiLLM extends llm_runner_1.LLMRunner.BaseLLM {
+    constructor(apikey, model, chat_session_manager, max_tokens = 1024) {
+        super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
+        this.gemini = new generative_ai_1.GoogleGenerativeAI(apikey).getGenerativeModel({ model });
+    }
+    streamChat(messages, id_session, stream, ac) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, e_1, _b, _c;
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const result = yield this.gemini.generateContentStream({
+                contents: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role === 'assistant' ? 'model' : 'user',
+                        parts: [{ text: msg.content }]
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user',
+                        parts: [{ text: content }]
+                    }))
+                ]
+            }, { signal: ac.signal });
+            //
+            result.response.catch(() => { });
+            try {
+                try {
+                    for (var _d = true, _e = __asyncValues(result.stream), _f; _f = yield _e.next(), _a = _f.done, !_a; _d = true) {
+                        _c = _f.value;
+                        _d = false;
+                        const chunk = _c;
+                        stream.push(chunk.text());
+                    }
+                }
+                catch (e_1_1) { e_1 = { error: e_1_1 }; }
+                finally {
+                    try {
+                        if (!_d && !_a && (_b = _e.return)) yield _b.call(_e);
+                    }
+                    finally { if (e_1) throw e_1.error; }
+                }
+            }
+            catch (err) {
+                if (err.name == 'AbortError') {
+                    // aborted
+                    return;
+                }
+                throw err;
+            }
+            finally {
+                stream.push(null);
+            }
+        });
+    }
+    chat(messages, id_session) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const result = yield this.gemini.generateContent({
+                contents: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role === 'assistant' ? 'model' : 'user',
+                        parts: [{ text: msg.content }]
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user',
+                        parts: [{ text: content }]
+                    }))
+                ]
+            });
+            return result.response.text();
+        });
+    }
+}
+exports.GeminiLLM = GeminiLLM;
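GeminiLLM follows the same shape as the Anthropic backend but goes through @google/generative-ai: chat-history roles are mapped from 'assistant' to Gemini's 'model' role, chunks are pushed into a Readable, and aborts travel via the request's AbortSignal (the max_tokens constructor parameter is accepted but not stored in the compiled output). A standalone sketch of that streaming technique, with a placeholder API key and a model id from the GeminiModel union above:

```ts
import { GoogleGenerativeAI } from '@google/generative-ai';
import { Readable } from 'node:stream';

// Standalone sketch of what GeminiLLM.streamChat does internally.
async function streamGemini(apikey: string, prompt: string): Promise<Readable> {
  const gemini = new GoogleGenerativeAI(apikey).getGenerativeModel({ model: 'gemini-1.5-flash' });
  const out = new Readable({ objectMode: true, read() {} });
  const ac = new AbortController();

  const result = await gemini.generateContentStream(
    { contents: [{ role: 'user', parts: [{ text: prompt }] }] },
    { signal: ac.signal } // abort support, as used by the compiled code above
  );

  (async () => {
    try {
      for await (const chunk of result.stream) {
        out.push(chunk.text()); // each chunk carries the incremental text
      }
    } catch (err: any) {
      if (err.name !== 'AbortError') throw err; // swallow aborts, rethrow real errors
    } finally {
      out.push(null); // signal end of stream
    }
  })();

  return out;
}
```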
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@graf-research/llm-runner",
-  "version": "0.0.12",
+  "version": "0.0.14",
   "main": "dist/index.js",
   "scripts": {
     "build": "rm -rf dist && tsc",
@@ -19,6 +19,8 @@
     "typescript": "^5.7.3"
   },
   "dependencies": {
+    "@anthropic-ai/sdk": "^0.38.0",
+    "@google/generative-ai": "^0.22.0",
     "lodash": "^4.17.21",
     "ollama": "^0.5.13",
     "openai": "^4.85.3",