@graf-research/llm-runner 0.0.13 → 0.0.15

This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -8,6 +8,20 @@ Sebuah alternatif untuk mengutilisasi LLM ke programming NodeJS/Javascript. Dide
  npm install --save @graf-research/llm-runner
  ```

+ ## Supported LLM
+
+ ```ts
+ import { ChatGPTLLM, OllamaLLM, AnthropicLLM, GeminiLLM, LLMRunner } from "@graf-research/llm-runner";
+
+ const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('apikey', '<chatgpt model>');
+ const ollama: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', '<ollama model>');
+ const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('apikey', '<anthropic model>');
+ const gemini: LLMRunner.BaseLLM = new GeminiLLM('apikey', '<gemini model>');
+
+ // different platform implementation but same signature BaseLLM class
+ const llm: LLMRunner.BaseLLM = ollama;
+ ```
+
  ## Example

  #### Simple
@@ -16,8 +30,16 @@ npm install --save @graf-research/llm-runner
  import { ChatGPTLLM } from "@graf-research/llm-runner";

  const chat_gpt_api_key = '<apikey>';
- const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
- const response: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
+ const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
+
+ // pass a string
+ const response: string = await chatgpt.askNoContext('Apa ibukota Indonesia?');
+
+ // pass an array of strings
+ const response2: string = await chatgpt.askNoContext([
+   'Saya sedang berada di Indonesia',
+   'apa ibukota negara tersebut?'
+ ]);
  ```

  #### Simple w/ Context
@@ -26,7 +48,7 @@ const response: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
  import { ChatGPTLLM } from "@graf-research/llm-runner";

  const chat_gpt_api_key = '<apikey>';
- const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+ const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
  const session = await chatgpt.chat_session_manager.newSession();

  const response1: string = await chatgpt.ask(['Apa ibukota Indonesia?'], session.id);
@@ -42,7 +64,7 @@ console.log(response2);
  import { ChatGPTLLM, MSModule_Choose, MSModule_OpenListAnswer } from "@graf-research/llm-runner";

  const chat_gpt_api_key = '<apikey>';
- const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+ const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

  const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
  const q1_options = ['Bank BCA', 'Istana Negara', 'POM Bensin'];
@@ -64,8 +86,9 @@ if (a1 === 'Bank BCA') {
  import { ChatGPTLLM, GenericLLM } from "@graf-research/llm-runner";

  const chat_gpt_api_key = '<apikey>';
- const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+ const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

+ // accepts a string or an array of strings
  const response: GenericLLM.StreamResponse = await chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
  response.stream((chunk: string, is_complete: boolean) => {
    if (!is_complete) {
@@ -85,7 +108,7 @@ response.stream((chunk: string, is_complete: boolean) => {
  ```ts
  import { ChatGPTLLM, MultistepTypes, MSModule_Choose, MSModule_OpenListAnswer } from "@graf-research/llm-runner";

- const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+ const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
  const stream = new Readable({ objectMode: true, read() {} });

  const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
@@ -134,8 +157,9 @@ import { ChatGPTLLM, OllamaLLM, AnthropicLLM, LLMRunner } from "@graf-research/l
  const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('apikey', 'gpt-4o-mini');
  const ollama: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', 'deepseek-r1:8b');
  const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('apikey', 'claude-3-opus-latest');
+ const gemini: LLMRunner.BaseLLM = new GeminiLLM('apikey', 'gemini-1.5-flash');

- // different platform implemented on but same signature BaseLLM class
+ // different platform implementation but same signature BaseLLM class
  const llm: LLMRunner.BaseLLM = ollama;
  ```

@@ -148,8 +172,16 @@ const llm: LLMRunner.BaseLLM = ollama;
  ```ts
  import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

- const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
- const response: string = await llm.askNoContext(['Apa ibukota Indonesia?']);
+ const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
+
+ // pass a string
+ const response: string = await llm.askNoContext('Apa ibukota Indonesia?');
+
+ // pass an array of strings
+ const response2: string = await llm.askNoContext([
+   'Saya sedang berada di Indonesia',
+   'apa ibukota negara tersebut?'
+ ]);
  ```

  *With Stream*
@@ -157,8 +189,9 @@ const response: string = await llm.askNoContext(['Apa ibukota Indonesia?']);
  ```ts
  import { ChatGPTLLM, GenericLLM, LLMRunner } from "@graf-research/llm-runner";

- const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+ const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');

+ // accepts a string or an array of strings
  const response: GenericLLM.StreamResponse = await llm.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
  response.stream((chunk: string, is_complete: boolean) => {
    ...
@@ -172,7 +205,7 @@ response.stream((chunk: string, is_complete: boolean) => {
  ```ts
  import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

- const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+ const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
  const session = await llm.chat_session_manager.newSession();
  const response1: string = await llm.ask(['Apa ibukota Indonesia?'], session.id);
  const response2: string = await llm.ask(['Apa yang saya tanyakan sebelumnya?'], session.id);
@@ -184,7 +217,7 @@ const response2: string = await llm.ask(['Apa yang saya tanyakan sebelumnya?'],
  ```ts
  import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

- const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+ const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
  const session = await llm.chat_session_manager.newSession();

  const response1: GenericLLM.StreamResponse = await llm.stream(['Jelaskan proses metamorfosis pada kupu-kupu'], session.id);
@@ -346,7 +379,7 @@ Pada umumnya stream yang terlalu panjang kadang harus diberhentikan karena suatu
  ```ts
  import { ChatGPTLLM, GenericLLM } from "@graf-research/llm-runner";

- const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+ const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

  // 1. Siapkan Abort Controller
  const ac = new AbortController();
package/dist/base/llm-runner.d.ts CHANGED
@@ -23,9 +23,9 @@ export declare namespace LLMRunner {
  * Abstract Base LLM Class
  */
  abstract class BaseLLM extends GenericLLM.BaseLLM<ChatSession, Message> {
- stream(messages: string[], id_session: string): Promise<GenericLLM.StreamResponse>;
- streamNoContext(messages: string[]): Promise<GenericLLM.StreamResponse>;
- ask(messages: string[], id_session: string): Promise<string>;
- askNoContext(messages: string[]): Promise<string>;
+ stream(message_data: string[] | string, id_session: string): Promise<GenericLLM.StreamResponse>;
+ streamNoContext(message_data: string[] | string): Promise<GenericLLM.StreamResponse>;
+ ask(message_data: string[] | string, id_session: string): Promise<string>;
+ askNoContext(message_data: string[] | string): Promise<string>;
  }
  }
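
The widened `string[] | string` parameters are backward compatible: array call sites from 0.0.13 compile unchanged, while a bare string is now accepted. A minimal sketch against the new signatures (API key and model are placeholders):

```ts
import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');

// new since 0.0.13: a bare string is accepted directly
const single: string = await llm.askNoContext('Apa ibukota Indonesia?');

// the existing array form still compiles unchanged
const multi: string = await llm.askNoContext(['Apa ibukota Indonesia?']);
```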
package/dist/base/llm-runner.js CHANGED
@@ -74,8 +74,9 @@ var LLMRunner;
  * Abstract Base LLM Class
  */
  class BaseLLM extends generic_llm_1.GenericLLM.BaseLLM {
- stream(messages, id_session) {
+ stream(message_data, id_session) {
  return __awaiter(this, void 0, void 0, function* () {
+ const messages = Array.isArray(message_data) ? message_data : [message_data];
  const ac = new AbortController();
  const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
  this.streamChat(messages, id_session, stream, ac);
@@ -109,8 +110,9 @@ var LLMRunner;
  };
  });
  }
- streamNoContext(messages) {
+ streamNoContext(message_data) {
  return __awaiter(this, void 0, void 0, function* () {
+ const messages = Array.isArray(message_data) ? message_data : [message_data];
  const ac = new AbortController();
  const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
  this.streamChat(messages, null, stream, ac);
@@ -142,16 +144,18 @@ var LLMRunner;
  };
  });
  }
- ask(messages, id_session) {
+ ask(message_data, id_session) {
  return __awaiter(this, void 0, void 0, function* () {
+ const messages = Array.isArray(message_data) ? message_data : [message_data];
  const res = yield this.chat(messages, id_session);
  yield this.chat_session_manager.saveMessage(messages, 'user', id_session);
  yield this.chat_session_manager.saveMessage([res], 'assistant', id_session);
  return res;
  });
  }
- askNoContext(messages) {
+ askNoContext(message_data) {
  return __awaiter(this, void 0, void 0, function* () {
+ const messages = Array.isArray(message_data) ? message_data : [message_data];
  return yield this.chat(messages, null);
  });
  }
@@ -1 +1,2 @@
  export declare function abortStream(chat_gpt_api_key: string): Promise<void>;
+ export declare function abortStream2(gemini_api_key: string): Promise<void>;
@@ -10,7 +10,9 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.abortStream = abortStream;
+ exports.abortStream2 = abortStream2;
  const chatgpt_1 = require("../platform/chatgpt");
+ const gemini_1 = require("../platform/gemini");
  // Stream Mode
  function abortStream(chat_gpt_api_key) {
  return __awaiter(this, void 0, void 0, function* () {
@@ -37,3 +39,29 @@ function abortStream(chat_gpt_api_key) {
  });
  });
  }
+ // Stream Mode
+ function abortStream2(gemini_api_key) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const gemini = new gemini_1.GeminiLLM(gemini_api_key, 'gemini-1.5-flash');
+ const ac = new AbortController();
+ setTimeout(() => {
+ ac.abort();
+ console.log(`<<RESPONSE STREAM ABORTED>>`);
+ }, 2000);
+ const response = yield gemini.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+ ac.signal.addEventListener('abort', () => response.cancel());
+ yield new Promise(resolve => {
+ response.stream((chunk, is_complete) => {
+ if (!is_complete) {
+ process.stdout.write(chunk);
+ }
+ else {
+ console.log('\n');
+ console.log(`<selesai>`);
+ // resolve promise
+ resolve(null);
+ }
+ });
+ });
+ });
+ }
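
For reference, the compiled example above corresponds to roughly this TypeScript source (a sketch; the original source file is not part of this diff). It aborts a Gemini stream after two seconds via `AbortController` plus `response.cancel()`:

```ts
import { GeminiLLM, GenericLLM } from "@graf-research/llm-runner";

export async function abortStream2(gemini_api_key: string): Promise<void> {
  const gemini = new GeminiLLM(gemini_api_key, 'gemini-1.5-flash');

  // schedule an abort 2 seconds into the stream
  const ac = new AbortController();
  setTimeout(() => {
    ac.abort();
    console.log(`<<RESPONSE STREAM ABORTED>>`);
  }, 2000);

  const response: GenericLLM.StreamResponse = await gemini.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
  ac.signal.addEventListener('abort', () => response.cancel());

  await new Promise(resolve => {
    response.stream((chunk: string, is_complete: boolean) => {
      if (!is_complete) {
        process.stdout.write(chunk);
      } else {
        console.log('\n');
        console.log(`<selesai>`);
        resolve(null); // resolve once the stream reports completion
      }
    });
  });
}
```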
@@ -1,3 +1,4 @@
  export declare function simple(chat_gpt_api_key: string): Promise<void>;
  export declare function simple_stream(chat_gpt_api_key: string): Promise<void>;
  export declare function simple_stream2(anthropic_api_key: string): Promise<void>;
+ export declare function simple_stream3(gemini_api_key: string): Promise<void>;
@@ -12,8 +12,10 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.simple = simple;
  exports.simple_stream = simple_stream;
  exports.simple_stream2 = simple_stream2;
+ exports.simple_stream3 = simple_stream3;
  const anthropic_1 = require("../platform/anthropic");
  const chatgpt_1 = require("../platform/chatgpt");
+ const gemini_1 = require("../platform/gemini");
  // Waiting Mode
  function simple(chat_gpt_api_key) {
  return __awaiter(this, void 0, void 0, function* () {
@@ -62,3 +64,23 @@ function simple_stream2(anthropic_api_key) {
  });
  });
  }
+ // Stream Mode
+ function simple_stream3(gemini_api_key) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const chatgpt = new gemini_1.GeminiLLM(gemini_api_key, 'gemini-1.5-flash');
+ const response = yield chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+ yield new Promise(resolve => {
+ response.stream((chunk, is_complete) => {
+ if (!is_complete) {
+ process.stdout.write(chunk);
+ }
+ else {
+ console.log('\n');
+ console.log(`<selesai>`);
+ // resolve promise
+ resolve(null);
+ }
+ });
+ });
+ });
+ }
package/dist/index.d.ts CHANGED
@@ -2,6 +2,7 @@ import { LLMRunner } from "./base/llm-runner";
  import { ChatGPTLLM } from "./platform/chatgpt";
  import { OllamaLLM } from "./platform/ollama";
  import { AnthropicLLM } from "./platform/anthropic";
+ import { GeminiLLM } from "./platform/gemini";
  import { GenericLLM } from "./base/generic-llm";
  import { MultistepTypes } from "./multistep/types";
  import { MSModule_Choose } from "./multistep/modules/choose";
@@ -10,4 +11,4 @@ import { MSModule_Normal } from "./multistep/modules/normal";
  import { MSModule_OpenListAnswer } from "./multistep/modules/open-list-answer";
  import { MSModule_Plan } from "./multistep/modules/plan";
  import { MSModule_YesNo } from "./multistep/modules/yes-no";
- export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, AnthropicLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
+ export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, AnthropicLLM, GeminiLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
package/dist/index.js CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.AnthropicLLM = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
+ exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.GeminiLLM = exports.AnthropicLLM = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
  const llm_runner_1 = require("./base/llm-runner");
  Object.defineProperty(exports, "LLMRunner", { enumerable: true, get: function () { return llm_runner_1.LLMRunner; } });
  const chatgpt_1 = require("./platform/chatgpt");
@@ -9,6 +9,8 @@ const ollama_1 = require("./platform/ollama");
  Object.defineProperty(exports, "OllamaLLM", { enumerable: true, get: function () { return ollama_1.OllamaLLM; } });
  const anthropic_1 = require("./platform/anthropic");
  Object.defineProperty(exports, "AnthropicLLM", { enumerable: true, get: function () { return anthropic_1.AnthropicLLM; } });
+ const gemini_1 = require("./platform/gemini");
+ Object.defineProperty(exports, "GeminiLLM", { enumerable: true, get: function () { return gemini_1.GeminiLLM; } });
  const generic_llm_1 = require("./base/generic-llm");
  Object.defineProperty(exports, "GenericLLM", { enumerable: true, get: function () { return generic_llm_1.GenericLLM; } });
  const choose_1 = require("./multistep/modules/choose");
package/dist/platform/gemini.d.ts ADDED
@@ -0,0 +1,13 @@
+ import { LLMRunner } from "../base/llm-runner";
+ import { GenericLLM } from "../base/generic-llm";
+ import { Readable } from 'node:stream';
+ export type GeminiModel = 'gemini-2.0-flash' | 'gemini-2.0-flash-lite' | 'gemini-1.5-flash' | 'gemini-1.5-flash-8b' | 'gemini-1.5-pro';
+ /**
+ * Gemini Implementation
+ */
+ export declare class GeminiLLM extends LLMRunner.BaseLLM {
+ private gemini;
+ constructor(apikey: string, model: GeminiModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>, max_tokens?: number);
+ protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+ protected chat(messages: string[], id_session: string | null): Promise<string>;
+ }
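
Per the declaration above, the new Gemini backend is used like the other platforms; a sketch (API key and prompts are placeholders):

```ts
import { GeminiLLM, LLMRunner } from "@graf-research/llm-runner";

// model is constrained to the GeminiModel union declared above
const gemini: LLMRunner.BaseLLM = new GeminiLLM('<apikey>', 'gemini-1.5-flash');

// no-context ask, as with the other BaseLLM implementations
const answer: string = await gemini.askNoContext('Apa ibukota Indonesia?');

// session-based ask reuses the inherited chat_session_manager
const session = await gemini.chat_session_manager.newSession();
const reply: string = await gemini.ask('Apa ibukota Indonesia?', session.id);
```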
package/dist/platform/gemini.js ADDED
@@ -0,0 +1,96 @@
+ "use strict";
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+ };
+ var __asyncValues = (this && this.__asyncValues) || function (o) {
+ if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+ var m = o[Symbol.asyncIterator], i;
+ return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+ function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+ function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.GeminiLLM = void 0;
+ const generative_ai_1 = require("@google/generative-ai");
+ const llm_runner_1 = require("../base/llm-runner");
+ /**
+ * Gemini Implementation
+ */
+ class GeminiLLM extends llm_runner_1.LLMRunner.BaseLLM {
+ constructor(apikey, model, chat_session_manager, max_tokens = 1024) {
+ super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
+ this.gemini = new generative_ai_1.GoogleGenerativeAI(apikey).getGenerativeModel({ model });
+ }
+ streamChat(messages, id_session, stream, ac) {
+ return __awaiter(this, void 0, void 0, function* () {
+ var _a, e_1, _b, _c;
+ const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+ const result = yield this.gemini.generateContentStream({
+ contents: [
+ ...chat_history.map((msg) => ({
+ role: msg.role === 'assistant' ? 'model' : 'user',
+ parts: [{ text: msg.content }]
+ })),
+ ...messages.map(content => ({
+ role: 'user',
+ parts: [{ text: content }]
+ }))
+ ]
+ }, { signal: ac.signal });
+ //
+ result.response.catch(() => { });
+ try {
+ try {
+ for (var _d = true, _e = __asyncValues(result.stream), _f; _f = yield _e.next(), _a = _f.done, !_a; _d = true) {
+ _c = _f.value;
+ _d = false;
+ const chunk = _c;
+ stream.push(chunk.text());
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (!_d && !_a && (_b = _e.return)) yield _b.call(_e);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ }
+ catch (err) {
+ if (err.name == 'AbortError') {
+ // aborted
+ return;
+ }
+ throw err;
+ }
+ finally {
+ stream.push(null);
+ }
+ });
+ }
+ chat(messages, id_session) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+ const result = yield this.gemini.generateContent({
+ contents: [
+ ...chat_history.map((msg) => ({
+ role: msg.role === 'assistant' ? 'model' : 'user',
+ parts: [{ text: msg.content }]
+ })),
+ ...messages.map(content => ({
+ role: 'user',
+ parts: [{ text: content }]
+ }))
+ ]
+ });
+ return result.response.text();
+ });
+ }
+ }
+ exports.GeminiLLM = GeminiLLM;
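
The compiled `__asyncValues` loop above obscures the core logic: session history is replayed into Gemini's `contents` array, with stored `assistant` turns renamed to Gemini's `model` role. The mapping, extracted as a standalone sketch (the `HistoryMessage` shape is assumed from usage):

```ts
// Sketch of the history/message mapping used by GeminiLLM above.
// Gemini has no 'assistant' role, so stored assistant turns become 'model'.
type HistoryMessage = { role: 'user' | 'assistant'; content: string };

function toGeminiContents(history: HistoryMessage[], messages: string[]) {
  return [
    ...history.map(msg => ({
      role: msg.role === 'assistant' ? 'model' : 'user',
      parts: [{ text: msg.content }],
    })),
    ...messages.map(content => ({
      role: 'user',
      parts: [{ text: content }],
    })),
  ];
}
```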
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@graf-research/llm-runner",
- "version": "0.0.13",
+ "version": "0.0.15",
  "main": "dist/index.js",
  "scripts": {
  "build": "rm -rf dist && tsc",
@@ -20,6 +20,7 @@
  },
  "dependencies": {
  "@anthropic-ai/sdk": "^0.38.0",
+ "@google/generative-ai": "^0.22.0",
  "lodash": "^4.17.21",
  "ollama": "^0.5.13",
  "openai": "^4.85.3",