@graf-research/llm-runner 0.0.13 → 0.0.14

package/README.md CHANGED
@@ -16,7 +16,7 @@ npm install --save @graf-research/llm-runner
 import { ChatGPTLLM } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
 const response: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
 ```

@@ -26,7 +26,7 @@ const response: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
 import { ChatGPTLLM } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
 const session = await chatgpt.chat_session_manager.newSession();

 const response1: string = await chatgpt.ask(['Apa ibukota Indonesia?'], session.id);
@@ -42,7 +42,7 @@ console.log(response2);
 import { ChatGPTLLM, MSModule_Choose, MSModule_OpenListAnswer } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

 const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
 const q1_options = ['Bank BCA', 'Istana Negara', 'POM Bensin'];
@@ -64,7 +64,7 @@ if (a1 === 'Bank BCA') {
 import { ChatGPTLLM, GenericLLM } from "@graf-research/llm-runner";

 const chat_gpt_api_key = '<apikey>';
-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

 const response: GenericLLM.StreamResponse = await chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
 response.stream((chunk: string, is_complete: boolean) => {
@@ -85,7 +85,7 @@ response.stream((chunk: string, is_complete: boolean) => {
 ```ts
 import { ChatGPTLLM, MultistepTypes, MSModule_Choose, MSModule_OpenListAnswer } from "@graf-research/llm-runner";

-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
 const stream = new Readable({ objectMode: true, read() {} });

 const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
@@ -148,7 +148,7 @@ const llm: LLMRunner.BaseLLM = ollama;
 ```ts
 import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
 const response: string = await llm.askNoContext(['Apa ibukota Indonesia?']);
 ```

@@ -157,7 +157,7 @@ const response: string = await llm.askNoContext(['Apa ibukota Indonesia?']);
 ```ts
 import { ChatGPTLLM, GenericLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');

 const response: GenericLLM.StreamResponse = await chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
 response.stream((chunk: string, is_complete: boolean) => {
@@ -172,7 +172,7 @@ response.stream((chunk: string, is_complete: boolean) => {
 ```ts
 import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
 const session = await llm.chat_session_manager.newSession();
 const response1: string = await llm.ask(['Apa ibukota Indonesia?'], session.id);
 const response2: string = await llm.ask(['Apa yang saya tanyakan sebelumnya?'], session.id);
@@ -184,7 +184,7 @@ const response2: string = await llm.ask(['Apa yang saya tanyakan sebelumnya?'],
 ```ts
 import { ChatGPTLLM, LLMRunner } from "@graf-research/llm-runner";

-const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>');
+const llm: LLMRunner.BaseLLM = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
 const session = await llm.chat_session_manager.newSession();

 const response1: GenericLLM.StreamResponse = await chatgpt.stream(['Jelaskan proses metamorfosis pada kupu-kupu'], session.id);
@@ -346,7 +346,7 @@ Pada umumnya stream yang terlalu panjang kadang harus diberhentikan karena suatu
 ```ts
 import { ChatGPTLLM, GenericLLM } from "@graf-research/llm-runner";

-const chatgpt = new ChatGPTLLM(chat_gpt_api_key);
+const chatgpt = new ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');

 // 1. Siapkan Abort Controller
 const ac = new AbortController();
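
The README change in 0.0.14 is a single breaking adjustment applied to every example: the `ChatGPTLLM` constructor now takes the model name as a required second argument. A minimal before/after sketch (using `'gpt-4o-mini'` as the updated docs do, in a context that allows top-level await):

```ts
import { ChatGPTLLM } from "@graf-research/llm-runner";

// 0.0.13 constructed the client from the API key alone:
// const chatgpt = new ChatGPTLLM('<apikey>');

// 0.0.14 requires the model to be named explicitly:
const chatgpt = new ChatGPTLLM('<apikey>', 'gpt-4o-mini');
const response: string = await chatgpt.askNoContext(['Apa ibukota Indonesia?']);
```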
@@ -1 +1,2 @@
 export declare function abortStream(chat_gpt_api_key: string): Promise<void>;
+export declare function abortStream2(gemini_api_key: string): Promise<void>;
@@ -10,7 +10,9 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.abortStream = abortStream;
+exports.abortStream2 = abortStream2;
 const chatgpt_1 = require("../platform/chatgpt");
+const gemini_1 = require("../platform/gemini");
 // Stream Mode
 function abortStream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
@@ -37,3 +39,29 @@ function abortStream(chat_gpt_api_key) {
         });
     });
 }
+// Stream Mode
+function abortStream2(gemini_api_key) {
+    return __awaiter(this, void 0, void 0, function* () {
+        const gemini = new gemini_1.GeminiLLM(gemini_api_key, 'gemini-1.5-flash');
+        const ac = new AbortController();
+        setTimeout(() => {
+            ac.abort();
+            console.log(`<<RESPONSE STREAM ABORTED>>`);
+        }, 2000);
+        const response = yield gemini.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+        ac.signal.addEventListener('abort', () => response.cancel());
+        yield new Promise(resolve => {
+            response.stream((chunk, is_complete) => {
+                if (!is_complete) {
+                    process.stdout.write(chunk);
+                }
+                else {
+                    console.log('\n');
+                    console.log(`<selesai>`);
+                    // resolve promise
+                    resolve(null);
+                }
+            });
+        });
+    });
+}
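
The new `abortStream2` is the Gemini counterpart of the existing ChatGPT abort example. Rendered back into the package's TypeScript idiom (a sketch inferred from the compiled output above, assuming Node's global `AbortController`):

```ts
import { GeminiLLM, GenericLLM } from "@graf-research/llm-runner";

async function abortStream2(gemini_api_key: string): Promise<void> {
  const gemini = new GeminiLLM(gemini_api_key, 'gemini-1.5-flash');

  // Abort the response stream after two seconds
  const ac = new AbortController();
  setTimeout(() => ac.abort(), 2000);

  const response: GenericLLM.StreamResponse = await gemini.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
  ac.signal.addEventListener('abort', () => response.cancel());

  // Drain the stream until the completion callback fires
  await new Promise<void>(resolve => {
    response.stream((chunk: string, is_complete: boolean) => {
      if (!is_complete) process.stdout.write(chunk);
      else resolve();
    });
  });
}
```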
@@ -1,3 +1,4 @@
 export declare function simple(chat_gpt_api_key: string): Promise<void>;
 export declare function simple_stream(chat_gpt_api_key: string): Promise<void>;
 export declare function simple_stream2(anthropic_api_key: string): Promise<void>;
+export declare function simple_stream3(gemini_api_key: string): Promise<void>;
@@ -12,8 +12,10 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.simple = simple;
 exports.simple_stream = simple_stream;
 exports.simple_stream2 = simple_stream2;
+exports.simple_stream3 = simple_stream3;
 const anthropic_1 = require("../platform/anthropic");
 const chatgpt_1 = require("../platform/chatgpt");
+const gemini_1 = require("../platform/gemini");
 // Waiting Mode
 function simple(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
@@ -62,3 +64,23 @@ function simple_stream2(anthropic_api_key) {
         });
     });
 }
+// Stream Mode
+function simple_stream3(gemini_api_key) {
+    return __awaiter(this, void 0, void 0, function* () {
+        const chatgpt = new gemini_1.GeminiLLM(gemini_api_key, 'gemini-1.5-flash');
+        const response = yield chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+        yield new Promise(resolve => {
+            response.stream((chunk, is_complete) => {
+                if (!is_complete) {
+                    process.stdout.write(chunk);
+                }
+                else {
+                    console.log('\n');
+                    console.log(`<selesai>`);
+                    // resolve promise
+                    resolve(null);
+                }
+            });
+        });
+    });
+}
package/dist/index.d.ts CHANGED
@@ -2,6 +2,7 @@ import { LLMRunner } from "./base/llm-runner";
 import { ChatGPTLLM } from "./platform/chatgpt";
 import { OllamaLLM } from "./platform/ollama";
 import { AnthropicLLM } from "./platform/anthropic";
+import { GeminiLLM } from "./platform/gemini";
 import { GenericLLM } from "./base/generic-llm";
 import { MultistepTypes } from "./multistep/types";
 import { MSModule_Choose } from "./multistep/modules/choose";
@@ -10,4 +11,4 @@ import { MSModule_Normal } from "./multistep/modules/normal";
 import { MSModule_OpenListAnswer } from "./multistep/modules/open-list-answer";
 import { MSModule_Plan } from "./multistep/modules/plan";
 import { MSModule_YesNo } from "./multistep/modules/yes-no";
-export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, AnthropicLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
+export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, AnthropicLLM, GeminiLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
package/dist/index.js CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.AnthropicLLM = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
+exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.GeminiLLM = exports.AnthropicLLM = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
 const llm_runner_1 = require("./base/llm-runner");
 Object.defineProperty(exports, "LLMRunner", { enumerable: true, get: function () { return llm_runner_1.LLMRunner; } });
 const chatgpt_1 = require("./platform/chatgpt");
@@ -9,6 +9,8 @@ const ollama_1 = require("./platform/ollama");
 Object.defineProperty(exports, "OllamaLLM", { enumerable: true, get: function () { return ollama_1.OllamaLLM; } });
 const anthropic_1 = require("./platform/anthropic");
 Object.defineProperty(exports, "AnthropicLLM", { enumerable: true, get: function () { return anthropic_1.AnthropicLLM; } });
+const gemini_1 = require("./platform/gemini");
+Object.defineProperty(exports, "GeminiLLM", { enumerable: true, get: function () { return gemini_1.GeminiLLM; } });
 const generic_llm_1 = require("./base/generic-llm");
 Object.defineProperty(exports, "GenericLLM", { enumerable: true, get: function () { return generic_llm_1.GenericLLM; } });
 const choose_1 = require("./multistep/modules/choose");
@@ -0,0 +1,13 @@
+import { LLMRunner } from "../base/llm-runner";
+import { GenericLLM } from "../base/generic-llm";
+import { Readable } from 'node:stream';
+export type GeminiModel = 'gemini-2.0-flash' | 'gemini-2.0-flash-lite' | 'gemini-1.5-flash' | 'gemini-1.5-flash-8b' | 'gemini-1.5-pro';
+/**
+ * Gemini Implementation
+ */
+export declare class GeminiLLM extends LLMRunner.BaseLLM {
+    private gemini;
+    constructor(apikey: string, model: GeminiModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>, max_tokens?: number);
+    protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+    protected chat(messages: string[], id_session: string | null): Promise<string>;
+}
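
From the declaration above, `GeminiLLM` follows the same `BaseLLM` contract as the other providers: the API key and a `GeminiModel` literal are required, while the session manager and `max_tokens` are optional (`max_tokens` defaults to 1024 but does not appear to be referenced in the compiled constructor below). A minimal usage sketch mirroring the README pattern for the other backends:

```ts
import { GeminiLLM } from "@graf-research/llm-runner";

const gemini = new GeminiLLM('<gemini-apikey>', 'gemini-1.5-flash');
const response: string = await gemini.askNoContext(['Apa ibukota Indonesia?']);
console.log(response);
```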
@@ -0,0 +1,96 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+var __asyncValues = (this && this.__asyncValues) || function (o) {
+    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+    var m = o[Symbol.asyncIterator], i;
+    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GeminiLLM = void 0;
+const generative_ai_1 = require("@google/generative-ai");
+const llm_runner_1 = require("../base/llm-runner");
+/**
+ * Gemini Implementation
+ */
+class GeminiLLM extends llm_runner_1.LLMRunner.BaseLLM {
+    constructor(apikey, model, chat_session_manager, max_tokens = 1024) {
+        super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
+        this.gemini = new generative_ai_1.GoogleGenerativeAI(apikey).getGenerativeModel({ model });
+    }
+    streamChat(messages, id_session, stream, ac) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, e_1, _b, _c;
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const result = yield this.gemini.generateContentStream({
+                contents: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role === 'assistant' ? 'model' : 'user',
+                        parts: [{ text: msg.content }]
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user',
+                        parts: [{ text: content }]
+                    }))
+                ]
+            }, { signal: ac.signal });
+            //
+            result.response.catch(() => { });
+            try {
+                try {
+                    for (var _d = true, _e = __asyncValues(result.stream), _f; _f = yield _e.next(), _a = _f.done, !_a; _d = true) {
+                        _c = _f.value;
+                        _d = false;
+                        const chunk = _c;
+                        stream.push(chunk.text());
+                    }
+                }
+                catch (e_1_1) { e_1 = { error: e_1_1 }; }
+                finally {
+                    try {
+                        if (!_d && !_a && (_b = _e.return)) yield _b.call(_e);
+                    }
+                    finally { if (e_1) throw e_1.error; }
+                }
+            }
+            catch (err) {
+                if (err.name == 'AbortError') {
+                    // aborted
+                    return;
+                }
+                throw err;
+            }
+            finally {
+                stream.push(null);
+            }
+        });
+    }
+    chat(messages, id_session) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const result = yield this.gemini.generateContent({
+                contents: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role === 'assistant' ? 'model' : 'user',
+                        parts: [{ text: msg.content }]
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user',
+                        parts: [{ text: content }]
+                    }))
+                ]
+            });
+            return result.response.text();
+        });
+    }
+}
+exports.GeminiLLM = GeminiLLM;
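
The `__asyncValues` boilerplate above is how tsc down-levels a `for await...of` loop over the SDK's stream. Stripped of the session plumbing, the core of `streamChat` corresponds to this plain `@google/generative-ai` pattern (a standalone sketch for orientation, not the package's actual TypeScript source):

```ts
import { GoogleGenerativeAI } from "@google/generative-ai";

async function streamToStdout(apikey: string, prompt: string): Promise<void> {
  const model = new GoogleGenerativeAI(apikey).getGenerativeModel({ model: 'gemini-1.5-flash' });
  const result = await model.generateContentStream({
    contents: [{ role: 'user', parts: [{ text: prompt }] }]
  });
  // Equivalent of the compiled __asyncValues loop: emit each text chunk as it arrives
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.text());
  }
}
```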
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@graf-research/llm-runner",
-  "version": "0.0.13",
+  "version": "0.0.14",
   "main": "dist/index.js",
   "scripts": {
     "build": "rm -rf dist && tsc",
@@ -20,6 +20,7 @@
   },
   "dependencies": {
     "@anthropic-ai/sdk": "^0.38.0",
+    "@google/generative-ai": "^0.22.0",
     "lodash": "^4.17.21",
     "ollama": "^0.5.13",
     "openai": "^4.85.3",