@graf-research/llm-runner 0.0.12 → 0.0.13

This diff shows the changes between two publicly released versions of the package, as published to its registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -129,10 +129,11 @@ console.log('<finish>');
 ### ⚡ Multiple LLM Instance Implementation
 
 ```ts
-import { ChatGPTLLM, OllamaLLM, LLMRunner } from "@graf-research/llm-runner";
+import { ChatGPTLLM, OllamaLLM, AnthropicLLM, LLMRunner } from "@graf-research/llm-runner";
 
-const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('');
+const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('apikey', 'gpt-4o-mini');
 const ollama: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', 'deepseek-r1:8b');
+const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('apikey', 'claude-3-opus-latest');
 
 // different platform implemented on but same signature BaseLLM class
 const llm: LLMRunner.BaseLLM = ollama;
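Read together, this hunk documents two separate changes: the new AnthropicLLM provider, and a model name that is now a required constructor argument for ChatGPTLLM (previously hardcoded; see the platform/chatgpt hunks below). Assembled, the updated README snippet reads:

```ts
import { ChatGPTLLM, OllamaLLM, AnthropicLLM, LLMRunner } from "@graf-research/llm-runner";

// All three providers share the BaseLLM signature, so they are interchangeable.
const chatgpt: LLMRunner.BaseLLM = new ChatGPTLLM('apikey', 'gpt-4o-mini');
const ollama: LLMRunner.BaseLLM = new OllamaLLM('http://my-ollama-server', 'deepseek-r1:8b');
const anthropic: LLMRunner.BaseLLM = new AnthropicLLM('apikey', 'claude-3-opus-latest');

// Different platform implementations behind the same BaseLLM interface.
const llm: LLMRunner.BaseLLM = ollama;
```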
@@ -14,7 +14,7 @@ const chatgpt_1 = require("../platform/chatgpt");
 // Stream Mode
 function abortStream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const ac = new AbortController();
         setTimeout(() => {
             ac.abort();
@@ -25,7 +25,7 @@ const open_list_answer_1 = require("../multistep/modules/open-list-answer");
 function simpleMultistepWithStream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
         var _a, e_1, _b, _c;
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
         const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
         const q1_options = ['Bank BCA', 'Istana Negara', 'POM Bensin'];
@@ -16,7 +16,7 @@ const open_list_answer_1 = require("../multistep/modules/open-list-answer");
 // Waiting Mode
 function simpleMultistep(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const q1 = 'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang';
         const q1_options = ['Bank BCA', 'Istana Negara', 'POM Bensin'];
         const q2 = 'Saya ingin belajar LLM';
@@ -15,7 +15,7 @@ const chatgpt_1 = require("../platform/chatgpt");
 // Waiting Mode
 function simpleWithContext(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const session_id = 'sample-id';
         const response1 = yield chatgpt.ask(['Apa ibukota Indonesia?'], session_id);
         console.log(response1);
@@ -26,7 +26,7 @@ function simpleWithContext(chat_gpt_api_key) {
 // Stream Mode
 function simpleWithContext_stream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const session_id = 'sample-id';
         const response1 = yield chatgpt.stream(['Jelaskan proses metamorfosis pada kupu-kupu'], session_id);
         yield new Promise(resolve => {
@@ -1,2 +1,3 @@
 export declare function simple(chat_gpt_api_key: string): Promise<void>;
 export declare function simple_stream(chat_gpt_api_key: string): Promise<void>;
+export declare function simple_stream2(anthropic_api_key: string): Promise<void>;
@@ -11,11 +11,13 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.simple = simple;
 exports.simple_stream = simple_stream;
+exports.simple_stream2 = simple_stream2;
+const anthropic_1 = require("../platform/anthropic");
 const chatgpt_1 = require("../platform/chatgpt");
 // Waiting Mode
 function simple(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
         const response = yield chatgpt.askNoContext(['Apa ibukota Indonesia?']);
         console.log(response);
     });
@@ -23,7 +25,27 @@ function simple(chat_gpt_api_key) {
 // Stream Mode
 function simple_stream(chat_gpt_api_key) {
     return __awaiter(this, void 0, void 0, function* () {
-        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key);
+        const chatgpt = new chatgpt_1.ChatGPTLLM(chat_gpt_api_key, 'gpt-4o-mini');
+        const response = yield chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
+        yield new Promise(resolve => {
+            response.stream((chunk, is_complete) => {
+                if (!is_complete) {
+                    process.stdout.write(chunk);
+                }
+                else {
+                    console.log('\n');
+                    console.log(`<selesai>`);
+                    // resolve promise
+                    resolve(null);
+                }
+            });
+        });
+    });
+}
+// Stream Mode
+function simple_stream2(anthropic_api_key) {
+    return __awaiter(this, void 0, void 0, function* () {
+        const chatgpt = new anthropic_1.AnthropicLLM(anthropic_api_key, 'claude-3-opus-latest');
         const response = yield chatgpt.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
         yield new Promise(resolve => {
             response.stream((chunk, is_complete) => {
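The compiled simple_stream2 sample corresponds roughly to the following TypeScript source (a reconstruction; the actual .ts sample file is not part of this diff, and the callback typings are inferred from how the compiled code uses them):

```ts
import { AnthropicLLM } from "@graf-research/llm-runner";

// Stream Mode: print chunks as they arrive, resolve once the stream reports completion.
export async function simple_stream2(anthropic_api_key: string): Promise<void> {
  const llm = new AnthropicLLM(anthropic_api_key, 'claude-3-opus-latest');
  const response = await llm.streamNoContext(['Jelaskan proses metamorfosis pada kupu-kupu']);
  await new Promise(resolve => {
    response.stream((chunk: string, is_complete: boolean) => {
      if (!is_complete) {
        process.stdout.write(chunk);
      } else {
        console.log('\n');
        console.log('<selesai>');
        resolve(null);
      }
    });
  });
}
```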
package/dist/index.d.ts CHANGED
@@ -1,6 +1,7 @@
 import { LLMRunner } from "./base/llm-runner";
 import { ChatGPTLLM } from "./platform/chatgpt";
 import { OllamaLLM } from "./platform/ollama";
+import { AnthropicLLM } from "./platform/anthropic";
 import { GenericLLM } from "./base/generic-llm";
 import { MultistepTypes } from "./multistep/types";
 import { MSModule_Choose } from "./multistep/modules/choose";
@@ -9,4 +10,4 @@ import { MSModule_Normal } from "./multistep/modules/normal";
 import { MSModule_OpenListAnswer } from "./multistep/modules/open-list-answer";
 import { MSModule_Plan } from "./multistep/modules/plan";
 import { MSModule_YesNo } from "./multistep/modules/yes-no";
-export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
+export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, AnthropicLLM, MultistepTypes, MSModule_Choose, MSModule_MultipleChoiceAnswer, MSModule_Normal, MSModule_OpenListAnswer, MSModule_Plan, MSModule_YesNo };
package/dist/index.js CHANGED
@@ -1,12 +1,14 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
+exports.MSModule_YesNo = exports.MSModule_Plan = exports.MSModule_OpenListAnswer = exports.MSModule_Normal = exports.MSModule_MultipleChoiceAnswer = exports.MSModule_Choose = exports.AnthropicLLM = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
 const llm_runner_1 = require("./base/llm-runner");
 Object.defineProperty(exports, "LLMRunner", { enumerable: true, get: function () { return llm_runner_1.LLMRunner; } });
 const chatgpt_1 = require("./platform/chatgpt");
 Object.defineProperty(exports, "ChatGPTLLM", { enumerable: true, get: function () { return chatgpt_1.ChatGPTLLM; } });
 const ollama_1 = require("./platform/ollama");
 Object.defineProperty(exports, "OllamaLLM", { enumerable: true, get: function () { return ollama_1.OllamaLLM; } });
+const anthropic_1 = require("./platform/anthropic");
+Object.defineProperty(exports, "AnthropicLLM", { enumerable: true, get: function () { return anthropic_1.AnthropicLLM; } });
 const generic_llm_1 = require("./base/generic-llm");
 Object.defineProperty(exports, "GenericLLM", { enumerable: true, get: function () { return generic_llm_1.GenericLLM; } });
 const choose_1 = require("./multistep/modules/choose");
@@ -24,14 +24,14 @@ var MSModule_Choose;
 }
 function ask(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available`;
+        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available, answer without quote symbol`;
         return yield wrapper_1.MultistepWrapper.ask(llm, prompt, generateChooseResolver(options), session_id);
     });
 }
 MSModule_Choose.ask = ask;
 function stream(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available`;
+        const prompt = `Respond this question: ${q}, choose the closest answer from: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available, answer without quote symbol`;
         return yield wrapper_1.MultistepWrapper.stream(llm, prompt, generateChooseResolver(options), session_id);
     });
 }
@@ -23,14 +23,14 @@ var MSModule_MultipleChoiceAnswer;
 }
 function ask(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options`;
+        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options, answer without quote symbol`;
         return yield wrapper_1.MultistepWrapper.ask(llm, prompt, generateMultipleChoiceAnswerResolver(options), session_id);
     });
 }
 MSModule_MultipleChoiceAnswer.ask = ask;
 function stream(llm, q, options, session_id) {
     return __awaiter(this, void 0, void 0, function* () {
-        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options`;
+        const prompt = `Respond this question: ${q}, choose one or more from this options as answer: ${options.map(a => `"${a}"`).join(',')}. Put all the answers into one line each. Don't make up an answer, only choose from available options, answer without quote symbol`;
        return yield wrapper_1.MultistepWrapper.stream(llm, prompt, generateMultipleChoiceAnswerResolver(options), session_id);
     });
 }
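Both prompt tweaks above append "answer without quote symbol" so the resolver receives raw option text instead of a quoted string. Calling one of these modules looks like this (a sketch based only on the `ask(llm, q, options, session_id)` signature visible in the hunk; the resolved value is assumed to be the matched option, and passing `null` for `session_id` to skip chat history is assumed from the platform-level `chat` implementations):

```ts
import { MSModule_Choose } from "@graf-research/llm-runner";

// llm is any LLMRunner.BaseLLM (ChatGPT, Ollama, or Anthropic).
const answer = await MSModule_Choose.ask(
  llm,
  'Saya sedang berada di tempat banyak orang mengantri untuk menyimpan uang',
  ['Bank BCA', 'Istana Negara', 'POM Bensin'],
  null // session_id; assumed null skips chat-history context
);
```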
@@ -0,0 +1,15 @@
+import { Model as AnthropicModel } from '@anthropic-ai/sdk/resources';
+import { LLMRunner } from "../base/llm-runner";
+import { GenericLLM } from "../base/generic-llm";
+import { Readable } from 'node:stream';
+/**
+ * Anthropic Implementation
+ */
+export declare class AnthropicLLM extends LLMRunner.BaseLLM {
+    private anthropic;
+    private model;
+    private max_tokens;
+    constructor(apikey: string, model: AnthropicModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>, max_tokens?: number);
+    protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+    protected chat(messages: string[], id_session: string | null): Promise<string>;
+}
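Given this declaration, construction looks like the following (a sketch; the third and fourth arguments are optional, with `LLMRunner.SessionManager` and 1024 as the defaults visible in the compiled constructor below):

```ts
import { AnthropicLLM, LLMRunner } from "@graf-research/llm-runner";

// Minimal form: API key plus an Anthropic model id.
const anthropic = new AnthropicLLM('apikey', 'claude-3-opus-latest');

// Full form: explicit session manager and a higher completion cap.
const capped = new AnthropicLLM(
  'apikey',
  'claude-3-opus-latest',
  new LLMRunner.SessionManager(), // default when omitted
  2048                            // max_tokens (default 1024)
);
```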
@@ -0,0 +1,100 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+var __asyncValues = (this && this.__asyncValues) || function (o) {
+    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+    var m = o[Symbol.asyncIterator], i;
+    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+};
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AnthropicLLM = void 0;
+const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
+const llm_runner_1 = require("../base/llm-runner");
+/**
+ * Anthropic Implementation
+ */
+class AnthropicLLM extends llm_runner_1.LLMRunner.BaseLLM {
+    constructor(apikey, model, chat_session_manager, max_tokens = 1024) {
+        super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
+        this.max_tokens = 1024;
+        this.anthropic = new sdk_1.default({ apiKey: apikey });
+        this.model = model;
+        this.max_tokens = max_tokens;
+    }
+    streamChat(messages, id_session, stream, ac) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, e_1, _b, _c;
+            var _d;
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const cgpt_stream = yield this.anthropic.messages.create({
+                model: this.model,
+                max_tokens: this.max_tokens,
+                messages: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role,
+                        content: msg.content
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user', content
+                    }))
+                ],
+                stream: true
+            });
+            ac.signal.addEventListener('abort', () => cgpt_stream.controller.abort());
+            try {
+                for (var _e = true, cgpt_stream_1 = __asyncValues(cgpt_stream), cgpt_stream_1_1; cgpt_stream_1_1 = yield cgpt_stream_1.next(), _a = cgpt_stream_1_1.done, !_a; _e = true) {
+                    _c = cgpt_stream_1_1.value;
+                    _e = false;
+                    const chunk = _c;
+                    if (chunk.type === 'content_block_delta' && chunk.delta.type === 'text_delta') {
+                        stream.push((_d = chunk.delta.text) !== null && _d !== void 0 ? _d : '');
+                    }
+                }
+            }
+            catch (e_1_1) { e_1 = { error: e_1_1 }; }
+            finally {
+                try {
+                    if (!_e && !_a && (_b = cgpt_stream_1.return)) yield _b.call(cgpt_stream_1);
+                }
+                finally { if (e_1) throw e_1.error; }
+            }
+            stream.push(null);
+        });
+    }
+    chat(messages, id_session) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a;
+            const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+            const res = yield this.anthropic.messages.create({
+                model: this.model,
+                max_tokens: this.max_tokens,
+                messages: [
+                    ...chat_history.map((msg) => ({
+                        role: msg.role,
+                        content: msg.content
+                    })),
+                    ...messages.map(content => ({
+                        role: 'user', content
+                    }))
+                ],
+            });
+            if (((_a = res.content) === null || _a === void 0 ? void 0 : _a[0].type) === 'text') {
+                return res.content[0].text;
+            }
+            return '';
+        });
+    }
+}
+exports.AnthropicLLM = AnthropicLLM;
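One detail worth noting in `streamChat`: the runner's AbortController is forwarded to the SDK stream's own controller, so an upstream abort cancels the underlying HTTP stream. De-sugared from the compiled output above, the pattern is (a sketch using the same @anthropic-ai/sdk calls; key and model strings are placeholders):

```ts
import Anthropic from "@anthropic-ai/sdk";

const anthropic = new Anthropic({ apiKey: 'apikey' });
const ac = new AbortController(); // the runner-level controller

const sdkStream = await anthropic.messages.create({
  model: 'claude-3-opus-latest',
  max_tokens: 1024,
  messages: [{ role: 'user', content: 'Hello' }],
  stream: true,
});

// Forward a runner-level abort to the SDK stream.
ac.signal.addEventListener('abort', () => sdkStream.controller.abort());

for await (const chunk of sdkStream) {
  // Only text deltas are pushed downstream; other event types are ignored.
  if (chunk.type === 'content_block_delta' && chunk.delta.type === 'text_delta') {
    process.stdout.write(chunk.delta.text);
  }
}
```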
@@ -1,12 +1,14 @@
 import { LLMRunner } from "../base/llm-runner";
 import { GenericLLM } from "../base/generic-llm";
 import { Readable } from 'node:stream';
+import { ChatModel as ChatGPTModel } from "openai/resources";
 /**
  * Chat GPT Implementation
  */
 export declare class ChatGPTLLM extends LLMRunner.BaseLLM {
     private cgpt;
-    constructor(api_key: string, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>);
+    private model;
+    constructor(api_key: string, model: ChatGPTModel, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>);
     protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
     protected chat(messages: string[], id_session: string | null): Promise<string>;
 }
@@ -26,9 +26,10 @@ const llm_runner_1 = require("../base/llm-runner");
  * Chat GPT Implementation
  */
 class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
-    constructor(api_key, chat_session_manager) {
+    constructor(api_key, model, chat_session_manager) {
         super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
         this.cgpt = new openai_1.default({ apiKey: api_key });
+        this.model = model;
     }
     streamChat(messages, id_session, stream, ac) {
         return __awaiter(this, void 0, void 0, function* () {
@@ -44,7 +45,7 @@ class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
                 return;
             }
             const cgpt_stream = yield this.cgpt.chat.completions.create({
-                model: "gpt-4o-mini",
+                model: this.model,
                 store: false,
                 stream: true,
                 n: 1,
@@ -84,7 +85,7 @@ class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
                 ...messages.map(content => ({ role: 'user', content }))
             ];
             const res = yield this.cgpt.chat.completions.create({
-                model: "gpt-4o-mini",
+                model: this.model,
                 store: false,
                 n: 1,
                 messages: chat_messages
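For consumers this constructor change is breaking: 0.0.12 accepted `new ChatGPTLLM(api_key)` and hardcoded "gpt-4o-mini" internally, while 0.0.13 requires the model as the second argument and shifts the optional session manager to third. A minimal migration sketch:

```ts
import { ChatGPTLLM } from "@graf-research/llm-runner";

// 0.0.12 — model was hardcoded to "gpt-4o-mini" inside the class:
// const chatgpt = new ChatGPTLLM('apikey');

// 0.0.13 — the model is an explicit, type-checked second argument:
const chatgpt = new ChatGPTLLM('apikey', 'gpt-4o-mini');
```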
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@graf-research/llm-runner",
-  "version": "0.0.12",
+  "version": "0.0.13",
   "main": "dist/index.js",
   "scripts": {
     "build": "rm -rf dist && tsc",
@@ -19,6 +19,7 @@
     "typescript": "^5.7.3"
   },
   "dependencies": {
+    "@anthropic-ai/sdk": "^0.38.0",
     "lodash": "^4.17.21",
     "ollama": "^0.5.13",
     "openai": "^4.85.3",