@graf-research/llm-runner 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/base/generic-llm.d.ts ADDED
@@ -0,0 +1,26 @@
+ import { Readable } from 'node:stream';
+ export declare namespace GenericLLM {
+     type OnMessage = (chunk: string, is_complete: boolean) => void;
+     interface StreamResponse {
+         cancel(): void;
+         stream(onMessage: OnMessage): Promise<void>;
+     }
+     abstract class ChatSessionManager<SessionModel, ChatModel> {
+         abstract newSession(): Promise<SessionModel>;
+         abstract getChatSession(id_session: string): Promise<SessionModel>;
+         abstract saveMessage(messages: string[], role: string, id_session: string): Promise<void>;
+         abstract retrieveHistory(id_session: string): Promise<ChatModel[]>;
+     }
+     abstract class GenericLLM {
+         abstract stream(messages: string[], id_session: string): Promise<StreamResponse>;
+         abstract streamNoContext(messages: string[]): Promise<GenericLLM.StreamResponse>;
+         abstract ask(messages: string[], id_session: string): Promise<string>;
+         abstract askNoContext(messages: string[]): Promise<string>;
+     }
+     abstract class BaseLLM<SessionModel, ChatModel> extends GenericLLM {
+         chat_session_manager: ChatSessionManager<SessionModel, ChatModel>;
+         constructor(chat_session_manager: ChatSessionManager<SessionModel, ChatModel>);
+         protected abstract streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+         protected abstract chat(messages: string[], id_session: string | null): Promise<string>;
+     }
+ }
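
The declarations above define the whole streaming contract: stream()/streamNoContext() resolve to a StreamResponse, whose stream(onMessage) callback receives string chunks until a final call with is_complete set to true, and cancel() aborts the underlying request. A minimal consumption sketch (illustrative only, not part of the package; `llm` is assumed to be any concrete GenericLLM.GenericLLM implementation):

// Sketch: accumulate streamed chunks into one reply, based on the OnMessage/StreamResponse types above.
async function collectReply(llm: GenericLLM.GenericLLM, id_session: string): Promise<string> {
    const response: GenericLLM.StreamResponse = await llm.stream(['Hello!'], id_session);
    let full = '';
    await response.stream((chunk, is_complete) => {
        if (!is_complete) full += chunk;   // intermediate chunk
        // is_complete === true is the final, empty sentinel call
    });
    return full;                           // stream() resolves after the sentinel
    // response.cancel() could be called from elsewhere to abort mid-stream
}
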
package/dist/base/generic-llm.js ADDED
@@ -0,0 +1,19 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.GenericLLM = void 0;
+ var GenericLLM;
+ (function (GenericLLM_1) {
+     class ChatSessionManager {
+     }
+     GenericLLM_1.ChatSessionManager = ChatSessionManager;
+     class GenericLLM {
+     }
+     GenericLLM_1.GenericLLM = GenericLLM;
+     class BaseLLM extends GenericLLM {
+         constructor(chat_session_manager) {
+             super();
+             this.chat_session_manager = chat_session_manager;
+         }
+     }
+     GenericLLM_1.BaseLLM = BaseLLM;
+ })(GenericLLM || (exports.GenericLLM = GenericLLM = {}));
package/dist/base/llm-runner.d.ts ADDED
@@ -0,0 +1,31 @@
+ import { GenericLLM } from "./generic-llm";
+ export declare namespace LLMRunner {
+     interface Message {
+         content: string;
+         role: string;
+     }
+     interface ChatSession {
+         id: string;
+         list_message: Message[];
+     }
+     /**
+      * Chat Session Manager Implementation
+      */
+     class SessionManager implements GenericLLM.ChatSessionManager<ChatSession, Message> {
+         list_session: ChatSession[];
+         newSession(): Promise<ChatSession>;
+         getSession(id_session: string): Promise<ChatSession | undefined>;
+         getChatSession(id_session: string): Promise<ChatSession>;
+         saveMessage(messages: string[], role: string, id_session: string): Promise<void>;
+         retrieveHistory(id_session: string): Promise<Message[]>;
+     }
+     /**
+      * Abstract Base LLM Class
+      */
+     abstract class BaseLLM extends GenericLLM.BaseLLM<ChatSession, Message> {
+         stream(messages: string[], id_session: string): Promise<GenericLLM.StreamResponse>;
+         streamNoContext(messages: string[]): Promise<GenericLLM.StreamResponse>;
+         ask(messages: string[], id_session: string): Promise<string>;
+         askNoContext(messages: string[]): Promise<string>;
+     }
+ }
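
LLMRunner.BaseLLM leaves only the two protected hooks inherited from GenericLLM.BaseLLM unimplemented, so a new provider mainly has to fill in streamChat() and chat(). A hypothetical sketch under the same contract used by the ChatGPT and Ollama implementations further down in this diff (push string chunks into the Readable, then push(null) to finish); EchoLLM is illustrative, not part of the package:

import { Readable } from 'node:stream';
import { LLMRunner } from '@graf-research/llm-runner';

// Sketch: a trivial provider that "answers" by echoing the prompt back word by word.
class EchoLLM extends LLMRunner.BaseLLM {
    protected async streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void> {
        for (const word of messages.join(' ').split(' ')) {
            if (ac.signal.aborted) break;  // honour cancel()
            stream.push(`${word} `);       // one chunk per word
        }
        stream.push(null);                 // end of stream
    }
    protected async chat(messages: string[], id_session: string | null): Promise<string> {
        return messages.join(' ');         // non-streaming path used by ask()/askNoContext()
    }
}

// The inherited constructor takes a session manager, e.g. the in-memory one declared above:
// const llm = new EchoLLM(new LLMRunner.SessionManager());
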
package/dist/base/llm-runner.js ADDED
@@ -0,0 +1,160 @@
+ "use strict";
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+     function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+     return new (P || (P = Promise))(function (resolve, reject) {
+         function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+         function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+         function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+         step((generator = generator.apply(thisArg, _arguments || [])).next());
+     });
+ };
+ var __asyncValues = (this && this.__asyncValues) || function (o) {
+     if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+     var m = o[Symbol.asyncIterator], i;
+     return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+     function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+     function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.LLMRunner = void 0;
+ const generic_llm_1 = require("./generic-llm");
+ const uuid_1 = require("uuid");
+ const node_stream_1 = require("node:stream");
+ var LLMRunner;
+ (function (LLMRunner) {
+     /**
+      * Chat Session Manager Implementation
+      */
+     class SessionManager {
+         constructor() {
+             this.list_session = [];
+         }
+         newSession() {
+             return __awaiter(this, void 0, void 0, function* () {
+                 const session = {
+                     id: (0, uuid_1.v4)(),
+                     list_message: []
+                 };
+                 this.list_session.push(session);
+                 return session;
+             });
+         }
+         getSession(id_session) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 return this.list_session.find(s => s.id == id_session);
+             });
+         }
+         getChatSession(id_session) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 const cs = this.list_session.find(s => s.id == id_session);
+                 if (!cs) {
+                     throw new Error('Session not found.');
+                 }
+                 return cs;
+             });
+         }
+         saveMessage(messages, role, id_session) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 const cs = yield this.getChatSession(id_session);
+                 cs.list_message.push(...messages.map(content => ({
+                     role,
+                     content
+                 })));
+             });
+         }
+         retrieveHistory(id_session) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 const cs = yield this.getChatSession(id_session);
+                 return cs.list_message;
+             });
+         }
+     }
+     LLMRunner.SessionManager = SessionManager;
+     /**
+      * Abstract Base LLM Class
+      */
+     class BaseLLM extends generic_llm_1.GenericLLM.BaseLLM {
+         stream(messages, id_session) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 const ac = new AbortController();
+                 const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
+                 this.streamChat(messages, id_session, stream, ac);
+                 return {
+                     stream: (onMessage) => __awaiter(this, void 0, void 0, function* () {
+                         var _a, e_1, _b, _c;
+                         let complete_message = ``;
+                         try {
+                             for (var _d = true, stream_1 = __asyncValues(stream), stream_1_1; stream_1_1 = yield stream_1.next(), _a = stream_1_1.done, !_a; _d = true) {
+                                 _c = stream_1_1.value;
+                                 _d = false;
+                                 const chunk = _c;
+                                 complete_message = `${complete_message}${chunk}`;
+                                 onMessage(chunk, false);
+                             }
+                         }
+                         catch (e_1_1) { e_1 = { error: e_1_1 }; }
+                         finally {
+                             try {
+                                 if (!_d && !_a && (_b = stream_1.return)) yield _b.call(stream_1);
+                             }
+                             finally { if (e_1) throw e_1.error; }
+                         }
+                         yield this.chat_session_manager.saveMessage(messages, 'user', id_session);
+                         yield this.chat_session_manager.saveMessage([complete_message], 'assistant', id_session);
+                         onMessage('', true);
+                     }),
+                     cancel() {
+                         ac.abort();
+                     }
+                 };
+             });
+         }
+         streamNoContext(messages) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 const ac = new AbortController();
+                 const stream = new node_stream_1.Readable({ objectMode: true, read() { } });
+                 this.streamChat(messages, null, stream, ac);
+                 return {
+                     stream: (onMessage) => __awaiter(this, void 0, void 0, function* () {
+                         var _a, e_2, _b, _c;
+                         let complete_message = ``;
+                         try {
+                             for (var _d = true, stream_2 = __asyncValues(stream), stream_2_1; stream_2_1 = yield stream_2.next(), _a = stream_2_1.done, !_a; _d = true) {
+                                 _c = stream_2_1.value;
+                                 _d = false;
+                                 const chunk = _c;
+                                 complete_message = `${complete_message}${chunk}`;
+                                 onMessage(chunk, false);
+                             }
+                         }
+                         catch (e_2_1) { e_2 = { error: e_2_1 }; }
+                         finally {
+                             try {
+                                 if (!_d && !_a && (_b = stream_2.return)) yield _b.call(stream_2);
+                             }
+                             finally { if (e_2) throw e_2.error; }
+                         }
+                         onMessage('', true);
+                     }),
+                     cancel() {
+                         ac.abort();
+                     }
+                 };
+             });
+         }
+         ask(messages, id_session) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 const res = yield this.chat(messages, id_session);
+                 yield this.chat_session_manager.saveMessage(messages, 'user', id_session);
+                 yield this.chat_session_manager.saveMessage([res], 'assistant', id_session);
+                 return res;
+             });
+         }
+         askNoContext(messages) {
+             return __awaiter(this, void 0, void 0, function* () {
+                 return yield this.chat(messages, null);
+             });
+         }
+     }
+     LLMRunner.BaseLLM = BaseLLM;
+ })(LLMRunner || (exports.LLMRunner = LLMRunner = {}));
package/dist/index.d.ts ADDED
@@ -0,0 +1,6 @@
+ import { LLMRunner } from "./base/llm-runner";
+ import { ChatGPTLLM } from "./platform/chatgpt";
+ import { OllamaLLM } from "./platform/ollama";
+ import { NativeStep } from "./multistep";
+ import { GenericLLM } from "./base/generic-llm";
+ export { GenericLLM, LLMRunner, ChatGPTLLM, OllamaLLM, NativeStep, };
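
These five re-exports are the public surface of the package. A usage sketch (illustrative; the model name, host, and environment-variable handling below are assumptions, not defaults shipped by the package):

import { ChatGPTLLM, OllamaLLM } from '@graf-research/llm-runner';

async function demo() {
    // ChatGPT-backed runner; falls back to the built-in in-memory SessionManager.
    const gpt = new ChatGPTLLM(process.env.OPENAI_API_KEY ?? '');
    const session = await gpt.chat_session_manager.newSession();
    const answer = await gpt.ask(['Summarise RFC 2119 in one sentence.'], session.id);
    console.log(answer);

    // Ollama-backed runner against a local daemon (host and model are assumed values).
    const local = new OllamaLLM('http://localhost:11434', 'llama3');
    const res = await local.streamNoContext(['Write a haiku about TypeScript.']);
    await res.stream((chunk, is_complete) => {
        if (!is_complete) process.stdout.write(chunk);
    });
}

demo().catch(console.error);
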
package/dist/index.js ADDED
@@ -0,0 +1,13 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.NativeStep = exports.OllamaLLM = exports.ChatGPTLLM = exports.LLMRunner = exports.GenericLLM = void 0;
+ const llm_runner_1 = require("./base/llm-runner");
+ Object.defineProperty(exports, "LLMRunner", { enumerable: true, get: function () { return llm_runner_1.LLMRunner; } });
+ const chatgpt_1 = require("./platform/chatgpt");
+ Object.defineProperty(exports, "ChatGPTLLM", { enumerable: true, get: function () { return chatgpt_1.ChatGPTLLM; } });
+ const ollama_1 = require("./platform/ollama");
+ Object.defineProperty(exports, "OllamaLLM", { enumerable: true, get: function () { return ollama_1.OllamaLLM; } });
+ const multistep_1 = require("./multistep");
+ Object.defineProperty(exports, "NativeStep", { enumerable: true, get: function () { return multistep_1.NativeStep; } });
+ const generic_llm_1 = require("./base/generic-llm");
+ Object.defineProperty(exports, "GenericLLM", { enumerable: true, get: function () { return generic_llm_1.GenericLLM; } });
package/dist/multistep.d.ts ADDED
@@ -0,0 +1,17 @@
+ import { GenericLLM } from "./base/generic-llm";
+ import { LLMRunner } from "./base/llm-runner";
+ export declare namespace NativeStep {
+     interface StreamResponseWithFinalAnswer<T> {
+         cancel(): void;
+         stream(onMessage: GenericLLM.OnMessage): Promise<T>;
+     }
+     type Resolver<T, U = any> = (response: string, data?: U) => Promise<T>;
+     function askNormal(llm: LLMRunner.BaseLLM, q: string, session_id?: string): Promise<string>;
+     function streamNormal(llm: LLMRunner.BaseLLM, q: string, session_id?: string): Promise<StreamResponseWithFinalAnswer<string>>;
+     function askYesNo(llm: LLMRunner.BaseLLM, q: string, session_id?: string): Promise<boolean>;
+     function streamYesNo(llm: LLMRunner.BaseLLM, q: string, session_id?: string): Promise<StreamResponseWithFinalAnswer<boolean>>;
+     function askChoose(llm: LLMRunner.BaseLLM, q: string, options: string[], session_id?: string): Promise<string>;
+     function streamAskChoose(llm: LLMRunner.BaseLLM, q: string, options: string[], session_id?: string): Promise<StreamResponseWithFinalAnswer<string>>;
+     function askPlan(llm: LLMRunner.BaseLLM, q: string, session_id?: string): Promise<string[]>;
+     function streamPlan(llm: LLMRunner.BaseLLM, q: string, session_id?: string): Promise<StreamResponseWithFinalAnswer<string[]>>;
+ }
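
NativeStep wraps any LLMRunner.BaseLLM in prompt patterns with typed results: plain text, yes/no booleans, single-choice selection, and line-per-step plans, each in an ask and a stream variant. A short sketch (illustrative only; the questions and the `llm` instance are assumptions):

import { NativeStep, LLMRunner } from '@graf-research/llm-runner';

async function triage(llm: LLMRunner.BaseLLM) {
    const urgent: boolean = await NativeStep.askYesNo(llm, 'Customer reports total data loss. Is this urgent?');
    const team: string = await NativeStep.askChoose(llm, 'Which team should own this ticket?', ['backend', 'frontend', 'infra']);
    const steps: string[] = await NativeStep.askPlan(llm, 'Outline how to restore the customer database from backup.');

    // Stream variant: chunks arrive via the callback, and stream() resolves to the typed final answer.
    const res = await NativeStep.streamPlan(llm, 'Outline a rollback procedure.');
    const plan: string[] = await res.stream((chunk, done) => { if (!done) process.stdout.write(chunk); });

    return { urgent, team, steps, plan };
}
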
package/dist/multistep.js ADDED
@@ -0,0 +1,149 @@
+ "use strict";
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+     function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+     return new (P || (P = Promise))(function (resolve, reject) {
+         function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+         function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+         function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+         step((generator = generator.apply(thisArg, _arguments || [])).next());
+     });
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.NativeStep = void 0;
+ var NativeStep;
+ (function (NativeStep) {
+     function ask(llm, prompt, resolver, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const res = session_id ? yield llm.ask([prompt], session_id) : yield llm.askNoContext([prompt]);
+             return yield resolver(res);
+         });
+     }
+     function stream(llm, prompt, resolver, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const res = session_id ? yield llm.stream([prompt], session_id) : yield llm.streamNoContext([prompt]);
+             return {
+                 cancel() {
+                     res.cancel();
+                 },
+                 stream(onMessage) {
+                     return __awaiter(this, void 0, void 0, function* () {
+                         return new Promise((resolve, reject) => {
+                             let complete_message = '';
+                             res.stream((chunk, is_complete) => __awaiter(this, void 0, void 0, function* () {
+                                 onMessage(chunk, is_complete);
+                                 if (is_complete) {
+                                     try {
+                                         resolve(yield resolver(complete_message));
+                                     }
+                                     catch (err) {
+                                         reject(err);
+                                     }
+                                     return;
+                                 }
+                                 else {
+                                     complete_message = `${complete_message}${chunk}`;
+                                 }
+                             }));
+                         });
+                     });
+                 },
+             };
+         });
+     }
+     // ================================= //
+     // Normal
+     // ================================= //
+     const normal_resolver = (response) => __awaiter(this, void 0, void 0, function* () { return response; });
+     function askNormal(llm, q, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             return yield ask(llm, q, normal_resolver, session_id);
+         });
+     }
+     NativeStep.askNormal = askNormal;
+     function streamNormal(llm, q, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             return yield stream(llm, q, normal_resolver, session_id);
+         });
+     }
+     NativeStep.streamNormal = streamNormal;
+     // ================================= //
+     // Yes/No
+     // ================================= //
+     const yes_no_resolver = (response) => __awaiter(this, void 0, void 0, function* () { return response.includes('1'); });
+     function askYesNo(llm, q, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const prompt = `Respond 1 if "Yes" or 0 for "No" to this question: ${q}`;
+             return yield ask(llm, prompt, yes_no_resolver, session_id);
+         });
+     }
+     NativeStep.askYesNo = askYesNo;
+     function streamYesNo(llm, q, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const prompt = `Respond 1 if "Yes" or 0 for "No" to this question: ${q}`;
+             return yield stream(llm, prompt, yes_no_resolver, session_id);
+         });
+     }
+     NativeStep.streamYesNo = streamYesNo;
+     // ================================= //
+     // Choose
+     // ================================= //
+     function generateChooseResolver(options) {
+         return (response) => __awaiter(this, void 0, void 0, function* () {
+             const selected = options.sort((a, b) => b.length - a.length).find(a => response.includes(a));
+             if (!selected) {
+                 throw new Error(`Unexpected LLM answer: ${response}`);
+             }
+             return selected;
+         });
+     }
+     function askChoose(llm, q, options, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const prompt = `Respond this question: ${q}, with one of these options: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available`;
+             return yield ask(llm, prompt, generateChooseResolver(options), session_id);
+         });
+     }
+     NativeStep.askChoose = askChoose;
+     function streamAskChoose(llm, q, options, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const prompt = `Respond this question: ${q}, with one of these options: ${options.map(a => `"${a}"`).join(',')}. Answer with only one option, even when multiple answer is available`;
+             return yield stream(llm, prompt, generateChooseResolver(options), session_id);
+         });
+     }
+     NativeStep.streamAskChoose = streamAskChoose;
+     // ================================= //
+     // Plan
+     // ================================= //
+     const plan_resolver = (response) => __awaiter(this, void 0, void 0, function* () { return response.split('\n').map(a => a.trim()).filter(Boolean); });
+     function askPlan(llm, q, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const session = yield llm.chat_session_manager.newSession();
+             const yn = yield askYesNo(llm, [
+                 `Look at this instruction`,
+                 q,
+                 `Is the instruction above can be broken down into numbered step by step plan?`
+             ].join('\n'), session_id !== null && session_id !== void 0 ? session_id : session.id);
+             if (!yn) {
+                 throw new Error(`Your prompt cant be broken down into numbered step by step plan`);
+             }
+             const prompt = `Responds previous instruction with numbered step by step plans, where each step must be one line only.`;
+             return yield ask(llm, prompt, plan_resolver, session_id);
+         });
+     }
+     NativeStep.askPlan = askPlan;
+     function streamPlan(llm, q, session_id) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const session = yield llm.chat_session_manager.newSession();
+             const yn = yield askYesNo(llm, [
+                 `Look at this instruction`,
+                 q,
+                 `Is the instruction above can be broken down into numbered step by step plan?`
+             ].join('\n'), session_id !== null && session_id !== void 0 ? session_id : session.id);
+             if (!yn) {
+                 throw new Error(`Your prompt cant be broken down into numbered step by step plan`);
+             }
+             const prompt = `Responds previous instruction with numbered step by step plans, where each step must be one line only.`;
+             return yield stream(llm, prompt, plan_resolver, session_id !== null && session_id !== void 0 ? session_id : session.id);
+         });
+     }
+     NativeStep.streamPlan = streamPlan;
+ })(NativeStep || (exports.NativeStep = NativeStep = {}));
package/dist/platform/chatgpt.d.ts ADDED
@@ -0,0 +1,12 @@
+ import { LLMRunner } from "../base/llm-runner";
+ import { GenericLLM } from "../base/generic-llm";
+ import { Readable } from 'node:stream';
+ /**
+  * Chat GPT Implementation
+  */
+ export declare class ChatGPTLLM extends LLMRunner.BaseLLM {
+     private cgpt;
+     constructor(api_key: string, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>);
+     protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+     protected chat(messages: string[], id_session: string | null): Promise<string>;
+ }
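
The optional second constructor argument lets callers replace the default in-memory SessionManager with their own persistence, as long as it satisfies GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>. A hypothetical Map-backed sketch (illustrative; MapSessionManager is not part of the package):

import { randomUUID } from 'node:crypto';
import { ChatGPTLLM, GenericLLM, LLMRunner } from '@graf-research/llm-runner';

// Sketch: the same four methods the package's own SessionManager implements, backed by a Map.
class MapSessionManager extends GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message> {
    private sessions = new Map<string, LLMRunner.ChatSession>();

    async newSession(): Promise<LLMRunner.ChatSession> {
        const session = { id: randomUUID(), list_message: [] };
        this.sessions.set(session.id, session);
        return session;
    }
    async getChatSession(id_session: string): Promise<LLMRunner.ChatSession> {
        const session = this.sessions.get(id_session);
        if (!session) throw new Error('Session not found.');
        return session;
    }
    async saveMessage(messages: string[], role: string, id_session: string): Promise<void> {
        const session = await this.getChatSession(id_session);
        session.list_message.push(...messages.map(content => ({ role, content })));
    }
    async retrieveHistory(id_session: string): Promise<LLMRunner.Message[]> {
        const session = await this.getChatSession(id_session);
        return session.list_message;
    }
}

// const gpt = new ChatGPTLLM(process.env.OPENAI_API_KEY ?? '', new MapSessionManager());
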
package/dist/platform/chatgpt.js ADDED
@@ -0,0 +1,96 @@
+ "use strict";
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+     function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+     return new (P || (P = Promise))(function (resolve, reject) {
+         function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+         function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+         function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+         step((generator = generator.apply(thisArg, _arguments || [])).next());
+     });
+ };
+ var __asyncValues = (this && this.__asyncValues) || function (o) {
+     if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+     var m = o[Symbol.asyncIterator], i;
+     return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+     function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+     function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+ };
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ChatGPTLLM = void 0;
+ const openai_1 = __importDefault(require("openai"));
+ const llm_runner_1 = require("../base/llm-runner");
+ /**
+  * Chat GPT Implementation
+  */
+ class ChatGPTLLM extends llm_runner_1.LLMRunner.BaseLLM {
+     constructor(api_key, chat_session_manager) {
+         super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
+         this.cgpt = new openai_1.default({ apiKey: api_key });
+     }
+     streamChat(messages, id_session, stream, ac) {
+         return __awaiter(this, void 0, void 0, function* () {
+             var _a, e_1, _b, _c;
+             var _d, _e;
+             const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+             const chat_messages = [
+                 ...chat_history.map((msg) => { var _a; return ({ role: msg.role, content: (_a = msg.content) !== null && _a !== void 0 ? _a : '' }); }),
+                 ...messages.map(content => ({ role: 'user', content }))
+             ];
+             if (ac.signal.aborted) {
+                 stream.push(null);
+                 return;
+             }
+             const cgpt_stream = yield this.cgpt.chat.completions.create({
+                 model: "gpt-4o-mini",
+                 store: false,
+                 stream: true,
+                 n: 1,
+                 messages: chat_messages
+             });
+             ac.signal.addEventListener('abort', () => cgpt_stream.controller.abort());
+             try {
+                 for (var _f = true, cgpt_stream_1 = __asyncValues(cgpt_stream), cgpt_stream_1_1; cgpt_stream_1_1 = yield cgpt_stream_1.next(), _a = cgpt_stream_1_1.done, !_a; _f = true) {
+                     _c = cgpt_stream_1_1.value;
+                     _f = false;
+                     const chunk = _c;
+                     const c = chunk;
+                     const first_choice = (_d = c.choices) === null || _d === void 0 ? void 0 : _d[0];
+                     const delta = (_e = first_choice.delta.content) !== null && _e !== void 0 ? _e : '';
+                     if (!delta) {
+                         continue;
+                     }
+                     stream.push(delta);
+                 }
+             }
+             catch (e_1_1) { e_1 = { error: e_1_1 }; }
+             finally {
+                 try {
+                     if (!_f && !_a && (_b = cgpt_stream_1.return)) yield _b.call(cgpt_stream_1);
+                 }
+                 finally { if (e_1) throw e_1.error; }
+             }
+             stream.push(null);
+         });
+     }
+     chat(messages, id_session) {
+         return __awaiter(this, void 0, void 0, function* () {
+             var _a, _b;
+             const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+             const chat_messages = [
+                 ...chat_history.map((msg) => { var _a; return ({ role: msg.role, content: (_a = msg.content) !== null && _a !== void 0 ? _a : '' }); }),
+                 ...messages.map(content => ({ role: 'user', content }))
+             ];
+             const res = yield this.cgpt.chat.completions.create({
+                 model: "gpt-4o-mini",
+                 store: false,
+                 n: 1,
+                 messages: chat_messages
+             });
+             return (_b = (_a = res.choices) === null || _a === void 0 ? void 0 : _a[0].message.content) !== null && _b !== void 0 ? _b : '';
+         });
+     }
+ }
+ exports.ChatGPTLLM = ChatGPTLLM;
package/dist/platform/ollama.d.ts ADDED
@@ -0,0 +1,13 @@
+ import { LLMRunner } from "../base/llm-runner";
+ import { GenericLLM } from "../base/generic-llm";
+ import { Readable } from 'node:stream';
+ /**
+  * Ollama Implementation
+  */
+ export declare class OllamaLLM extends LLMRunner.BaseLLM {
+     private ollama;
+     private model;
+     constructor(host: string, model: string, chat_session_manager?: GenericLLM.ChatSessionManager<LLMRunner.ChatSession, LLMRunner.Message>);
+     protected streamChat(messages: string[], id_session: string | null, stream: Readable, ac: AbortController): Promise<void>;
+     protected chat(messages: string[], id_session: string | null): Promise<string>;
+ }
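
Both provider classes wire the AbortController created in BaseLLM.stream()/streamNoContext() to the underlying SDK, so cancel() on the returned StreamResponse stops the in-flight request. A cancellation sketch (illustrative; the timeout, host, and model are assumptions, and an aborted stream may surface as a rejected promise depending on the SDK):

import { OllamaLLM } from '@graf-research/llm-runner';

async function boundedAnswer(llm: OllamaLLM, question: string): Promise<string> {
    const res = await llm.streamNoContext([question]);
    const timer = setTimeout(() => res.cancel(), 10_000);  // give up after 10s
    let text = '';
    try {
        await res.stream((chunk, is_complete) => {
            if (!is_complete) text += chunk;
        });
    } catch (err) {
        // The abort may propagate as an error from the provider SDK; keep the partial text.
    } finally {
        clearTimeout(timer);
    }
    return text;
}
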
package/dist/platform/ollama.js ADDED
@@ -0,0 +1,79 @@
+ "use strict";
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+     function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+     return new (P || (P = Promise))(function (resolve, reject) {
+         function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+         function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+         function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+         step((generator = generator.apply(thisArg, _arguments || [])).next());
+     });
+ };
+ var __asyncValues = (this && this.__asyncValues) || function (o) {
+     if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+     var m = o[Symbol.asyncIterator], i;
+     return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+     function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+     function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OllamaLLM = void 0;
+ const ollama_1 = require("ollama");
+ const llm_runner_1 = require("../base/llm-runner");
+ /**
+  * Ollama Implementation
+  */
+ class OllamaLLM extends llm_runner_1.LLMRunner.BaseLLM {
+     constructor(host, model, chat_session_manager) {
+         super(chat_session_manager !== null && chat_session_manager !== void 0 ? chat_session_manager : new llm_runner_1.LLMRunner.SessionManager());
+         this.ollama = new ollama_1.Ollama({ host });
+         this.model = model;
+     }
+     streamChat(messages, id_session, stream, ac) {
+         return __awaiter(this, void 0, void 0, function* () {
+             var _a, e_1, _b, _c;
+             var _d;
+             const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+             const cgpt_stream = yield this.ollama.chat({
+                 model: this.model,
+                 messages: [
+                     ...chat_history,
+                     ...messages.map(content => ({
+                         role: 'user', content
+                     }))
+                 ],
+                 stream: true
+             });
+             ac.signal.addEventListener('abort', () => cgpt_stream.abort());
+             try {
+                 for (var _e = true, cgpt_stream_1 = __asyncValues(cgpt_stream), cgpt_stream_1_1; cgpt_stream_1_1 = yield cgpt_stream_1.next(), _a = cgpt_stream_1_1.done, !_a; _e = true) {
+                     _c = cgpt_stream_1_1.value;
+                     _e = false;
+                     const chunk = _c;
+                     stream.push((_d = chunk.message.content) !== null && _d !== void 0 ? _d : '');
+                 }
+             }
+             catch (e_1_1) { e_1 = { error: e_1_1 }; }
+             finally {
+                 try {
+                     if (!_e && !_a && (_b = cgpt_stream_1.return)) yield _b.call(cgpt_stream_1);
+                 }
+                 finally { if (e_1) throw e_1.error; }
+             }
+             stream.push(null);
+         });
+     }
+     chat(messages, id_session) {
+         return __awaiter(this, void 0, void 0, function* () {
+             const chat_history = id_session ? yield this.chat_session_manager.retrieveHistory(id_session) : [];
+             const res = yield this.ollama.chat({
+                 model: this.model,
+                 messages: [
+                     ...chat_history,
+                     ...messages.map(content => ({ role: 'user', content }))
+                 ]
+             });
+             return res.message.content;
+         });
+     }
+ }
+ exports.OllamaLLM = OllamaLLM;
package/package.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "name": "@graf-research/llm-runner",
+   "version": "0.0.1",
+   "main": "dist/index.js",
+   "scripts": {
+     "build": "rm -rf dist && tsc",
+     "start": "node dist"
+   },
+   "files": [
+     "dist",
+     "package.json"
+   ],
+   "keywords": [],
+   "author": "",
+   "license": "ISC",
+   "description": "",
+   "devDependencies": {
+     "@types/compression": "^1.7.5",
+     "@types/express": "^5.0.0",
+     "@types/lodash": "^4.17.15",
+     "@types/pg": "^8.11.11",
+     "typescript": "^5.7.3"
+   },
+   "dependencies": {
+     "lodash": "^4.17.21",
+     "ollama": "^0.5.13",
+     "openai": "^4.85.3",
+     "uuid": "^11.1.0"
+   }
+ }