@node-llm/orm 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +64 -0
- package/README.md +353 -0
- package/bin/cli.js +157 -0
- package/package.json +72 -0
- package/schema.prisma +84 -0
- package/src/BaseChat.ts +168 -0
- package/src/adapters/prisma/Chat.ts +333 -0
- package/src/adapters/prisma/index.ts +27 -0
- package/src/index.ts +40 -0
- package/test/Chat.test.ts +543 -0
- package/tsconfig.json +14 -0
- package/vitest.config.ts +13 -0
package/schema.prisma
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
// This is the reference Prisma schema for @node-llm/orm
// Users should copy this into their own prisma/schema.prisma

generator client {
  provider = "prisma-client-js"
}

datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
}

// NodeLLM ORM Models (matches @node-llm/orm schema)

// One row per conversation thread. Holds the session-level defaults
// (model, provider, instructions) and owns messages and request metrics.
model LlmChat {
  id           String   @id @default(uuid())
  model        String?
  provider     String?
  instructions String?  // System instructions
  metadata     Json?    // JSON metadata
  createdAt    DateTime @default(now())
  updatedAt    DateTime @updatedAt
  messages     LlmMessage[]
  requests     LlmRequest[]
}

// A single conversation turn. Deleted automatically when its chat is
// deleted (onDelete: Cascade on the relation below).
model LlmMessage {
  id           String   @id @default(uuid())
  chatId       String
  role         String   // user, assistant, system, tool
  content      String?
  contentRaw   String?  // JSON raw payload
  reasoning    String?  // Chain of thought
  inputTokens  Int?
  outputTokens Int?
  modelId      String?
  provider     String?
  createdAt    DateTime @default(now())

  chat      LlmChat       @relation(fields: [chatId], references: [id], onDelete: Cascade)
  toolCalls LlmToolCall[]
  requests  LlmRequest[]

  @@index([chatId])
  @@index([createdAt])
}

// One tool invocation made while producing an assistant message.
// (messageId, toolCallId) is unique so an upsert/update can address a
// specific call by the provider-assigned id.
model LlmToolCall {
  id         String   @id @default(uuid())
  messageId  String
  toolCallId String   // ID from the provider
  name       String
  arguments  String   // JSON string
  thought    String?  // The LLM's reasoning for this tool call
  result     String?  // Tool execution result
  createdAt  DateTime @default(now())

  message LlmMessage @relation(fields: [messageId], references: [id], onDelete: Cascade)

  @@unique([messageId, toolCallId])
  @@index([messageId])
  @@index([createdAt])
}

// Metrics for a single upstream API call (tokens, latency, cost).
model LlmRequest {
  id        String  @id @default(uuid())
  chatId    String
  messageId String? // Optional because requests might fail before message creation

  provider     String
  model        String
  statusCode   Int
  duration     Int // milliseconds
  inputTokens  Int
  outputTokens Int
  cost         Float?

  createdAt DateTime @default(now())

  chat    LlmChat     @relation(fields: [chatId], references: [id], onDelete: Cascade)
  message LlmMessage? @relation(fields: [messageId], references: [id], onDelete: Cascade)

  @@index([chatId])
  @@index([createdAt])
}
|
package/src/BaseChat.ts
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
/* eslint-disable @typescript-eslint/no-explicit-any */
|
|
2
|
+
|
|
3
|
+
export interface ChatRecord {
|
|
4
|
+
id: string;
|
|
5
|
+
model?: string | null;
|
|
6
|
+
provider?: string | null;
|
|
7
|
+
instructions?: string | null;
|
|
8
|
+
metadata?: string | null;
|
|
9
|
+
createdAt: Date;
|
|
10
|
+
updatedAt: Date;
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
export interface ChatOptions {
|
|
14
|
+
model?: string;
|
|
15
|
+
provider?: string;
|
|
16
|
+
instructions?: string;
|
|
17
|
+
metadata?: Record<string, any>;
|
|
18
|
+
debug?: boolean;
|
|
19
|
+
persistence?: {
|
|
20
|
+
toolCalls?: boolean; // Default: true
|
|
21
|
+
requests?: boolean; // Default: true
|
|
22
|
+
};
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
export interface UserHooks {
|
|
26
|
+
onToolCallStart: ((call: any) => void | Promise<void>)[];
|
|
27
|
+
onToolCallEnd: ((call: any, result: any) => void | Promise<void>)[];
|
|
28
|
+
afterResponse: ((resp: any) => any | Promise<any>)[];
|
|
29
|
+
onNewMessage: (() => void | Promise<void>)[];
|
|
30
|
+
onEndMessage: ((message: any) => void | Promise<void>)[];
|
|
31
|
+
onToolCallError: ((call: any, error: Error) => any | Promise<any>)[];
|
|
32
|
+
onConfirmToolCall: ((call: any) => boolean | Promise<boolean>)[];
|
|
33
|
+
onBeforeRequest: ((messages: any[]) => any | Promise<any>)[];
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* BaseChat - A generic base class for ORM chat implementations.
|
|
38
|
+
*/
|
|
39
|
+
export abstract class BaseChat<
|
|
40
|
+
R extends ChatRecord = ChatRecord,
|
|
41
|
+
O extends ChatOptions = ChatOptions
|
|
42
|
+
> {
|
|
43
|
+
public id: string;
|
|
44
|
+
protected localOptions: any = {};
|
|
45
|
+
protected customTools: any[] = [];
|
|
46
|
+
protected userHooks: UserHooks = {
|
|
47
|
+
onToolCallStart: [],
|
|
48
|
+
onToolCallEnd: [],
|
|
49
|
+
afterResponse: [],
|
|
50
|
+
onNewMessage: [],
|
|
51
|
+
onEndMessage: [],
|
|
52
|
+
onToolCallError: [],
|
|
53
|
+
onConfirmToolCall: [],
|
|
54
|
+
onBeforeRequest: []
|
|
55
|
+
};
|
|
56
|
+
|
|
57
|
+
constructor(
|
|
58
|
+
public record: R,
|
|
59
|
+
public options: O = {} as O
|
|
60
|
+
) {
|
|
61
|
+
this.id = record.id;
|
|
62
|
+
|
|
63
|
+
// Initialize local options from record/options
|
|
64
|
+
this.localOptions.instructions = options.instructions || record.instructions;
|
|
65
|
+
this.localOptions.model = options.model || record.model;
|
|
66
|
+
this.localOptions.provider = options.provider || record.provider;
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
protected log(...args: any[]) {
|
|
70
|
+
if (this.options?.debug) {
|
|
71
|
+
console.log(`[@node-llm/orm]`, ...args);
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
// --- Fluent Configuration Methods ---
|
|
76
|
+
|
|
77
|
+
withInstructions(instruction: string, options?: { replace?: boolean }): this {
|
|
78
|
+
if (options?.replace) {
|
|
79
|
+
this.localOptions.instructions = instruction;
|
|
80
|
+
} else {
|
|
81
|
+
this.localOptions.instructions = (this.localOptions.instructions || "") + "\n" + instruction;
|
|
82
|
+
}
|
|
83
|
+
return this;
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
system(instruction: string, options?: { replace?: boolean }): this {
|
|
87
|
+
return this.withInstructions(instruction, options);
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
withTemperature(temp: number): this {
|
|
91
|
+
this.localOptions.temperature = temp;
|
|
92
|
+
return this;
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
withModel(model: string): this {
|
|
96
|
+
this.localOptions.model = model;
|
|
97
|
+
return this;
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
withProvider(provider: string): this {
|
|
101
|
+
this.localOptions.provider = provider;
|
|
102
|
+
return this;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
withTools(tools: any[]): this {
|
|
106
|
+
this.customTools.push(...tools);
|
|
107
|
+
return this;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
withTool(tool: any): this {
|
|
111
|
+
this.customTools.push(tool);
|
|
112
|
+
return this;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
use(tool: any): this {
|
|
116
|
+
return this.withTool(tool);
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
withSchema(schema: any): this {
|
|
120
|
+
this.localOptions.schema = schema;
|
|
121
|
+
return this;
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
withParams(params: Record<string, any>): this {
|
|
125
|
+
this.localOptions.params = { ...(this.localOptions.params || {}), ...params };
|
|
126
|
+
return this;
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// --- Hook Registration ---
|
|
130
|
+
|
|
131
|
+
onToolCallStart(callback: (call: any) => void | Promise<void>): this {
|
|
132
|
+
this.userHooks.onToolCallStart.push(callback);
|
|
133
|
+
return this;
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
onToolCall(callback: (call: any) => void | Promise<void>): this {
|
|
137
|
+
return this.onToolCallStart(callback);
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
onToolCallEnd(callback: (call: any, result: any) => void | Promise<void>): this {
|
|
141
|
+
this.userHooks.onToolCallEnd.push(callback);
|
|
142
|
+
return this;
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
onToolResult(callback: (result: any) => void | Promise<void>): this {
|
|
146
|
+
return this.onToolCallEnd((_call, result) => callback(result));
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
afterResponse(callback: (resp: any) => any | Promise<any>): this {
|
|
150
|
+
this.userHooks.afterResponse.push(callback);
|
|
151
|
+
return this;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
onBeforeRequest(callback: (messages: any[]) => any | Promise<any>): this {
|
|
155
|
+
this.userHooks.onBeforeRequest.push(callback);
|
|
156
|
+
return this;
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
onNewMessage(callback: () => void | Promise<void>): this {
|
|
160
|
+
this.userHooks.onNewMessage.push(callback);
|
|
161
|
+
return this;
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
onEndMessage(callback: (message: any) => void | Promise<void>): this {
|
|
165
|
+
this.userHooks.onEndMessage.push(callback);
|
|
166
|
+
return this;
|
|
167
|
+
}
|
|
168
|
+
}
|
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
/* eslint-disable @typescript-eslint/no-explicit-any */
|
|
2
|
+
import type { PrismaClient } from "@prisma/client";
|
|
3
|
+
import type { NodeLLMCore } from "@node-llm/core";
|
|
4
|
+
import { BaseChat, type ChatRecord, type ChatOptions } from "../../BaseChat.js";
|
|
5
|
+
|
|
6
|
+
export { type ChatRecord, type ChatOptions };

/** Shape of a persisted message row (mirrors the LlmMessage Prisma model). */
export interface MessageRecord {
  id: string;
  chatId: string;
  role: string; // user, assistant, system, tool
  content: string | null;
  contentRaw: string | null; // JSON raw payload
  reasoning: string | null;
  inputTokens: number | null;
  outputTokens: number | null;
  modelId: string | null;
  provider: string | null;
  createdAt: Date;
}

/**
 * Overrides for the Prisma delegate names the adapter calls
 * (e.g. `prisma.chat`, `prisma.message`); all optional, with defaults
 * supplied by the Chat constructor.
 */
export interface TableNames {
  chat?: string;
  message?: string;
  toolCall?: string;
  request?: string;
}
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* Prisma-based Chat Implementation.
|
|
31
|
+
*/
|
|
32
|
+
export class Chat extends BaseChat {
|
|
33
|
+
private tables: Required<TableNames>;
|
|
34
|
+
private persistenceConfig: Required<NonNullable<ChatOptions["persistence"]>>;
|
|
35
|
+
|
|
36
|
+
constructor(
|
|
37
|
+
private prisma: PrismaClient,
|
|
38
|
+
private llm: NodeLLMCore,
|
|
39
|
+
record: ChatRecord,
|
|
40
|
+
options: ChatOptions = {},
|
|
41
|
+
tableNames: TableNames = {}
|
|
42
|
+
) {
|
|
43
|
+
super(record, options);
|
|
44
|
+
this.tables = {
|
|
45
|
+
chat: tableNames.chat || "chat",
|
|
46
|
+
message: tableNames.message || "message",
|
|
47
|
+
toolCall: tableNames.toolCall || "toolCall",
|
|
48
|
+
request: tableNames.request || "assistantRequest"
|
|
49
|
+
};
|
|
50
|
+
|
|
51
|
+
this.persistenceConfig = {
|
|
52
|
+
toolCalls: options.persistence?.toolCalls ?? true,
|
|
53
|
+
requests: options.persistence?.requests ?? true
|
|
54
|
+
};
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Internal prep for core Chat instance with persistence hooks.
|
|
59
|
+
*/
|
|
60
|
+
private async prepareCoreChat(history: any[] = [], assistantMessageId: string) {
|
|
61
|
+
const provider = this.localOptions.provider || this.record.provider;
|
|
62
|
+
const model = this.localOptions.model || this.record.model;
|
|
63
|
+
|
|
64
|
+
const llmInstance = provider ? this.llm.withProvider(provider as string) : this.llm;
|
|
65
|
+
|
|
66
|
+
const coreChat = llmInstance.chat(model || undefined, {
|
|
67
|
+
messages: history,
|
|
68
|
+
...this.localOptions
|
|
69
|
+
}) as any;
|
|
70
|
+
|
|
71
|
+
// Register tools
|
|
72
|
+
if (this.customTools.length > 0) {
|
|
73
|
+
coreChat.withTools(this.customTools);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// --- Persistence Hooks ---
|
|
77
|
+
|
|
78
|
+
coreChat.onToolCallStart(async (call: any) => {
|
|
79
|
+
// Only persist if toolCalls persistence is enabled
|
|
80
|
+
if (this.persistenceConfig.toolCalls) {
|
|
81
|
+
const toolCallModel = this.tables.toolCall;
|
|
82
|
+
await (this.prisma as any)[toolCallModel].create({
|
|
83
|
+
data: {
|
|
84
|
+
messageId: assistantMessageId,
|
|
85
|
+
toolCallId: call.id,
|
|
86
|
+
name: call.function?.name || "unknown",
|
|
87
|
+
arguments: JSON.stringify(call.function?.arguments || {}),
|
|
88
|
+
thought: (call as any).thought || null
|
|
89
|
+
}
|
|
90
|
+
});
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
// User hooks
|
|
94
|
+
for (const h of this.userHooks.onToolCallStart) await h(call);
|
|
95
|
+
});
|
|
96
|
+
|
|
97
|
+
coreChat.onToolCallEnd(async (call: any, result: any) => {
|
|
98
|
+
// Only persist if toolCalls persistence is enabled
|
|
99
|
+
if (this.persistenceConfig.toolCalls) {
|
|
100
|
+
const toolCallModel = this.tables.toolCall;
|
|
101
|
+
const resString = typeof result === "string" ? result : JSON.stringify(result);
|
|
102
|
+
|
|
103
|
+
await (this.prisma as any)[toolCallModel].update({
|
|
104
|
+
where: { messageId_toolCallId: { messageId: assistantMessageId, toolCallId: call.id } },
|
|
105
|
+
data: {
|
|
106
|
+
result: resString,
|
|
107
|
+
thought: (call as any).thought || null
|
|
108
|
+
}
|
|
109
|
+
});
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
// User hooks
|
|
113
|
+
for (const h of this.userHooks.onToolCallEnd) await h(call, result);
|
|
114
|
+
});
|
|
115
|
+
|
|
116
|
+
coreChat.afterResponse(async (finalResp: any) => {
|
|
117
|
+
this.log(
|
|
118
|
+
`Internal afterResponse triggered. Calling ${this.userHooks.afterResponse.length} user hooks.`
|
|
119
|
+
);
|
|
120
|
+
|
|
121
|
+
// User hooks
|
|
122
|
+
for (const h of this.userHooks.afterResponse) {
|
|
123
|
+
const modified = await h(finalResp);
|
|
124
|
+
if (modified) finalResp = modified;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// Only persist if requests persistence is enabled
|
|
128
|
+
if (this.persistenceConfig.requests) {
|
|
129
|
+
const modelName = this.tables.request;
|
|
130
|
+
await (this.prisma as any)[modelName].create({
|
|
131
|
+
data: {
|
|
132
|
+
chatId: this.id,
|
|
133
|
+
messageId: assistantMessageId,
|
|
134
|
+
provider: finalResp.provider || provider || "unknown",
|
|
135
|
+
model: finalResp.model || model || "unknown",
|
|
136
|
+
statusCode: 200,
|
|
137
|
+
duration: finalResp.latency || 0,
|
|
138
|
+
inputTokens: finalResp.usage?.input_tokens || 0,
|
|
139
|
+
outputTokens: finalResp.usage?.output_tokens || 0,
|
|
140
|
+
cost: finalResp.usage?.cost || 0
|
|
141
|
+
}
|
|
142
|
+
});
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
return finalResp;
|
|
146
|
+
});
|
|
147
|
+
|
|
148
|
+
// Other core hooks
|
|
149
|
+
if (this.userHooks.onNewMessage.length > 0) {
|
|
150
|
+
coreChat.onNewMessage(async () => {
|
|
151
|
+
for (const h of this.userHooks.onNewMessage) await h();
|
|
152
|
+
});
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
if (this.userHooks.onEndMessage.length > 0) {
|
|
156
|
+
coreChat.onEndMessage(async (msg: any) => {
|
|
157
|
+
for (const h of this.userHooks.onEndMessage) await h(msg);
|
|
158
|
+
});
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
if (this.userHooks.onBeforeRequest.length > 0) {
|
|
162
|
+
coreChat.beforeRequest(async (msgs: any) => {
|
|
163
|
+
let current = msgs;
|
|
164
|
+
for (const h of this.userHooks.onBeforeRequest) {
|
|
165
|
+
const mod = await h(current);
|
|
166
|
+
if (mod) current = mod;
|
|
167
|
+
}
|
|
168
|
+
return current;
|
|
169
|
+
});
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
return coreChat;
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
/**
|
|
176
|
+
* Send a message and persist the conversation.
|
|
177
|
+
*/
|
|
178
|
+
async ask(input: string): Promise<MessageRecord> {
|
|
179
|
+
const messageModel = this.tables.message;
|
|
180
|
+
const userMessage = await (this.prisma as any)[messageModel].create({
|
|
181
|
+
data: { chatId: this.id, role: "user", content: input }
|
|
182
|
+
});
|
|
183
|
+
|
|
184
|
+
const assistantMessage = await (this.prisma as any)[messageModel].create({
|
|
185
|
+
data: { chatId: this.id, role: "assistant", content: null }
|
|
186
|
+
});
|
|
187
|
+
|
|
188
|
+
try {
|
|
189
|
+
const historyRecords = await (this.prisma as any)[messageModel].findMany({
|
|
190
|
+
where: { chatId: this.id, id: { notIn: [userMessage!.id, assistantMessage!.id] } },
|
|
191
|
+
orderBy: { createdAt: "asc" }
|
|
192
|
+
});
|
|
193
|
+
|
|
194
|
+
const history = historyRecords.map((m: any) => ({
|
|
195
|
+
role: m.role,
|
|
196
|
+
content: m.content || ""
|
|
197
|
+
}));
|
|
198
|
+
|
|
199
|
+
const coreChat = await this.prepareCoreChat(history, assistantMessage!.id);
|
|
200
|
+
const response = await coreChat.ask(input);
|
|
201
|
+
|
|
202
|
+
return await (this.prisma as any)[messageModel].update({
|
|
203
|
+
where: { id: assistantMessage!.id },
|
|
204
|
+
data: {
|
|
205
|
+
content: response.content,
|
|
206
|
+
contentRaw: JSON.stringify(response.meta),
|
|
207
|
+
inputTokens: response.usage?.input_tokens || 0,
|
|
208
|
+
outputTokens: response.usage?.output_tokens || 0,
|
|
209
|
+
modelId: response.model || null,
|
|
210
|
+
provider: response.provider || null
|
|
211
|
+
}
|
|
212
|
+
});
|
|
213
|
+
} catch (error) {
|
|
214
|
+
await (this.prisma as any)[messageModel].delete({ where: { id: assistantMessage!.id } });
|
|
215
|
+
// await (this.prisma as any)[messageModel].delete({ where: { id: userMessage!.id } });
|
|
216
|
+
throw error;
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
/**
|
|
221
|
+
* Stream a response and persist the conversation.
|
|
222
|
+
*/
|
|
223
|
+
async *askStream(input: string): AsyncGenerator<string, MessageRecord, undefined> {
|
|
224
|
+
const messageModel = this.tables.message;
|
|
225
|
+
const userMessage = await (this.prisma as any)[messageModel].create({
|
|
226
|
+
data: { chatId: this.id, role: "user", content: input }
|
|
227
|
+
});
|
|
228
|
+
|
|
229
|
+
const assistantMessage = await (this.prisma as any)[messageModel].create({
|
|
230
|
+
data: { chatId: this.id, role: "assistant", content: null }
|
|
231
|
+
});
|
|
232
|
+
|
|
233
|
+
try {
|
|
234
|
+
const historyRecords = await (this.prisma as any)[messageModel].findMany({
|
|
235
|
+
where: { chatId: this.id, id: { notIn: [userMessage!.id, assistantMessage!.id] } },
|
|
236
|
+
orderBy: { createdAt: "asc" }
|
|
237
|
+
});
|
|
238
|
+
|
|
239
|
+
const history = historyRecords.map((m: any) => ({
|
|
240
|
+
role: m.role,
|
|
241
|
+
content: m.content || ""
|
|
242
|
+
}));
|
|
243
|
+
|
|
244
|
+
const coreChat = await this.prepareCoreChat(history, assistantMessage!.id);
|
|
245
|
+
const stream = coreChat.stream(input);
|
|
246
|
+
|
|
247
|
+
let fullContent = "";
|
|
248
|
+
let metadata: any = {};
|
|
249
|
+
|
|
250
|
+
for await (const chunk of stream) {
|
|
251
|
+
if (chunk.content) {
|
|
252
|
+
fullContent += chunk.content;
|
|
253
|
+
yield chunk.content;
|
|
254
|
+
}
|
|
255
|
+
if (chunk.usage) {
|
|
256
|
+
metadata = { ...metadata, ...chunk.usage };
|
|
257
|
+
}
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
return await (this.prisma as any)[messageModel].update({
|
|
261
|
+
where: { id: assistantMessage!.id },
|
|
262
|
+
data: {
|
|
263
|
+
content: fullContent,
|
|
264
|
+
contentRaw: JSON.stringify(metadata),
|
|
265
|
+
inputTokens: metadata.input_tokens || 0,
|
|
266
|
+
outputTokens: metadata.output_tokens || 0,
|
|
267
|
+
modelId: coreChat.model || null,
|
|
268
|
+
provider: coreChat.provider?.id || null
|
|
269
|
+
}
|
|
270
|
+
});
|
|
271
|
+
} catch (error) {
|
|
272
|
+
await (this.prisma as any)[messageModel].delete({ where: { id: assistantMessage!.id } });
|
|
273
|
+
// await (this.prisma as any)[messageModel].delete({ where: { id: userMessage!.id } });
|
|
274
|
+
throw error;
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
/**
|
|
279
|
+
* Get all messages for this chat.
|
|
280
|
+
*/
|
|
281
|
+
async messages(): Promise<MessageRecord[]> {
|
|
282
|
+
const messageModel = this.tables.message;
|
|
283
|
+
return await (this.prisma as any)[messageModel].findMany({
|
|
284
|
+
where: { chatId: this.id },
|
|
285
|
+
orderBy: { createdAt: "asc" }
|
|
286
|
+
});
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
/**
|
|
291
|
+
* Convenience method to create a new chat session.
|
|
292
|
+
*/
|
|
293
|
+
export async function createChat<T = Record<string, any>>(
|
|
294
|
+
prisma: PrismaClient,
|
|
295
|
+
llm: NodeLLMCore,
|
|
296
|
+
options: ChatOptions & { tableNames?: TableNames } & T = {} as any
|
|
297
|
+
): Promise<Chat> {
|
|
298
|
+
const chatTable = options.tableNames?.chat || "chat";
|
|
299
|
+
|
|
300
|
+
// Extract known options so we don't double-pass them or pass them incorrectly
|
|
301
|
+
const { model, provider, instructions, metadata, tableNames, debug, persistence, ...extras } =
|
|
302
|
+
options;
|
|
303
|
+
|
|
304
|
+
const record = await (prisma as any)[chatTable].create({
|
|
305
|
+
data: {
|
|
306
|
+
model,
|
|
307
|
+
provider,
|
|
308
|
+
instructions,
|
|
309
|
+
metadata: metadata ?? null,
|
|
310
|
+
...extras
|
|
311
|
+
}
|
|
312
|
+
});
|
|
313
|
+
|
|
314
|
+
return new Chat(prisma, llm, record, options, options.tableNames);
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
/**
|
|
318
|
+
* Convenience method to load an existing chat session.
|
|
319
|
+
*/
|
|
320
|
+
export async function loadChat(
|
|
321
|
+
prisma: PrismaClient,
|
|
322
|
+
llm: NodeLLMCore,
|
|
323
|
+
chatId: string,
|
|
324
|
+
options: ChatOptions & { tableNames?: TableNames } = {}
|
|
325
|
+
): Promise<Chat | null> {
|
|
326
|
+
const chatTable = options.tableNames?.chat || "chat";
|
|
327
|
+
const record = await (prisma as any)[chatTable].findUnique({
|
|
328
|
+
where: { id: chatId }
|
|
329
|
+
});
|
|
330
|
+
|
|
331
|
+
if (!record) return null;
|
|
332
|
+
return new Chat(prisma, llm, record, options, options.tableNames);
|
|
333
|
+
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @node-llm/orm/prisma
|
|
3
|
+
*
|
|
4
|
+
* Prisma adapter for NodeLLM ORM.
|
|
5
|
+
* Provides automatic persistence of chats, messages, tool calls, and API requests.
|
|
6
|
+
*
|
|
7
|
+
* @example
|
|
8
|
+
* ```typescript
|
|
9
|
+
* import { PrismaClient } from '@prisma/client';
|
|
10
|
+
* import { createLLM } from '@node-llm/core';
|
|
11
|
+
* import { createChat } from '@node-llm/orm/prisma';
|
|
12
|
+
*
|
|
13
|
+
* const prisma = new PrismaClient();
|
|
14
|
+
* const llm = createLLM({ provider: 'openai' });
|
|
15
|
+
*
|
|
16
|
+
* const chat = await createChat(prisma, llm, {
|
|
17
|
+
* model: 'gpt-4',
|
|
18
|
+
* instructions: 'You are a helpful assistant.'
|
|
19
|
+
* });
|
|
20
|
+
*
|
|
21
|
+
* const response = await chat.ask('Hello!');
|
|
22
|
+
* console.log(response.content);
|
|
23
|
+
* ```
|
|
24
|
+
*/
|
|
25
|
+
|
|
26
|
+
export { Chat, createChat, loadChat } from "./Chat.js";
|
|
27
|
+
export type { ChatRecord, MessageRecord, ChatOptions, TableNames } from "./Chat.js";
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @node-llm/orm
|
|
3
|
+
*
|
|
4
|
+
* Database persistence layer for NodeLLM.
|
|
5
|
+
* Automatically tracks chats, messages, tool calls, and API requests.
|
|
6
|
+
*
|
|
7
|
+
* ## Quick Start
|
|
8
|
+
*
|
|
9
|
+
* 1. Copy `schema.prisma` from this package into your project
|
|
10
|
+
* 2. Run `npx prisma migrate dev`
|
|
11
|
+
* 3. Use the ORM:
|
|
12
|
+
*
|
|
13
|
+
* ```typescript
|
|
14
|
+
* import { createChat } from '@node-llm/orm/prisma';
|
|
15
|
+
* import { prisma } from './db.js';
|
|
16
|
+
* import { llm } from './llm.js';
|
|
17
|
+
*
|
|
18
|
+
* const chat = await createChat(prisma, llm, {
|
|
19
|
+
* model: 'gpt-4',
|
|
20
|
+
* instructions: 'You are a helpful assistant.'
|
|
21
|
+
* });
|
|
22
|
+
*
|
|
23
|
+
* await chat.ask('Hello!');
|
|
24
|
+
* ```
|
|
25
|
+
*
|
|
26
|
+
* ## Adapters
|
|
27
|
+
*
|
|
28
|
+
* - `@node-llm/orm/prisma` - Prisma adapter (recommended)
|
|
29
|
+
*
|
|
30
|
+
* ## Schema
|
|
31
|
+
*
|
|
32
|
+
* The ORM tracks four core entities:
|
|
33
|
+
* - **Chat** - Session container (model, provider, instructions)
|
|
34
|
+
* - **Message** - User/Assistant conversation history
|
|
35
|
+
* - **ToolCall** - Tool executions (name, arguments, results)
|
|
36
|
+
* - **Request** - API call metrics (tokens, latency, cost)
|
|
37
|
+
*/
|
|
38
|
+
|
|
39
|
+
// Re-export Prisma adapter as default
|
|
40
|
+
export * from "./adapters/prisma/index.js";
|