@node-llm/orm 0.1.0

@@ -0,0 +1,543 @@
+ /* eslint-disable @typescript-eslint/no-explicit-any */
+ import { describe, it, expect, vi, beforeEach } from "vitest";
+ import { createChat, loadChat } from "../src/adapters/prisma/Chat.js";
+
+ // Mock Prisma Client
+ const createMockPrisma = () => {
+   const messages: any[] = [];
+   const chats: any[] = [];
+   const toolCalls: any[] = [];
+   const requests: any[] = [];
+
+   const tables: any = {
+     chat: {
+       create: vi.fn(async ({ data }) => {
+         const chat = { id: "chat-123", ...data, createdAt: new Date(), updatedAt: new Date() };
+         chats.push(chat);
+         return chat;
+       }),
+       findUnique: vi.fn(async ({ where }) => {
+         return chats.find((c) => c.id === where.id) || null;
+       })
+     },
+     message: {
+       create: vi.fn(async ({ data }) => {
+         const message = { id: `msg-${messages.length}`, ...data, createdAt: new Date() };
+         messages.push(message);
+         return message;
+       }),
+       findMany: vi.fn(async ({ where, orderBy: _orderBy }) => {
+         let filtered = messages.filter((m) => m.chatId === where.chatId);
+         if (where.id?.notIn) {
+           filtered = filtered.filter((m) => !where.id.notIn.includes(m.id));
+         }
+         return filtered;
+       }),
+       update: vi.fn(async ({ where, data }) => {
+         const message = messages.find((m) => m.id === where.id);
+         if (message) {
+           Object.assign(message, data);
+         }
+         return message;
+       }),
+       delete: vi.fn(async ({ where }) => {
+         const index = messages.findIndex((m) => m.id === where.id);
+         if (index !== -1) {
+           messages.splice(index, 1);
+         }
+       })
+     },
+     toolCall: {
+       create: vi.fn(async ({ data }) => {
+         const toolCall = { id: `tool-${toolCalls.length}`, ...data, createdAt: new Date() };
+         toolCalls.push(toolCall);
+         return toolCall;
+       }),
+       update: vi.fn(async ({ where, data }) => {
+         const toolCall = toolCalls.find(
+           (tc) =>
+             tc.messageId === where.messageId_toolCallId.messageId &&
+             tc.toolCallId === where.messageId_toolCallId.toolCallId
+         );
+         if (toolCall) {
+           Object.assign(toolCall, data);
+         }
+         return toolCall;
+       })
+     },
+     request: {
+       create: vi.fn(async ({ data }) => {
+         const request = { id: `req-${requests.length}`, ...data, createdAt: new Date() };
+         requests.push(request);
+         return request;
+       })
+     },
+     _messages: messages,
+     _chats: chats,
+     _toolCalls: toolCalls,
+     _requests: requests
+   };
+
+   // Use Proxy to support any custom table name by mapping it to the base table mocks
+   return new Proxy(tables, {
+     get(target, prop) {
+       if (prop in target) return target[prop];
+       if (typeof prop === "string") {
+         if (prop.toLowerCase().includes("chat")) return target.chat;
+         if (prop.toLowerCase().includes("message")) return target.message;
+         if (prop.toLowerCase().includes("toolcall")) return target.toolCall;
+         if (prop.toLowerCase().includes("request")) return target.request;
+       }
+       return undefined;
+     }
+   });
+ };
+
+ // Mock NodeLLM
+ const createMockLLM = () => {
+   let capturedAfterResponseCallback: any = null;
+   let capturedToolCallStartCallback: any = null;
+   let capturedToolCallEndCallback: any = null;
+
+   const mockChat = {
+     system: vi.fn().mockReturnThis(),
+     withTools: vi.fn().mockReturnThis(),
+     onToolCallStart: vi.fn((cb) => {
+       capturedToolCallStartCallback = cb;
+       return mockChat;
+     }),
+     onToolCallEnd: vi.fn((cb) => {
+       capturedToolCallEndCallback = cb;
+       return mockChat;
+     }),
+     afterResponse: vi.fn((cb) => {
+       capturedAfterResponseCallback = cb;
+       return mockChat;
+     }),
+     onNewMessage: vi.fn().mockReturnThis(),
+     onEndMessage: vi.fn().mockReturnThis(),
+     beforeRequest: vi.fn().mockReturnThis(),
+     ask: vi.fn(async () => {
+       const toolCall = {
+         id: "call-123",
+         thought: "I need to search for this",
+         function: { name: "search", arguments: '{"query":"test"}' }
+       };
+
+       // Simulate tool call lifecycle
+       if (capturedToolCallStartCallback) {
+         await capturedToolCallStartCallback(toolCall);
+       }
+       if (capturedToolCallEndCallback) {
+         await capturedToolCallEndCallback(toolCall, "Search results");
+       }
+
+       // Simulate response
+       const response = {
+         content: "Hello from LLM!",
+         meta: { model: "gpt-4", provider: "openai" },
+         reasoning: null,
+         usage: { input_tokens: 10, output_tokens: 5, total_tokens: 15 },
+         model: "gpt-4",
+         provider: "openai"
+       };
+
+       if (capturedAfterResponseCallback) {
+         await capturedAfterResponseCallback({
+           provider: "openai",
+           model: "gpt-4",
+           latency: 100,
+           usage: { input_tokens: 10, output_tokens: 5, cost: 0.001 }
+         });
+       }
+
+       return response;
+     }),
+     stream: vi.fn(async function* () {
+       const toolCall = {
+         id: "call-456",
+         function: { name: "search", arguments: '{"query":"stream"}' }
+       };
+
+       // Simulate tool call during streaming
+       if (capturedToolCallStartCallback) {
+         await capturedToolCallStartCallback(toolCall);
+       }
+       if (capturedToolCallEndCallback) {
+         await capturedToolCallEndCallback(toolCall, "Stream results");
+       }
+
+       // Yield tokens
+       const tokens = ["Hello", " from", " streaming", "!"];
+       for (const token of tokens) {
+         yield { content: token, meta: null };
+       }
+
+       // Final chunk with metadata
+       const finalMeta = {
+         model: "gpt-4",
+         provider: "openai",
+         inputTokens: 15,
+         outputTokens: 8,
+         reasoning: null
+       };
+
+       if (capturedAfterResponseCallback) {
+         await capturedAfterResponseCallback({
+           provider: "openai",
+           model: "gpt-4",
+           latency: 200,
+           usage: { input_tokens: 15, output_tokens: 8, cost: 0.002 }
+         });
+       }
+
+       yield {
+         content: "",
+         meta: finalMeta,
+         usage: { input_tokens: 15, output_tokens: 8, total_tokens: 23 }
+       };
+     })
+   };
+
+   return {
+     chat: vi.fn(() => mockChat),
+     withProvider: vi.fn(() => ({
+       chat: vi.fn(() => mockChat)
+     }))
+   };
+ };
+
+ describe("Chat ORM", () => {
+   let mockPrisma: any;
+   let mockLLM: any;
+
+   beforeEach(() => {
+     mockPrisma = createMockPrisma();
+     mockLLM = createMockLLM();
+   });
+
+   describe("createChat", () => {
+     it("should create a new chat session", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, {
+         model: "gpt-4",
+         provider: "openai",
+         instructions: "You are helpful"
+       });
+
+       expect(chat.id).toBe("chat-123");
+       expect(mockPrisma.chat.create).toHaveBeenCalledWith({
+         data: {
+           model: "gpt-4",
+           provider: "openai",
+           instructions: "You are helpful",
+           metadata: null
+         }
+       });
+     });
+
+     it("should pass metadata as-is (native JSON support)", async () => {
+       await createChat(mockPrisma, mockLLM, {
+         metadata: { userId: "user-123" }
+       });
+
+       expect(mockPrisma.chat.create).toHaveBeenCalledWith({
+         data: expect.objectContaining({
+           metadata: { userId: "user-123" }
+         })
+       });
+     });
+
+     it("should spread extra custom fields into create data", async () => {
+       await createChat(mockPrisma, mockLLM, {
+         model: "gpt-4",
+         userId: "user-789",
+         projectId: "proj-abc"
+       } as any);
+
+       expect(mockPrisma.chat.create).toHaveBeenCalledWith({
+         data: expect.objectContaining({
+           model: "gpt-4",
+           userId: "user-789",
+           projectId: "proj-abc"
+         })
+       });
+     });
+   });
+
+   describe("loadChat", () => {
+     it("should load an existing chat", async () => {
+       await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       const loaded = await loadChat(mockPrisma, mockLLM, "chat-123");
+
+       expect(loaded).not.toBeNull();
+       expect(loaded?.id).toBe("chat-123");
+     });
+
+     it("should return null for non-existent chat", async () => {
+       const loaded = await loadChat(mockPrisma, mockLLM, "nonexistent");
+       expect(loaded).toBeNull();
+     });
+   });
+
+   describe("ask", () => {
+     it("should persist user and assistant messages", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       await chat.ask("Hello!");
+
+       expect(mockPrisma.message.create).toHaveBeenCalledTimes(2);
+       expect(mockPrisma._messages).toHaveLength(2);
+       expect(mockPrisma._messages[0].role).toBe("user");
+       expect(mockPrisma._messages[0].content).toBe("Hello!");
+       expect(mockPrisma._messages[1].role).toBe("assistant");
+     });
+
+     it("should update assistant message with LLM response", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       const response = await chat.ask("Hello!");
+
+       expect(response.content).toBe("Hello from LLM!");
+       expect(response.inputTokens).toBe(10);
+       expect(response.outputTokens).toBe(5);
+       expect(mockPrisma.message.update).toHaveBeenCalled();
+     });
+
+     it("should persist tool calls", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       await chat.ask("Search for something");
+
+       expect(mockPrisma.toolCall.create).toHaveBeenCalled();
+       expect(mockPrisma.toolCall.update).toHaveBeenCalledWith(
+         expect.objectContaining({
+           data: expect.objectContaining({
+             result: "Search results"
+           })
+         })
+       );
+     });
+
+     it("should persist API request metrics", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       await chat.ask("Hello!");
+
+       expect(mockPrisma.request.create).toHaveBeenCalledWith({
+         data: expect.objectContaining({
+           provider: "openai",
+           model: "gpt-4",
+           inputTokens: 10,
+           outputTokens: 5,
+           cost: 0.001
+         })
+       });
+     });
+
+     it("should cleanup on failure", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+
+       // Force an error
+       mockLLM.chat().ask.mockRejectedValueOnce(new Error("API Error"));
+
+       await expect(chat.ask("Hello!")).rejects.toThrow("API Error");
+       expect(mockPrisma.message.delete).toHaveBeenCalled();
+     });
+   });
+
+   describe("askStream", () => {
+     it("should yield tokens in real-time", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       const tokens: string[] = [];
+
+       for await (const token of chat.askStream("Tell me a story")) {
+         tokens.push(token);
+       }
+
+       expect(tokens).toEqual(["Hello", " from", " streaming", "!"]);
+     });
+
+     it("should persist complete message after streaming", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       const tokens: string[] = [];
+
+       for await (const token of chat.askStream("Tell me a story")) {
+         tokens.push(token);
+       }
+
+       const messages = mockPrisma._messages.filter((m: any) => m.role === "assistant");
+       expect(messages[0].content).toBe("Hello from streaming!");
+       expect(messages[0].inputTokens).toBe(15);
+       expect(messages[0].outputTokens).toBe(8);
+     });
+
+     it("should persist tool calls during streaming", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+
+       for await (const _ of chat.askStream("Search")) {
+         // consume stream
+       }
+
+       expect(mockPrisma.toolCall.create).toHaveBeenCalled();
+       expect(mockPrisma.toolCall.update).toHaveBeenCalledWith(
+         expect.objectContaining({
+           data: expect.objectContaining({
+             result: "Stream results"
+           })
+         })
+       );
+     });
+
+     it("should cleanup on streaming failure", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+
+       // Force streaming error
+       mockLLM.chat().stream.mockImplementation(async function* () {
+         yield { content: "", meta: null }; // Need at least one yield for generator
+         throw new Error("Stream Error");
+       });
+
+       await expect(async () => {
+         for await (const _ of chat.askStream("Hello!")) {
+           // consume stream
+         }
+       }).rejects.toThrow("Stream Error");
+
+       expect(mockPrisma.message.delete).toHaveBeenCalled();
+     });
+   });
+
+   describe("messages", () => {
+     it("should retrieve all messages for a chat", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+       await chat.ask("First message");
+       await chat.ask("Second message");
+
+       const messages = await chat.messages();
+       expect(messages.length).toBeGreaterThanOrEqual(4); // 2 user + 2 assistant
+     });
+   });
+
+   describe("Custom Table Names", () => {
+     it("should use custom table names when creating chat", async () => {
+       const tableNames = {
+         chat: "llmChat",
+         message: "llmMessage",
+         toolCall: "llmToolCall",
+         request: "llmRequest"
+       };
+
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4", tableNames });
+
+       expect(mockPrisma.llmChat.create).toHaveBeenCalled();
+       expect(chat.id).toBe("chat-123");
+     });
+
+     it("should use custom table names when loading chat", async () => {
+       const tableNames = {
+         chat: "llmChat"
+       };
+
+       // First create with custom names
+       await createChat(mockPrisma, mockLLM, { model: "gpt-4", tableNames });
+
+       // Then load with same custom names
+       const loaded = await loadChat(mockPrisma, mockLLM, "chat-123", { tableNames });
+
+       expect(mockPrisma.llmChat.findUnique).toHaveBeenCalled();
+       expect(loaded?.id).toBe("chat-123");
+     });
+
+     it("should use custom table names for messages", async () => {
+       const tableNames = {
+         message: "llmMessage"
+       };
+
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4", tableNames });
+       await chat.ask("Hello!");
+
+       expect(mockPrisma.llmMessage.create).toHaveBeenCalled();
+       expect(mockPrisma.llmMessage.update).toHaveBeenCalled();
+     });
+
+     it("should use custom table names for tool calls", async () => {
+       const tableNames = {
+         toolCall: "llmToolCall"
+       };
+
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4", tableNames });
+       await chat.ask("Search");
+
+       expect(mockPrisma.llmToolCall.create).toHaveBeenCalled();
+     });
+
+     it("should use custom table names for requests", async () => {
+       const tableNames = {
+         request: "llmRequest"
+       };
+
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4", tableNames });
+       await chat.ask("Hello!");
+
+       expect(mockPrisma.llmRequest.create).toHaveBeenCalled();
+     });
+   });
+
+   describe("Persistence Configuration", () => {
+     it("should skip tool call persistence when disabled", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, {
+         model: "gpt-4",
+         persistence: { toolCalls: false }
+       });
+
+       await chat.ask("Search for something");
+
+       // Tool calls should NOT be persisted
+       expect(mockPrisma.toolCall.create).not.toHaveBeenCalled();
+       expect(mockPrisma.toolCall.update).not.toHaveBeenCalled();
+
+       // But requests should still be persisted (default: true)
+       expect(mockPrisma.request.create).toHaveBeenCalled();
+     });
+
+     it("should skip request persistence when disabled", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, {
+         model: "gpt-4",
+         persistence: { requests: false }
+       });
+
+       await chat.ask("Hello!");
+
+       // Requests should NOT be persisted
+       expect(mockPrisma.request.create).not.toHaveBeenCalled();
+
+       // But tool calls should still be persisted (default: true)
+       expect(mockPrisma.toolCall.create).toHaveBeenCalled();
+     });
+
+     it("should disable both toolCalls and requests when configured", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, {
+         model: "gpt-4",
+         persistence: {
+           toolCalls: false,
+           requests: false
+         }
+       });
+
+       await chat.ask("Search");
+
+       expect(mockPrisma.toolCall.create).not.toHaveBeenCalled();
+       expect(mockPrisma.request.create).not.toHaveBeenCalled();
+
+       // Messages should still be persisted (always required)
+       expect(mockPrisma.message.create).toHaveBeenCalled();
+     });
+
+     it("should persist everything by default when persistence config is omitted", async () => {
+       const chat = await createChat(mockPrisma, mockLLM, { model: "gpt-4" });
+
+       await chat.ask("Search");
+
+       // All persistence should be enabled by default
+       expect(mockPrisma.toolCall.create).toHaveBeenCalled();
+       expect(mockPrisma.request.create).toHaveBeenCalled();
+       expect(mockPrisma.message.create).toHaveBeenCalled();
+     });
+   });
+ });
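
Taken together, the assertions above outline the surface of the Prisma Chat adapter: createChat/loadChat, ask, askStream, messages, optional custom table names, and per-feature persistence flags. The sketch below restates that surface as plain usage. It is grounded only in what the tests assert; the "@node-llm/orm" entry point and the NodeLLM instance are assumptions, since the tests import createChat/loadChat straight from ../src/adapters/prisma/Chat.js and pass hand-rolled mocks.

// Usage sketch based only on behavior asserted in the tests above.
// The package entry point and the NodeLLM construction are assumptions.
import { PrismaClient } from "@prisma/client";
import { createChat, loadChat } from "@node-llm/orm"; // assumed public entry point

declare const llm: any; // a NodeLLM instance; its construction is not shown in this diff
const prisma = new PrismaClient();

const chat = await createChat(prisma, llm, {
  model: "gpt-4",
  provider: "openai",
  instructions: "You are helpful",
  metadata: { userId: "user-123" },                // stored as native JSON
  tableNames: { chat: "llmChat" },                 // optional custom Prisma model names
  persistence: { toolCalls: true, requests: true } // both default to true
});

// ask() persists the user and assistant messages, tool calls, and request metrics.
const response = await chat.ask("Hello!");
console.log(response.content, response.inputTokens, response.outputTokens);

// askStream() yields tokens as they arrive and finalizes the assistant row afterwards.
for await (const token of chat.askStream("Tell me a story")) {
  process.stdout.write(token);
}

const history = await chat.messages();                 // all persisted messages for this chat
const reloaded = await loadChat(prisma, llm, chat.id); // resolves to null for unknown ids
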
package/tsconfig.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "extends": "../../tsconfig.base.json",
+   "compilerOptions": {
+     "outDir": "./dist",
+     "rootDir": "./src",
+     "declaration": true,
+     "declarationMap": true,
+     "composite": true,
+     "module": "NodeNext",
+     "moduleResolution": "NodeNext"
+   },
+   "include": ["src/**/*"],
+   "exclude": ["node_modules", "dist", "**/*.test.ts"]
+ }
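
One practical consequence of the NodeNext module settings is that relative ESM imports must spell out the runtime extension, which is why the test file above imports the TypeScript source as

import { createChat, loadChat } from "../src/adapters/prisma/Chat.js";

rather than the extensionless "../src/adapters/prisma/Chat".
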
@@ -0,0 +1,13 @@
+ import { defineConfig } from "vitest/config";
+
+ export default defineConfig({
+   test: {
+     globals: true,
+     environment: "node",
+     coverage: {
+       provider: "v8",
+       reporter: ["text", "json", "html"],
+       exclude: ["node_modules/", "dist/", "test/"]
+     }
+   }
+ });
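
With this config in place, the suite above runs via vitest run, and the configured v8 coverage (text, json, and html reporters) is produced by vitest run --coverage; the v8 provider assumes the @vitest/coverage-v8 package is installed alongside vitest.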