@mtharrison/loupe 1.2.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +59 -34
- package/dist/client/app.css +115 -20
- package/dist/client/app.js +183 -122
- package/dist/index.d.ts +6 -8
- package/dist/index.js +81 -66
- package/dist/session-nav.d.ts +1 -1
- package/dist/session-nav.js +8 -1
- package/dist/store.d.ts +7 -7
- package/dist/store.js +286 -83
- package/dist/types.d.ts +44 -9
- package/dist/utils.d.ts +2 -1
- package/dist/utils.js +14 -0
- package/examples/nested-tool-call.js +234 -0
- package/package.json +1 -1
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const { spawn } = require('node:child_process');
|
|
4
|
+
const { randomUUID } = require('node:crypto');
|
|
5
|
+
const readline = require('node:readline/promises');
|
|
6
|
+
const process = require('node:process');
|
|
7
|
+
|
|
8
|
+
const { getLocalLLMTracer, wrapChatModel } = require('../dist/index.js');
|
|
9
|
+
|
|
10
|
+
// How long to keep the dashboard running when the terminal is non-interactive.
const DEMO_TIMEOUT_MS = 15_000;

// Dashboard port. LLM_TRACE_PORT overrides; a missing/unparseable/zero value
// intentionally falls through `||` to the default 4319.
const PORT = Number(process.env.LLM_TRACE_PORT) || 4319;

// Per-run session identifier so repeated demo runs are distinguishable in the UI.
const SESSION_ID = `nested-tool-demo-${randomUUID().slice(0, 8)}`;
|
|
13
|
+
|
|
14
|
+
/**
 * Demo entry point: starts the Loupe dashboard, wires up two mock chat
 * models (a root assistant and a nested research "tool" model), runs one
 * request through the root model — which invokes the nested model, producing
 * a parent/child span pair — then keeps the dashboard up for inspection.
 */
async function main() {
  // Tracing defaults on for the demo unless the caller disabled it explicitly.
  process.env.LLM_TRACE_ENABLED ??= '1';

  const tracer = getLocalLLMTracer({ port: PORT });
  const serverInfo = await tracer.startServer();
  if (!serverInfo?.url) {
    throw new Error('Failed to start the Loupe dashboard.');
  }

  log(`[demo] Loupe dashboard: ${serverInfo.url}`);
  openBrowser(serverInfo.url);
  log(`[demo] Session: ${SESSION_ID}`);

  // Metadata factory shared by both wrapped models; only the actor, model
  // name, stage and role tag differ. Returns a fresh object per call, as the
  // original inline factories did.
  const traceMeta = (actorId, model, stage, role) => () => ({
    sessionId: SESSION_ID,
    rootSessionId: SESSION_ID,
    rootActorId: 'travel-assistant',
    actorId,
    provider: 'mock-llm',
    model,
    stage,
    tags: { example: 'nested-tool-call', role },
  });

  // Request payload the root model sends to the nested research model.
  const researchRequest = (question) => ({
    messages: [{ role: 'user', content: `Research facts needed for: ${question}` }],
  });

  // Mock usage accounting; fresh object per response.
  const mockUsage = (prompt, completion) => ({
    tokens: { prompt, completion },
    pricing: { prompt: 0.000001, completion: 0.000002 },
  });

  const nestedResearchModel = wrapChatModel(
    {
      async invoke(input) {
        const question = input?.messages?.[0]?.content || '';
        return {
          message: {
            role: 'assistant',
            content: `Tool research result for "${question}": compare rain gear, walking shoes, and a light sweater.`,
          },
          tool_calls: [],
          usage: mockUsage(9, 14),
        };
      },
      async *stream(input) {
        const content = `Tool research stream for "${input?.messages?.[0]?.content || ''}".`;
        yield { type: 'begin', role: 'assistant' };
        yield { type: 'chunk', content };
        yield {
          type: 'finish',
          message: { role: 'assistant', content },
          tool_calls: [],
          usage: mockUsage(9, 14),
        };
      },
    },
    traceMeta('weather-research-tool', 'tool-researcher-v1', 'tool:research', 'tool-llm'),
    { port: PORT },
  );

  const rootAssistantModel = wrapChatModel(
    {
      async invoke(input) {
        const question = input?.messages?.[0]?.content || '';
        // Nested call: invoking the (wrapped) tool model from inside the root
        // model is what produces the child span under the assistant's span.
        const toolResult = await nestedResearchModel.invoke(researchRequest(question), {});

        return {
          message: {
            role: 'assistant',
            content: [
              `Final answer for "${question}"`,
              '',
              toolResult.message.content,
              '',
              'Pack layers, waterproof gear, and comfortable shoes.',
            ].join('\n'),
          },
          tool_calls: [],
          usage: mockUsage(12, 18),
        };
      },
      async *stream(input) {
        const question = input?.messages?.[0]?.content || '';
        yield { type: 'begin', role: 'assistant' };
        yield { type: 'chunk', content: 'Let me research that with the tool model.\n\n' };

        // Drain the nested model's stream, collecting only its chunk text.
        let toolContent = '';
        for await (const chunk of nestedResearchModel.stream(researchRequest(question), {})) {
          if (chunk?.type === 'chunk' && typeof chunk.content === 'string') {
            toolContent += chunk.content;
          }
        }

        const finalContent = [
          `Final answer for "${question}"`,
          '',
          toolContent,
          '',
          'Pack layers, waterproof gear, and comfortable shoes.',
        ].join('\n');

        yield { type: 'chunk', content: finalContent };
        yield {
          type: 'finish',
          message: { role: 'assistant', content: finalContent },
          tool_calls: [],
          usage: mockUsage(12, 18),
        };
      },
    },
    traceMeta('travel-assistant', 'trip-planner-v1', 'assistant', 'root-llm'),
    { port: PORT },
  );

  const prompt = 'I am taking a rainy weekend trip to London. What should I pack?';
  log(`[demo] User: ${prompt}`);
  const response = await rootAssistantModel.invoke({
    messages: [{ role: 'user', content: prompt }],
  });
  log(`[demo] Assistant:\n${response.message.content}`);
  log('[demo] This run creates a parent span for the assistant call and a child span for the tool LLM call.');
  log(`[demo] Keep this process alive while you inspect ${serverInfo.url}`);

  await waitForDashboardExit(serverInfo.url);
}
|
|
177
|
+
|
|
178
|
+
/**
 * Best-effort launch of the default browser at `url`.
 * Skipped entirely for non-TTY stdout, CI environments, or when
 * LOUPE_OPEN_BROWSER=0; unknown platforms and spawn failures are ignored
 * because the dashboard URL has already been printed.
 */
function openBrowser(url) {
  const suppressed =
    !process.stdout.isTTY || process.env.CI || process.env.LOUPE_OPEN_BROWSER === '0';
  if (suppressed) {
    return;
  }

  // Per-platform launcher: [binary, argv].
  const launchers = {
    darwin: ['open', [url]],
    win32: ['cmd', ['/c', 'start', '', url]],
    linux: ['xdg-open', [url]],
  };
  const launcher = launchers[process.platform];
  if (!launcher) {
    return;
  }

  const [bin, args] = launcher;
  try {
    // Detach so the demo process does not wait on the browser.
    const child = spawn(bin, args, { detached: true, stdio: 'ignore' });
    child.on('error', () => {});
    child.unref();
  } catch (_error) {
    // Ignore browser launch failures; the URL was already printed.
  }
}
|
|
207
|
+
|
|
208
|
+
/**
 * Blocks until the user dismisses the demo.
 * Interactive terminals wait for an Enter keypress; non-interactive runs
 * (pipes, CI) instead keep the dashboard alive for DEMO_TIMEOUT_MS.
 */
async function waitForDashboardExit(url) {
  const interactive = process.stdin.isTTY && process.stdout.isTTY;
  if (!interactive) {
    log(`[demo] Non-interactive terminal detected. Leaving the dashboard up for ${Math.round(DEMO_TIMEOUT_MS / 1000)} seconds: ${url}`);
    await new Promise((resolve) => setTimeout(resolve, DEMO_TIMEOUT_MS));
    return;
  }

  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  try {
    await rl.question('[demo] Press Enter to stop the demo and close the dashboard.\n');
  } finally {
    // Always release stdin so the process can exit.
    rl.close();
  }
}
|
|
226
|
+
|
|
227
|
+
/** Write one newline-terminated message to stdout. */
function log(message) {
  const line = `${message}\n`;
  process.stdout.write(line);
}
|
|
230
|
+
|
|
231
|
+
// Kick off the demo; report failures on stderr and signal a non-zero exit
// without hard-killing the process (pending output still flushes).
main().catch((error) => {
  const { message } = error;
  process.stderr.write(`[demo] ${message}\n`);
  process.exitCode = 1;
});
|