@donghanh/hono 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +22 -0
- package/src/chat.ts +484 -0
- package/src/gpt.ts +252 -0
- package/src/index.ts +10 -0
- package/src/middleware.ts +17 -0
- package/src/renderers/chatgpt.ts +127 -0
- package/src/renderers/llm.ts +106 -0
- package/tsconfig.json +15 -0
package/package.json
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@donghanh/hono",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Hono routes for GPT Store and LLM chat — renderers for ChatGPT and OpenAI-compatible APIs",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"exports": {
|
|
8
|
+
".": "./src/index.ts"
|
|
9
|
+
},
|
|
10
|
+
"repository": {
|
|
11
|
+
"type": "git",
|
|
12
|
+
"url": "https://github.com/rezzahub/donghanh",
|
|
13
|
+
"directory": "packages/hono"
|
|
14
|
+
},
|
|
15
|
+
"dependencies": {
|
|
16
|
+
"@donghanh/core": "workspace:*",
|
|
17
|
+
"hono": "^4.7.0"
|
|
18
|
+
},
|
|
19
|
+
"peerDependencies": {
|
|
20
|
+
"typescript": "^5.0.0"
|
|
21
|
+
}
|
|
22
|
+
}
|
package/src/chat.ts
ADDED
|
@@ -0,0 +1,484 @@
|
|
|
1
|
+
import type { Executor, Registry } from "@donghanh/core";
|
|
2
|
+
import { executeOperation } from "@donghanh/core";
|
|
3
|
+
import { Hono } from "hono";
|
|
4
|
+
import { streamSSE } from "hono/streaming";
|
|
5
|
+
import type { Authenticate } from "./middleware";
|
|
6
|
+
import { isAuthError } from "./middleware";
|
|
7
|
+
import { renderForLlm } from "./renderers/llm";
|
|
8
|
+
|
|
9
|
+
/** Configuration for `chatRoutes`. */
export interface ChatRoutesConfig {
  // Operation registry the LLM tool list is generated from.
  registry: Registry;
  // Executes registry operations on behalf of the authenticated user.
  executor: Executor;
  // Resolves the incoming request to a user id (or an error Response).
  authenticate: Authenticate;
  /** System prompt for the LLM */
  systemPrompt?: string;
  /** Hook to enrich data after operation execution (e.g. inject formatted balances) */
  enrichData?: (
    data: unknown,
    context: {
      operationId: string;
      variables: Record<string, unknown>;
      userId: string;
    },
  ) => Promise<void>;
  /** LLM model identifier (e.g. "anthropic/claude-sonnet-4") */
  model?: string;
  /** OpenRouter API key */
  apiKey?: string;
  /** Base URL for OpenAI-compatible API (default: OpenRouter) */
  baseUrl?: string;
}

/** One message of the chat transcript exchanged with the client. */
export interface ChatMessage {
  role: "user" | "assistant";
  content: string;
  // Action buttons the client may render alongside an assistant message.
  actions?: Array<{
    operation: string;
    label: string;
    variables?: Record<string, unknown>;
  }>;
}

/** An action-button click sent by the client; executed server-side. */
interface ChatAction {
  operation: string;
  label: string;
  variables?: Record<string, unknown>;
}

// OpenAI-compatible types

/** Tool (function) definition in the OpenAI chat-completions schema. */
interface OaiTool {
  type: "function";
  function: {
    name: string;
    description: string;
    // JSON-schema object describing the tool's arguments.
    parameters: object;
  };
}

/** Message in the OpenAI chat-completions schema. */
interface OaiMessage {
  role: "system" | "user" | "assistant" | "tool";
  content?: string | null;
  tool_calls?: OaiToolCall[];
  // Required when role === "tool": links a result to its originating call.
  tool_call_id?: string;
}

/** A tool invocation requested by the model; `arguments` is a JSON string. */
interface OaiToolCall {
  id: string;
  type: "function";
  function: { name: string; arguments: string };
}

/** One completion choice in a non-streaming response. */
interface OaiChoice {
  message: {
    role: "assistant";
    content?: string | null;
    tool_calls?: OaiToolCall[];
  };
  finish_reason: string;
}

/** Non-streaming chat-completions response body (only the fields read here). */
interface OaiResponse {
  choices: OaiChoice[];
}

/** One SSE chunk of a streaming chat-completions response. */
interface OaiStreamChunk {
  choices: Array<{
    delta: { content?: string | null };
    finish_reason: string | null;
  }>;
}
|
|
90
|
+
|
|
91
|
+
/**
 * Builds a Hono sub-app with a single POST "/" chat endpoint that drives an
 * OpenAI-compatible LLM (default: OpenRouter) over the operation registry.
 *
 * Request body: { messages, initOperation?, action? }.
 * Response: an SSE stream of "text" events, an optional "actions" event
 * carrying suggested follow-up actions, then a terminating "done" event.
 *
 * NOTE(review): `config.apiKey` is optional in ChatRoutesConfig but asserted
 * non-null (`config.apiKey!`) wherever the LLM is called — confirm callers
 * always provide it, or fail fast with a clear error instead.
 */
export function chatRoutes(config: ChatRoutesConfig): Hono {
  const { registry, executor, authenticate } = config;
  const baseUrl = config.baseUrl ?? "https://openrouter.ai/api/v1";

  const app = new Hono();

  app.post("/", async (c) => {
    // Authenticate first; an AuthError carries a ready-to-return Response.
    const authResult = await authenticate(c.req.raw);
    if (isAuthError(authResult)) return authResult.error;

    const body = await c.req.json();
    const messages: ChatMessage[] = body.messages ?? [];
    const initOperation:
      | { id: string; variables?: Record<string, unknown> }
      | undefined = body.initOperation;
    const action: ChatAction | undefined = body.action;

    // Expose every registry operation to the model as an OpenAI tool.
    const operations = registry.list();
    const tools: OaiTool[] = operations.map((op) => {
      const detail = registry.detail(op.id);
      return {
        type: "function" as const,
        function: {
          name: op.id,
          description: `${op.description}. ${detail?.instruction ?? ""}`.trim(),
          parameters: detail?.input ?? { type: "object", properties: {} },
        },
      };
    });

    const model = config.model ?? "google/gemini-2.5-flash";
    const systemPrompt =
      config.systemPrompt ??
      "You are a helpful assistant. Use the tools to help the user.";

    // Init operation context: on the first turn only (<= 1 message), run the
    // optional bootstrap operation and append its rendered result to the
    // system prompt so the model starts with user-specific context.
    let initContext = "";
    if (initOperation && messages.length <= 1) {
      try {
        const initVars = initOperation.variables ?? {};
        const { data, brief } = await executeOperation({
          registry,
          operationId: initOperation.id,
          variables: initVars,
          executor,
          context: { userId: authResult.userId, request: c.req.raw },
        });
        if (config.enrichData) {
          await config.enrichData(data, {
            operationId: initOperation.id,
            variables: initVars,
            userId: authResult.userId,
          });
        }
        const rendered = renderForLlm(brief);
        initContext = `\n\nUser context:\n${JSON.stringify(data)}\n\n${rendered.text}`;
      } catch {
        // continue without context — the init operation is best-effort
      }
    }

    // Transcript handed to the model: system prompt (+ init context) followed
    // by the client's user/assistant turns (client-side `actions` are dropped).
    const llmMessages: OaiMessage[] = [
      { role: "system", content: systemPrompt + initContext },
      ...messages.map(
        (m): OaiMessage => ({ role: m.role, content: m.content }),
      ),
    ];

    // Suggested actions from the most recent operation result; emitted as a
    // single "actions" SSE event at the end of the stream.
    let currentActions: ChatMessage["actions"] = [];

    // If user clicked an action button, execute the operation directly
    // and inject the result as a synthetic tool call for the LLM to explain
    if (action) {
      const toolCallId = `action-${Date.now()}`;

      try {
        const variables = action.variables ?? {};
        const { data, brief } = await executeOperation({
          registry,
          operationId: action.operation,
          variables,
          executor,
          context: { userId: authResult.userId, request: c.req.raw },
        });

        if (config.enrichData) {
          await config.enrichData(data, {
            operationId: action.operation,
            variables,
            userId: authResult.userId,
          });
        }

        const rendered = renderForLlm(brief);
        currentActions = rendered.actions.map((a) => ({
          operation: a.operation,
          label: a.label,
          variables: a.variables,
        }));

        const resultContent = `Data: ${JSON.stringify(data)}\n\n${rendered.text}`;

        // Inject as if the LLM called the tool itself
        llmMessages.push({
          role: "assistant",
          content: null,
          tool_calls: [
            {
              id: toolCallId,
              type: "function",
              function: {
                name: action.operation,
                arguments: JSON.stringify(variables),
              },
            },
          ],
        });
        llmMessages.push({
          role: "tool",
          tool_call_id: toolCallId,
          content: resultContent,
        });
      } catch (err: unknown) {
        // Failure path mirrors the success path: synthetic tool call plus a
        // tool result carrying the error text, so the model can explain it.
        const error = err as { message?: string };
        llmMessages.push({
          role: "assistant",
          content: null,
          tool_calls: [
            {
              id: toolCallId,
              type: "function",
              function: {
                name: action.operation,
                arguments: JSON.stringify(action.variables ?? {}),
              },
            },
          ],
        });
        llmMessages.push({
          role: "tool",
          tool_call_id: toolCallId,
          content: `Error: ${error?.message ?? "Unknown error"}`,
        });
      }

      // Stream the LLM's explanation of the result
      // NOTE(review): this stream block is duplicated below for the
      // post-tool-loop path; consider extracting a shared helper.
      return streamSSE(c, async (stream) => {
        try {
          for await (const chunk of streamLlmCall(
            baseUrl,
            config.apiKey!,
            model,
            llmMessages,
            tools,
          )) {
            await stream.writeSSE({ event: "text", data: chunk });
          }
        } catch {
          await stream.writeSSE({
            event: "text",
            data: "Something went wrong generating the response.",
          });
        }
        if (currentActions && currentActions.length > 0) {
          await stream.writeSSE({
            event: "actions",
            data: JSON.stringify(currentActions),
          });
        }
        await stream.writeSSE({ event: "done", data: "" });
      });
    }

    // Normal flow: LLM decides what tools to call
    // Tool use loop — non-streaming
    let llmResponse = await callLlm(
      baseUrl,
      config.apiKey!,
      model,
      llmMessages,
      tools,
    );

    // Keep executing requested tools and feeding results back until the model
    // stops asking for tools. NOTE(review): the loop has no iteration cap —
    // a model that keeps requesting tools would loop indefinitely.
    while (llmResponse.choices[0]?.finish_reason === "tool_calls") {
      const choice = llmResponse.choices[0];
      const toolCalls = choice.message.tool_calls ?? [];

      llmMessages.push({
        role: "assistant",
        content: choice.message.content,
        tool_calls: toolCalls,
      });

      for (const toolCall of toolCalls) {
        const resultContent = await executeToolCall(
          toolCall,
          registry,
          executor,
          config,
          authResult.userId,
          c.req.raw,
        );
        // Only the last tool call's actions survive (later calls overwrite).
        currentActions = resultContent.actions;

        llmMessages.push({
          role: "tool",
          tool_call_id: toolCall.id,
          content: resultContent.text,
        });
      }

      llmResponse = await callLlm(
        baseUrl,
        config.apiKey!,
        model,
        llmMessages,
        tools,
      );
    }

    // Final response — stream it
    // Heuristic: the transcript grew beyond system-prompt + client messages
    // exactly when the tool loop appended assistant/tool entries.
    const hadToolCalls = llmMessages.length > messages.length + 1;

    if (hadToolCalls) {
      // Tool loop done — stream the LLM's final explanation
      return streamSSE(c, async (stream) => {
        try {
          for await (const chunk of streamLlmCall(
            baseUrl,
            config.apiKey!,
            model,
            llmMessages,
            tools,
          )) {
            await stream.writeSSE({ event: "text", data: chunk });
          }
        } catch {
          await stream.writeSSE({
            event: "text",
            data: "Something went wrong generating the response.",
          });
        }
        if (currentActions && currentActions.length > 0) {
          await stream.writeSSE({
            event: "actions",
            data: JSON.stringify(currentActions),
          });
        }
        await stream.writeSSE({ event: "done", data: "" });
      });
    }

    // No tool calls — stream the response we already have, or re-request streaming
    const existingText = llmResponse.choices[0]?.message.content;
    return streamSSE(c, async (stream) => {
      if (existingText) {
        await stream.writeSSE({ event: "text", data: existingText });
      }
      await stream.writeSSE({ event: "done", data: "" });
    });
  });

  return app;
}
|
|
355
|
+
|
|
356
|
+
// --- Helpers ---
|
|
357
|
+
|
|
358
|
+
async function executeToolCall(
|
|
359
|
+
toolCall: OaiToolCall,
|
|
360
|
+
registry: Registry,
|
|
361
|
+
executor: Executor,
|
|
362
|
+
config: ChatRoutesConfig,
|
|
363
|
+
userId: string,
|
|
364
|
+
request: Request,
|
|
365
|
+
): Promise<{ text: string; actions: ChatMessage["actions"] }> {
|
|
366
|
+
try {
|
|
367
|
+
const variables = JSON.parse(toolCall.function.arguments) as Record<
|
|
368
|
+
string,
|
|
369
|
+
unknown
|
|
370
|
+
>;
|
|
371
|
+
|
|
372
|
+
const { data, brief } = await executeOperation({
|
|
373
|
+
registry,
|
|
374
|
+
operationId: toolCall.function.name,
|
|
375
|
+
variables,
|
|
376
|
+
executor,
|
|
377
|
+
context: { userId, request },
|
|
378
|
+
});
|
|
379
|
+
|
|
380
|
+
if (config.enrichData) {
|
|
381
|
+
await config.enrichData(data, {
|
|
382
|
+
operationId: toolCall.function.name,
|
|
383
|
+
variables,
|
|
384
|
+
userId,
|
|
385
|
+
});
|
|
386
|
+
}
|
|
387
|
+
|
|
388
|
+
const rendered = renderForLlm(brief);
|
|
389
|
+
return {
|
|
390
|
+
text: `Data: ${JSON.stringify(data)}\n\n${rendered.text}`,
|
|
391
|
+
actions: rendered.actions.map((a) => ({
|
|
392
|
+
operation: a.operation,
|
|
393
|
+
label: a.label,
|
|
394
|
+
variables: a.variables,
|
|
395
|
+
})),
|
|
396
|
+
};
|
|
397
|
+
} catch (err: unknown) {
|
|
398
|
+
const error = err as { message?: string };
|
|
399
|
+
return {
|
|
400
|
+
text: `Error: ${error?.message ?? "Unknown error"}`,
|
|
401
|
+
actions: [],
|
|
402
|
+
};
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
|
|
406
|
+
async function callLlm(
|
|
407
|
+
baseUrl: string,
|
|
408
|
+
apiKey: string,
|
|
409
|
+
model: string,
|
|
410
|
+
messages: OaiMessage[],
|
|
411
|
+
tools: OaiTool[],
|
|
412
|
+
): Promise<OaiResponse> {
|
|
413
|
+
const resp = await fetch(`${baseUrl}/chat/completions`, {
|
|
414
|
+
method: "POST",
|
|
415
|
+
headers: {
|
|
416
|
+
"Content-Type": "application/json",
|
|
417
|
+
Authorization: `Bearer ${apiKey}`,
|
|
418
|
+
},
|
|
419
|
+
body: JSON.stringify({ model, messages, tools, max_tokens: 4096 }),
|
|
420
|
+
});
|
|
421
|
+
|
|
422
|
+
if (!resp.ok) {
|
|
423
|
+
const text = await resp.text();
|
|
424
|
+
throw new Error(`LLM API error (${resp.status}): ${text}`);
|
|
425
|
+
}
|
|
426
|
+
|
|
427
|
+
return resp.json();
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
async function* streamLlmCall(
|
|
431
|
+
baseUrl: string,
|
|
432
|
+
apiKey: string,
|
|
433
|
+
model: string,
|
|
434
|
+
messages: OaiMessage[],
|
|
435
|
+
tools: OaiTool[],
|
|
436
|
+
): AsyncGenerator<string> {
|
|
437
|
+
const resp = await fetch(`${baseUrl}/chat/completions`, {
|
|
438
|
+
method: "POST",
|
|
439
|
+
headers: {
|
|
440
|
+
"Content-Type": "application/json",
|
|
441
|
+
Authorization: `Bearer ${apiKey}`,
|
|
442
|
+
},
|
|
443
|
+
body: JSON.stringify({
|
|
444
|
+
model,
|
|
445
|
+
messages,
|
|
446
|
+
tools,
|
|
447
|
+
max_tokens: 4096,
|
|
448
|
+
stream: true,
|
|
449
|
+
}),
|
|
450
|
+
});
|
|
451
|
+
|
|
452
|
+
if (!resp.ok) {
|
|
453
|
+
const text = await resp.text();
|
|
454
|
+
throw new Error(`LLM API error (${resp.status}): ${text}`);
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
const reader = resp.body!.getReader();
|
|
458
|
+
const decoder = new TextDecoder();
|
|
459
|
+
let buffer = "";
|
|
460
|
+
|
|
461
|
+
while (true) {
|
|
462
|
+
const { done, value } = await reader.read();
|
|
463
|
+
if (done) break;
|
|
464
|
+
|
|
465
|
+
buffer += decoder.decode(value, { stream: true });
|
|
466
|
+
const lines = buffer.split("\n");
|
|
467
|
+
buffer = lines.pop() ?? "";
|
|
468
|
+
|
|
469
|
+
for (const line of lines) {
|
|
470
|
+
const trimmed = line.trim();
|
|
471
|
+
if (!trimmed.startsWith("data: ")) continue;
|
|
472
|
+
const data = trimmed.slice(6);
|
|
473
|
+
if (data === "[DONE]") return;
|
|
474
|
+
|
|
475
|
+
try {
|
|
476
|
+
const chunk: OaiStreamChunk = JSON.parse(data);
|
|
477
|
+
const content = chunk.choices[0]?.delta?.content;
|
|
478
|
+
if (content) yield content;
|
|
479
|
+
} catch {
|
|
480
|
+
// skip malformed chunks
|
|
481
|
+
}
|
|
482
|
+
}
|
|
483
|
+
}
|
|
484
|
+
}
|
package/src/gpt.ts
ADDED
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
import type { Executor, Registry } from "@donghanh/core";
|
|
2
|
+
import { executeOperation } from "@donghanh/core";
|
|
3
|
+
import { Hono } from "hono";
|
|
4
|
+
import type { Authenticate } from "./middleware";
|
|
5
|
+
import { isAuthError } from "./middleware";
|
|
6
|
+
import { renderForChatGpt } from "./renderers/chatgpt";
|
|
7
|
+
|
|
8
|
+
/** Configuration for `gptRoutes`. */
export interface GptRoutesConfig {
  // Operation registry exposed through the REST surface.
  registry: Registry;
  // Executes registry operations on behalf of the authenticated user.
  executor: Executor;
  // Resolves the incoming request to a user id (or an error Response).
  authenticate: Authenticate;
  // Encodes action variables into the compact `$variables` string form.
  encodeVariables?: (vars: Record<string, unknown>) => string;
  // Response schemas keyed by operation id; only "start" is consulted here.
  responseSchemas?: Record<string, object>;
  /** Hook to enrich the JSON response (e.g. inject display text) */
  enrichResponse?: (
    result: Record<string, unknown>,
    opts: {
      operationId: string;
      data: unknown;
      variables: Record<string, unknown>;
      userId: string;
      request: Request;
    },
  ) => Promise<void>;
}
|
|
26
|
+
|
|
27
|
+
function jsonError(message: string, status = 400): Response {
|
|
28
|
+
return new Response(JSON.stringify({ errors: [{ message }] }), {
|
|
29
|
+
status,
|
|
30
|
+
headers: { "content-type": "application/json" },
|
|
31
|
+
});
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
export function gptRoutes(config: GptRoutesConfig): Hono {
|
|
35
|
+
const {
|
|
36
|
+
registry,
|
|
37
|
+
executor,
|
|
38
|
+
authenticate,
|
|
39
|
+
encodeVariables,
|
|
40
|
+
responseSchemas,
|
|
41
|
+
enrichResponse,
|
|
42
|
+
} = config;
|
|
43
|
+
|
|
44
|
+
const app = new Hono();
|
|
45
|
+
|
|
46
|
+
// List operations
|
|
47
|
+
app.get("/operations", (c) => {
|
|
48
|
+
const url = new URL(c.req.url);
|
|
49
|
+
const search = url.searchParams.get("search")?.toLowerCase();
|
|
50
|
+
const typeFilter = url.searchParams.get("type");
|
|
51
|
+
|
|
52
|
+
let operations = registry.list();
|
|
53
|
+
|
|
54
|
+
if (typeFilter || search) {
|
|
55
|
+
operations = operations.filter((op) => {
|
|
56
|
+
if (typeFilter && op.type !== typeFilter) return false;
|
|
57
|
+
if (search) {
|
|
58
|
+
return (
|
|
59
|
+
op.id.includes(search) ||
|
|
60
|
+
op.description.toLowerCase().includes(search)
|
|
61
|
+
);
|
|
62
|
+
}
|
|
63
|
+
return true;
|
|
64
|
+
});
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
return c.json({ operations });
|
|
68
|
+
});
|
|
69
|
+
|
|
70
|
+
// Operation detail
|
|
71
|
+
app.get("/operations/:name", (c) => {
|
|
72
|
+
const name = c.req.param("name");
|
|
73
|
+
const detail = registry.detail(name);
|
|
74
|
+
|
|
75
|
+
if (!detail) {
|
|
76
|
+
return c.json(
|
|
77
|
+
{
|
|
78
|
+
error: `Unknown operation "${name}". Call GET /api/gpt/operations to list available operations.`,
|
|
79
|
+
},
|
|
80
|
+
404,
|
|
81
|
+
);
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
return c.json(detail);
|
|
85
|
+
});
|
|
86
|
+
|
|
87
|
+
// Execute query
|
|
88
|
+
app.get("/query/:operation", async (c) => {
|
|
89
|
+
const authResult = await authenticate(c.req.raw);
|
|
90
|
+
if (isAuthError(authResult)) return authResult.error;
|
|
91
|
+
|
|
92
|
+
const operationId = c.req.param("operation");
|
|
93
|
+
const op = registry.get(operationId);
|
|
94
|
+
|
|
95
|
+
if (!op) {
|
|
96
|
+
return jsonError(
|
|
97
|
+
`Unknown operation "${operationId}". Call GET /api/gpt/operations to discover available operations.`,
|
|
98
|
+
);
|
|
99
|
+
}
|
|
100
|
+
if (op.operationConfig.type !== "query") {
|
|
101
|
+
return jsonError(
|
|
102
|
+
`"${operationId}" is a mutation. Use POST /api/gpt/mutate/${operationId} instead.`,
|
|
103
|
+
);
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
return handleExecution(c.req.raw, authResult.userId, operationId, () => {
|
|
107
|
+
const url = new URL(c.req.url);
|
|
108
|
+
return url.searchParams.get("$variables") ?? undefined;
|
|
109
|
+
});
|
|
110
|
+
});
|
|
111
|
+
|
|
112
|
+
// Execute mutation
|
|
113
|
+
app.post("/mutate/:operation", async (c) => {
|
|
114
|
+
const authResult = await authenticate(c.req.raw);
|
|
115
|
+
if (isAuthError(authResult)) return authResult.error;
|
|
116
|
+
|
|
117
|
+
const operationId = c.req.param("operation");
|
|
118
|
+
const op = registry.get(operationId);
|
|
119
|
+
|
|
120
|
+
if (!op) {
|
|
121
|
+
return jsonError(
|
|
122
|
+
`Unknown operation "${operationId}". Call GET /api/gpt/operations to discover available operations.`,
|
|
123
|
+
);
|
|
124
|
+
}
|
|
125
|
+
if (op.operationConfig.type !== "mutation") {
|
|
126
|
+
return jsonError(
|
|
127
|
+
`"${operationId}" is a query. Use GET /api/gpt/query/${operationId} instead.`,
|
|
128
|
+
);
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
return handleExecution(
|
|
132
|
+
c.req.raw,
|
|
133
|
+
authResult.userId,
|
|
134
|
+
operationId,
|
|
135
|
+
async () => {
|
|
136
|
+
const body = await c.req.json();
|
|
137
|
+
return body.$variables;
|
|
138
|
+
},
|
|
139
|
+
);
|
|
140
|
+
});
|
|
141
|
+
|
|
142
|
+
async function handleExecution(
|
|
143
|
+
request: Request,
|
|
144
|
+
userId: string,
|
|
145
|
+
operationId: string,
|
|
146
|
+
getVariables: () => string | undefined | Promise<string | undefined>,
|
|
147
|
+
): Promise<Response> {
|
|
148
|
+
let rawVariables: string | undefined;
|
|
149
|
+
try {
|
|
150
|
+
rawVariables = await getVariables();
|
|
151
|
+
} catch {
|
|
152
|
+
return jsonError("Invalid request body");
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
let variables: Record<string, unknown> = {};
|
|
156
|
+
if (typeof rawVariables === "string" && encodeVariables) {
|
|
157
|
+
// Decode using the inverse - we expect TOON format
|
|
158
|
+
// The app provides its own decode in the executor or via a decode option
|
|
159
|
+
// For now, pass raw through and let the executor handle it
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
// We need a decode function. Let's accept raw variables as TOON or JSON.
|
|
163
|
+
if (typeof rawVariables === "string") {
|
|
164
|
+
try {
|
|
165
|
+
// Try JSON first
|
|
166
|
+
variables = JSON.parse(rawVariables);
|
|
167
|
+
} catch {
|
|
168
|
+
// Pass as-is, let the app's decode handle it
|
|
169
|
+
// Store raw so executor can decode
|
|
170
|
+
variables = { $raw: rawVariables };
|
|
171
|
+
}
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
try {
|
|
175
|
+
const { data, brief } = await executeOperation({
|
|
176
|
+
registry,
|
|
177
|
+
operationId,
|
|
178
|
+
variables,
|
|
179
|
+
executor,
|
|
180
|
+
context: { userId, request },
|
|
181
|
+
});
|
|
182
|
+
|
|
183
|
+
// Render JSX tree through ChatGPT renderer
|
|
184
|
+
const rendered = renderForChatGpt(brief, {
|
|
185
|
+
encodeVariables,
|
|
186
|
+
getOperationInput: (id) => {
|
|
187
|
+
const detail = registry.detail(id);
|
|
188
|
+
return detail?.input;
|
|
189
|
+
},
|
|
190
|
+
});
|
|
191
|
+
|
|
192
|
+
// Build response
|
|
193
|
+
const result: Record<string, unknown> = {
|
|
194
|
+
data: {
|
|
195
|
+
[registry.get(operationId)!.operationConfig.responseKey]: data,
|
|
196
|
+
},
|
|
197
|
+
};
|
|
198
|
+
|
|
199
|
+
if (rendered.suggestedActions.length > 0) {
|
|
200
|
+
result.suggestedActions = rendered.suggestedActions;
|
|
201
|
+
}
|
|
202
|
+
if (rendered.nextSteps) {
|
|
203
|
+
result.nextSteps = rendered.nextSteps;
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
// Enrich response (inject display, etc)
|
|
207
|
+
if (enrichResponse) {
|
|
208
|
+
await enrichResponse(result, {
|
|
209
|
+
operationId,
|
|
210
|
+
data,
|
|
211
|
+
variables,
|
|
212
|
+
userId,
|
|
213
|
+
request,
|
|
214
|
+
});
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
// Include response schema for start
|
|
218
|
+
if (operationId === "start" && responseSchemas?.start) {
|
|
219
|
+
result.responseSchema = responseSchemas.start;
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
// Always include operations list
|
|
223
|
+
result.operations = registry.list();
|
|
224
|
+
|
|
225
|
+
return new Response(JSON.stringify(result), {
|
|
226
|
+
headers: { "content-type": "application/json" },
|
|
227
|
+
});
|
|
228
|
+
} catch (err: any) {
|
|
229
|
+
const message = err?.message ?? "Internal error";
|
|
230
|
+
|
|
231
|
+
const errorResult: Record<string, unknown> = {
|
|
232
|
+
errors: [{ message }],
|
|
233
|
+
};
|
|
234
|
+
|
|
235
|
+
// Include operation detail for self-correction
|
|
236
|
+
const detail = registry.detail(operationId);
|
|
237
|
+
if (detail) {
|
|
238
|
+
errorResult.operationDetail = {
|
|
239
|
+
instruction: detail.instruction,
|
|
240
|
+
input: detail.input,
|
|
241
|
+
};
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
return new Response(JSON.stringify(errorResult), {
|
|
245
|
+
status: 400,
|
|
246
|
+
headers: { "content-type": "application/json" },
|
|
247
|
+
});
|
|
248
|
+
}
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
return app;
|
|
252
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
export type { ChatMessage, ChatRoutesConfig } from "./chat";
|
|
2
|
+
export { chatRoutes } from "./chat";
|
|
3
|
+
export type { GptRoutesConfig } from "./gpt";
|
|
4
|
+
export { gptRoutes } from "./gpt";
|
|
5
|
+
export type { AuthError, Authenticate, AuthResult } from "./middleware";
|
|
6
|
+
export { isAuthError } from "./middleware";
|
|
7
|
+
export type { ChatGptResult, SuggestedAction } from "./renderers/chatgpt";
|
|
8
|
+
export { renderForChatGpt } from "./renderers/chatgpt";
|
|
9
|
+
export type { LlmAction, LlmResult } from "./renderers/llm";
|
|
10
|
+
export { renderForLlm } from "./renderers/llm";
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
export interface AuthResult {
|
|
2
|
+
userId: string;
|
|
3
|
+
}
|
|
4
|
+
|
|
5
|
+
export interface AuthError {
|
|
6
|
+
error: Response;
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
export type Authenticate = (
|
|
10
|
+
request: Request,
|
|
11
|
+
) => Promise<AuthResult | AuthError>;
|
|
12
|
+
|
|
13
|
+
export function isAuthError(
|
|
14
|
+
result: AuthResult | AuthError,
|
|
15
|
+
): result is AuthError {
|
|
16
|
+
return "error" in result;
|
|
17
|
+
}
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
ActionNode,
|
|
3
|
+
BriefNode,
|
|
4
|
+
ChatNode,
|
|
5
|
+
ContextNode,
|
|
6
|
+
DisplayNode,
|
|
7
|
+
FormNode,
|
|
8
|
+
MessageNode,
|
|
9
|
+
Renderer,
|
|
10
|
+
} from "@donghanh/core";
|
|
11
|
+
import { renderNode } from "@donghanh/core";
|
|
12
|
+
|
|
13
|
+
/** JSON payload shape returned to ChatGPT after an operation runs. */
export interface ChatGptResult {
  // Prose guidance for the model on what to do next (joined message nodes).
  nextSteps?: string;
  suggestedActions: SuggestedAction[];
  // Rendered display payload; a single value, or (despite the type) an array
  // when several display nodes were emitted — see renderForChatGpt.
  display?: string;
  context?: unknown[];
}

/** A follow-up operation ChatGPT may invoke, surfaced in the response. */
export interface SuggestedAction {
  // Operation id the action maps to.
  id: string;
  type: string;
  description: string;
  variables?: Record<string, unknown>;
  // Pre-encoded variables string (see the encodeVariables option).
  $variables?: string;
  // JSON-schema input of the target operation, when resolvable.
  input?: object;
}

/** Intermediate value produced per node while walking the brief tree. */
interface Fragment {
  kind: "message" | "action" | "display" | "context";
  value: unknown;
}
|
|
33
|
+
|
|
34
|
+
const chatGptRenderer: Renderer<Fragment | Fragment[]> = {
|
|
35
|
+
brief(_node: BriefNode, children: (Fragment | Fragment[])[]): Fragment[] {
|
|
36
|
+
return children.flat();
|
|
37
|
+
},
|
|
38
|
+
|
|
39
|
+
message(node: MessageNode): Fragment {
|
|
40
|
+
return { kind: "message", value: node.content };
|
|
41
|
+
},
|
|
42
|
+
|
|
43
|
+
action(node: ActionNode): Fragment {
|
|
44
|
+
const action: SuggestedAction = {
|
|
45
|
+
id: node.operation,
|
|
46
|
+
type: "action",
|
|
47
|
+
description: node.label,
|
|
48
|
+
variables: node.variables,
|
|
49
|
+
};
|
|
50
|
+
return { kind: "action", value: action };
|
|
51
|
+
},
|
|
52
|
+
|
|
53
|
+
form(_node: FormNode): Fragment {
|
|
54
|
+
// Forms are exposed via operation detail, not in response
|
|
55
|
+
return { kind: "context", value: null };
|
|
56
|
+
},
|
|
57
|
+
|
|
58
|
+
display(node: DisplayNode): Fragment {
|
|
59
|
+
return { kind: "display", value: node.data };
|
|
60
|
+
},
|
|
61
|
+
|
|
62
|
+
context(node: ContextNode): Fragment {
|
|
63
|
+
return { kind: "context", value: node.value };
|
|
64
|
+
},
|
|
65
|
+
};
|
|
66
|
+
|
|
67
|
+
export function renderForChatGpt(
|
|
68
|
+
brief: ChatNode,
|
|
69
|
+
opts?: {
|
|
70
|
+
encodeVariables?: (vars: Record<string, unknown>) => string;
|
|
71
|
+
getOperationInput?: (id: string) => object | undefined;
|
|
72
|
+
},
|
|
73
|
+
): ChatGptResult {
|
|
74
|
+
const fragments = renderNode(brief, chatGptRenderer) as Fragment | Fragment[];
|
|
75
|
+
const flat = Array.isArray(fragments) ? fragments : [fragments];
|
|
76
|
+
|
|
77
|
+
const messages: string[] = [];
|
|
78
|
+
const actions: SuggestedAction[] = [];
|
|
79
|
+
const displays: unknown[] = [];
|
|
80
|
+
const contexts: unknown[] = [];
|
|
81
|
+
|
|
82
|
+
for (const f of flat) {
|
|
83
|
+
if (f.value == null) continue;
|
|
84
|
+
switch (f.kind) {
|
|
85
|
+
case "message":
|
|
86
|
+
messages.push(f.value as string);
|
|
87
|
+
break;
|
|
88
|
+
case "action": {
|
|
89
|
+
const action = f.value as SuggestedAction;
|
|
90
|
+
if (action.variables && opts?.encodeVariables) {
|
|
91
|
+
action.$variables = opts.encodeVariables(action.variables);
|
|
92
|
+
}
|
|
93
|
+
if (opts?.getOperationInput) {
|
|
94
|
+
action.input = opts.getOperationInput(action.id) ?? {};
|
|
95
|
+
}
|
|
96
|
+
actions.push(action);
|
|
97
|
+
break;
|
|
98
|
+
}
|
|
99
|
+
case "display":
|
|
100
|
+
displays.push(f.value);
|
|
101
|
+
break;
|
|
102
|
+
case "context":
|
|
103
|
+
contexts.push(f.value);
|
|
104
|
+
break;
|
|
105
|
+
}
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
const result: ChatGptResult = {
|
|
109
|
+
suggestedActions: actions,
|
|
110
|
+
};
|
|
111
|
+
|
|
112
|
+
if (messages.length > 0) {
|
|
113
|
+
result.nextSteps = messages.join(" ");
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
if (displays.length === 1) {
|
|
117
|
+
result.display = displays[0] as string;
|
|
118
|
+
} else if (displays.length > 1) {
|
|
119
|
+
result.display = displays as any;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
if (contexts.length > 0) {
|
|
123
|
+
result.context = contexts;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
return result;
|
|
127
|
+
}
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
ActionNode,
|
|
3
|
+
BriefNode,
|
|
4
|
+
ChatNode,
|
|
5
|
+
ContextNode,
|
|
6
|
+
DisplayNode,
|
|
7
|
+
FormNode,
|
|
8
|
+
MessageNode,
|
|
9
|
+
Renderer,
|
|
10
|
+
} from "@donghanh/core";
|
|
11
|
+
import { renderNode } from "@donghanh/core";
|
|
12
|
+
|
|
13
|
+
/** Result of rendering a brief tree for an LLM tool-call response. */
export interface LlmResult {
  // Flattened prose: messages, displays, then a suggested-actions list.
  text: string;
  actions: LlmAction[];
  context?: unknown[];
}

/** An action the client may surface as a button after the LLM's reply. */
export interface LlmAction {
  operation: string;
  label: string;
  variables?: Record<string, unknown>;
}

/** Intermediate value produced per node while walking the brief tree. */
interface Fragment {
  kind: "message" | "action" | "display" | "context";
  value: unknown;
}
|
|
29
|
+
|
|
30
|
+
const llmRenderer: Renderer<Fragment | Fragment[]> = {
|
|
31
|
+
brief(_node: BriefNode, children: (Fragment | Fragment[])[]): Fragment[] {
|
|
32
|
+
return children.flat();
|
|
33
|
+
},
|
|
34
|
+
|
|
35
|
+
message(node: MessageNode): Fragment {
|
|
36
|
+
return { kind: "message", value: node.content };
|
|
37
|
+
},
|
|
38
|
+
|
|
39
|
+
action(node: ActionNode): Fragment {
|
|
40
|
+
const action: LlmAction = {
|
|
41
|
+
operation: node.operation,
|
|
42
|
+
label: node.label,
|
|
43
|
+
variables: node.variables,
|
|
44
|
+
};
|
|
45
|
+
return { kind: "action", value: action };
|
|
46
|
+
},
|
|
47
|
+
|
|
48
|
+
form(_node: FormNode): Fragment {
|
|
49
|
+
return { kind: "context", value: null };
|
|
50
|
+
},
|
|
51
|
+
|
|
52
|
+
display(node: DisplayNode): Fragment {
|
|
53
|
+
return { kind: "display", value: node.data };
|
|
54
|
+
},
|
|
55
|
+
|
|
56
|
+
context(node: ContextNode): Fragment {
|
|
57
|
+
return { kind: "context", value: node.value };
|
|
58
|
+
},
|
|
59
|
+
};
|
|
60
|
+
|
|
61
|
+
export function renderForLlm(brief: ChatNode): LlmResult {
|
|
62
|
+
const fragments = renderNode(brief, llmRenderer) as Fragment | Fragment[];
|
|
63
|
+
const flat = Array.isArray(fragments) ? fragments : [fragments];
|
|
64
|
+
|
|
65
|
+
const messages: string[] = [];
|
|
66
|
+
const actions: LlmAction[] = [];
|
|
67
|
+
const displays: string[] = [];
|
|
68
|
+
const contexts: unknown[] = [];
|
|
69
|
+
|
|
70
|
+
for (const f of flat) {
|
|
71
|
+
if (f.value == null) continue;
|
|
72
|
+
switch (f.kind) {
|
|
73
|
+
case "message":
|
|
74
|
+
messages.push(f.value as string);
|
|
75
|
+
break;
|
|
76
|
+
case "action":
|
|
77
|
+
actions.push(f.value as LlmAction);
|
|
78
|
+
break;
|
|
79
|
+
case "display":
|
|
80
|
+
displays.push(
|
|
81
|
+
typeof f.value === "string" ? f.value : JSON.stringify(f.value),
|
|
82
|
+
);
|
|
83
|
+
break;
|
|
84
|
+
case "context":
|
|
85
|
+
contexts.push(f.value);
|
|
86
|
+
break;
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
const parts: string[] = [];
|
|
91
|
+
if (messages.length > 0) parts.push(messages.join("\n"));
|
|
92
|
+
if (displays.length > 0) parts.push(displays.join("\n"));
|
|
93
|
+
|
|
94
|
+
if (actions.length > 0) {
|
|
95
|
+
const actionList = actions
|
|
96
|
+
.map((a) => `- ${a.label} (${a.operation})`)
|
|
97
|
+
.join("\n");
|
|
98
|
+
parts.push(`Suggested actions:\n${actionList}`);
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
return {
|
|
102
|
+
text: parts.join("\n\n"),
|
|
103
|
+
actions,
|
|
104
|
+
context: contexts.length > 0 ? contexts : undefined,
|
|
105
|
+
};
|
|
106
|
+
}
|
package/tsconfig.json
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ES2017",
|
|
4
|
+
"module": "esnext",
|
|
5
|
+
"moduleResolution": "bundler",
|
|
6
|
+
"strict": true,
|
|
7
|
+
"noEmit": true,
|
|
8
|
+
"esModuleInterop": true,
|
|
9
|
+
"isolatedModules": true,
|
|
10
|
+
"jsx": "react-jsx",
|
|
11
|
+
"jsxImportSource": "@donghanh/core",
|
|
12
|
+
"skipLibCheck": true
|
|
13
|
+
},
|
|
14
|
+
"include": ["src"]
|
|
15
|
+
}
|