@promptbook/cli 0.103.0-54 → 0.103.0-56
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/apps/agents-server/config.ts +0 -2
- package/apps/agents-server/package-lock.json +1163 -0
- package/apps/agents-server/package.json +6 -0
- package/apps/agents-server/src/app/admin/chat-feedback/ChatFeedbackClient.tsx +79 -6
- package/apps/agents-server/src/app/admin/chat-history/ChatHistoryClient.tsx +171 -69
- package/apps/agents-server/src/app/agents/[agentName]/AgentChatWrapper.tsx +3 -1
- package/apps/agents-server/src/app/agents/[agentName]/AgentOptionsMenu.tsx +216 -0
- package/apps/agents-server/src/app/agents/[agentName]/AgentProfileChat.tsx +78 -0
- package/apps/agents-server/src/app/agents/[agentName]/AgentProfileView.tsx +233 -0
- package/apps/agents-server/src/app/agents/[agentName]/CloneAgentButton.tsx +4 -4
- package/apps/agents-server/src/app/agents/[agentName]/InstallPwaButton.tsx +2 -2
- package/apps/agents-server/src/app/agents/[agentName]/QrCodeModal.tsx +90 -0
- package/apps/agents-server/src/app/agents/[agentName]/agentLinks.tsx +80 -0
- package/apps/agents-server/src/app/agents/[agentName]/api/book/route.ts +3 -1
- package/apps/agents-server/src/app/agents/[agentName]/api/chat/route.ts +11 -1
- package/apps/agents-server/src/app/agents/[agentName]/api/mcp/route.ts +203 -0
- package/apps/agents-server/src/app/agents/[agentName]/api/modelRequirements/route.ts +3 -1
- package/apps/agents-server/src/app/agents/[agentName]/api/modelRequirements/systemMessage/route.ts +3 -1
- package/apps/agents-server/src/app/agents/[agentName]/api/openai/chat/completions/route.ts +3 -169
- package/apps/agents-server/src/app/agents/[agentName]/api/openai/models/route.ts +93 -0
- package/apps/agents-server/src/app/agents/[agentName]/api/openai/v1/chat/completions/route.ts +10 -0
- package/apps/agents-server/src/app/agents/[agentName]/api/openai/v1/models/route.ts +93 -0
- package/apps/agents-server/src/app/agents/[agentName]/api/openrouter/chat/completions/route.ts +10 -0
- package/apps/agents-server/src/app/agents/[agentName]/api/voice/route.ts +4 -0
- package/apps/agents-server/src/app/agents/[agentName]/chat/page.tsx +9 -2
- package/apps/agents-server/src/app/agents/[agentName]/integration/SdkCodeTabs.tsx +31 -0
- package/apps/agents-server/src/app/agents/[agentName]/integration/page.tsx +271 -30
- package/apps/agents-server/src/app/agents/[agentName]/links/page.tsx +182 -0
- package/apps/agents-server/src/app/agents/[agentName]/page.tsx +108 -165
- package/apps/agents-server/src/app/agents/[agentName]/website-integration/page.tsx +61 -0
- package/apps/agents-server/src/app/api/auth/change-password/route.ts +75 -0
- package/apps/agents-server/src/app/api/chat-feedback/export/route.ts +55 -0
- package/apps/agents-server/src/app/api/chat-history/export/route.ts +55 -0
- package/apps/agents-server/src/app/api/openai/v1/chat/completions/route.ts +6 -0
- package/apps/agents-server/src/app/api/openai/v1/models/route.ts +65 -0
- package/apps/agents-server/src/app/docs/[docId]/page.tsx +12 -32
- package/apps/agents-server/src/app/docs/page.tsx +42 -17
- package/apps/agents-server/src/app/globals.css +129 -0
- package/apps/agents-server/src/app/layout.tsx +8 -2
- package/apps/agents-server/src/app/manifest.ts +1 -1
- package/apps/agents-server/src/components/ChangePasswordDialog/ChangePasswordDialog.tsx +41 -0
- package/apps/agents-server/src/components/ChangePasswordForm/ChangePasswordForm.tsx +159 -0
- package/apps/agents-server/src/components/DocumentationContent/DocumentationContent.tsx +87 -0
- package/apps/agents-server/src/components/Header/Header.tsx +94 -38
- package/apps/agents-server/src/components/OpenMojiIcon/OpenMojiIcon.tsx +20 -0
- package/apps/agents-server/src/components/PrintButton/PrintButton.tsx +18 -0
- package/apps/agents-server/src/components/PrintHeader/PrintHeader.tsx +18 -0
- package/apps/agents-server/src/database/migrations/2025-12-0070-chat-history-source.sql +2 -0
- package/apps/agents-server/src/database/schema.ts +6 -0
- package/apps/agents-server/src/middleware.ts +1 -1
- package/apps/agents-server/src/utils/convertToCsv.ts +31 -0
- package/apps/agents-server/src/utils/handleChatCompletion.ts +355 -0
- package/apps/agents-server/src/utils/resolveInheritedAgentSource.ts +100 -0
- package/apps/agents-server/src/utils/validateApiKey.ts +128 -0
- package/apps/agents-server/tailwind.config.ts +1 -1
- package/esm/index.es.js +1188 -175
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +5 -0
- package/esm/typings/src/commitments/CLOSED/CLOSED.d.ts +35 -0
- package/esm/typings/src/commitments/COMPONENT/COMPONENT.d.ts +28 -0
- package/esm/typings/src/commitments/FROM/FROM.d.ts +34 -0
- package/esm/typings/src/commitments/LANGUAGE/LANGUAGE.d.ts +35 -0
- package/esm/typings/src/commitments/META_COLOR/META_COLOR.d.ts +6 -0
- package/esm/typings/src/commitments/META_FONT/META_FONT.d.ts +42 -0
- package/esm/typings/src/commitments/OPEN/OPEN.d.ts +35 -0
- package/esm/typings/src/commitments/USE/USE.d.ts +53 -0
- package/esm/typings/src/commitments/USE_BROWSER/USE_BROWSER.d.ts +38 -0
- package/esm/typings/src/commitments/USE_BROWSER/USE_BROWSER.test.d.ts +1 -0
- package/esm/typings/src/commitments/USE_MCP/USE_MCP.d.ts +37 -0
- package/esm/typings/src/commitments/USE_SEARCH_ENGINE/USE_SEARCH_ENGINE.d.ts +38 -0
- package/esm/typings/src/commitments/index.d.ts +12 -1
- package/esm/typings/src/playground/playground.d.ts +3 -0
- package/esm/typings/src/utils/color/Color.d.ts +8 -0
- package/esm/typings/src/utils/color/css-colors.d.ts +1 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +1180 -167
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/playground/playground1.d.ts +0 -2
package/apps/agents-server/src/app/agents/[agentName]/agentLinks.tsx
ADDED
@@ -0,0 +1,80 @@
+import {
+    BookOpenIcon,
+    CodeIcon,
+    GlobeIcon,
+    HistoryIcon,
+    LinkIcon,
+    MessageSquareIcon,
+    NotebookPenIcon,
+} from 'lucide-react';
+import type { ComponentType } from 'react';
+
+type AgentLink = {
+    title: string;
+    href: string;
+    icon: ComponentType<{ className?: string }>;
+    description?: string;
+    target?: '_blank' | '_self';
+    rel?: string;
+};
+
+export const getAgentLinks = (agentName: string): AgentLink[] => {
+    const encodedName = encodeURIComponent(agentName);
+    return [
+        {
+            title: 'Chat with Agent',
+            href: `/agents/${encodedName}`,
+            icon: MessageSquareIcon,
+            description: 'Direct interface to converse with the agent.',
+        },
+        {
+            title: 'Edit Book',
+            href: `/agents/${encodedName}/book`,
+            icon: NotebookPenIcon,
+            description: "Edit the agent's knowledge book.",
+        },
+        {
+            title: 'Integration',
+            href: `/agents/${encodedName}/integration`,
+            icon: CodeIcon,
+            description: 'Learn how to integrate this agent into your applications.',
+        },
+        {
+            title: 'History & Feedback',
+            href: `/agents/${encodedName}/history`,
+            icon: HistoryIcon,
+            description: 'View past conversations and provide feedback.',
+        },
+        {
+            title: 'All Links',
+            href: `/agents/${encodedName}/links`,
+            icon: LinkIcon,
+            description: 'Signpost & Links',
+        },
+        {
+            title: 'Website Integration',
+            href: `/agents/${encodedName}/website-integration`,
+            icon: GlobeIcon,
+            description: 'Embed the agent chat widget directly into your React application.',
+        },
+    ];
+};
+
+export const getAgentExternalLinks = (): AgentLink[] => [
+    {
+        title: 'Promptbook Studio',
+        href: 'https://promptbook.studio',
+        icon: BookOpenIcon,
+        description: 'Create and manage your own agents',
+        target: '_blank',
+        rel: 'noopener noreferrer',
+    },
+    {
+        title: 'GitHub Repository',
+        href: 'https://github.com/webgptorg/promptbook',
+        icon: CodeIcon,
+        description: 'Star us and contribute to the project',
+        target: '_blank',
+        rel: 'noopener noreferrer',
+    },
+];
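The new agentLinks.tsx centralizes the per-agent navigation targets used by the profile and options-menu components added in this release. A minimal sketch of how a list component could consume it; the component below is illustrative and not part of the package, and the real AgentOptionsMenu.tsx / links/page.tsx may differ:

import Link from 'next/link';
import { getAgentExternalLinks, getAgentLinks } from './agentLinks';

// Hypothetical consumer of getAgentLinks/getAgentExternalLinks.
export function AgentLinksList({ agentName }: { agentName: string }) {
    const links = [...getAgentLinks(agentName), ...getAgentExternalLinks()];
    return (
        <ul>
            {links.map(({ title, href, icon: Icon, description, target, rel }) => (
                <li key={href}>
                    <Link href={href} target={target} rel={rel}>
                        <Icon className="inline h-4 w-4" /> {title}
                    </Link>
                    {description && <span> ({description})</span>}
                </li>
            ))}
        </ul>
    );
}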
package/apps/agents-server/src/app/agents/[agentName]/api/book/route.ts
CHANGED
@@ -1,4 +1,5 @@
 import { $provideAgentCollectionForServer } from '@/src/tools/$provideAgentCollectionForServer';
+import { resolveInheritedAgentSource } from '@/src/utils/resolveInheritedAgentSource';
 import { padBook, validateBook } from '@promptbook-local/core';
 import { serializeError } from '@promptbook-local/utils';
 import spaceTrim from 'spacetrim';
@@ -13,8 +14,9 @@ export async function GET(request: Request, { params }: { params: Promise<{ agen
     try {
         const collection = await $provideAgentCollectionForServer();
         const agentSource = await collection.getAgentSource(agentName);
+        const effectiveAgentSource = await resolveInheritedAgentSource(agentSource, collection);
 
-        return new Response(
+        return new Response(effectiveAgentSource, {
             status: 200,
             headers: { 'Content-Type': 'text/plain' /* <- TODO: [🎳] Mime type of book */ },
         });
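With this change the book endpoint returns the inheritance-resolved source rather than the raw stored book. A minimal client-side sketch of reading it; the host name is a placeholder:

// Fetch the effective (inheritance-resolved) book for an agent as plain text.
async function fetchAgentBook(agentName: string): Promise<string> {
    const response = await fetch(`https://example.com/agents/${encodeURIComponent(agentName)}/api/book`);
    if (!response.ok) {
        throw new Error(`Failed to fetch book: ${response.status}`);
    }
    return response.text(); // Served with Content-Type: text/plain
}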
package/apps/agents-server/src/app/agents/[agentName]/api/chat/route.ts
CHANGED
@@ -77,11 +77,15 @@ export async function POST(request: Request, { params }: { params: Promise<{ age
             userAgent,
             language,
             platform,
+            source: 'AGENT_PAGE_CHAT',
+            apiKey: null,
         });
 
         const encoder = new TextEncoder();
         const readableStream = new ReadableStream({
             start(controller) {
+                let previousContent = '';
+
                 agent.callChatModelStream!(
                     {
                         title: `Chat with agent ${
@@ -95,7 +99,11 @@ export async function POST(request: Request, { params }: { params: Promise<{ age
                         thread,
                     },
                     (chunk) => {
-
+                        const fullContent = chunk.content;
+                        const deltaContent = fullContent.substring(previousContent.length);
+                        previousContent = fullContent;
+
+                        controller.enqueue(encoder.encode(deltaContent));
                     },
                 )
                     .then(async (response) => {
@@ -119,6 +127,8 @@ export async function POST(request: Request, { params }: { params: Promise<{ age
             userAgent,
             language,
             platform,
+            source: 'AGENT_PAGE_CHAT',
+            apiKey: null,
         });
 
         // Note: [🐱🚀] Save the learned data
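The streaming callback receives the full accumulated text on every chunk, so the route tracks previousContent and forwards only the newly appended suffix to the client. The same delta logic in isolation, as a standalone sketch with illustrative names:

// Wrap an emitter so repeated calls with growing full text only emit the appended suffix.
function makeDeltaEmitter(emit: (delta: string) => void) {
    let previousContent = '';
    return (fullContent: string) => {
        const deltaContent = fullContent.substring(previousContent.length);
        previousContent = fullContent;
        if (deltaContent) {
            emit(deltaContent);
        }
    };
}

// Example: onChunk('Hel') emits 'Hel'; onChunk('Hello') then emits only 'lo'.
const onChunk = makeDeltaEmitter((delta) => process.stdout.write(delta));
onChunk('Hel');
onChunk('Hello');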
package/apps/agents-server/src/app/agents/[agentName]/api/mcp/route.ts
ADDED
@@ -0,0 +1,203 @@
+import { $provideAgentCollectionForServer } from '@/src/tools/$provideAgentCollectionForServer';
+import { $provideExecutionToolsForServer } from '@/src/tools/$provideExecutionToolsForServer';
+import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
+import { Transport } from '@modelcontextprotocol/sdk/shared/transport.js';
+import { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';
+import { Agent } from '@promptbook-local/core';
+import { ChatMessage, ChatPromptResult, Prompt, TODO_any } from '@promptbook-local/types';
+import { NextRequest, NextResponse } from 'next/server';
+import { z } from 'zod';
+
+// Global map to store active transports
+// Note: This works in stateful environments or single-instance deployments.
+// In serverless with multiple instances, this will fail if POST lands on a different instance.
+// However, for standard deployments or sticky sessions, it works.
+const sessions = new Map<string, SSENextJsTransport>();
+
+class SSENextJsTransport implements Transport {
+    public onmessage?: (message: JSONRPCMessage) => void;
+    public onclose?: () => void;
+    public onError?: (error: Error) => void;
+    private controller: ReadableStreamDefaultController<TODO_any>;
+    private encoder = new TextEncoder();
+    public sessionId: string;
+
+    constructor(controller: ReadableStreamDefaultController<TODO_any>, sessionId: string) {
+        this.controller = controller;
+        this.sessionId = sessionId;
+    }
+
+    async start(): Promise<void> {
+        // No-op for SSE
+    }
+
+    async close(): Promise<void> {
+        this.onclose?.();
+        sessions.delete(this.sessionId);
+    }
+
+    async send(message: JSONRPCMessage): Promise<void> {
+        const event = `event: message\ndata: ${JSON.stringify(message)}\n\n`;
+        this.controller.enqueue(this.encoder.encode(event));
+    }
+
+    async handlePostMessage(message: JSONRPCMessage): Promise<void> {
+        this.onmessage?.(message);
+    }
+
+    sendEndpointEvent(endpoint: string) {
+        const event = `event: endpoint\ndata: ${endpoint}\n\n`;
+        this.controller.enqueue(this.encoder.encode(event));
+    }
+}
+
+export async function GET(
+    request: NextRequest,
+    { params }: { params: Promise<{ agentName: string }> },
+) {
+    const { agentName } = await params;
+
+    // Check if agent exists
+    try {
+        const collection = await $provideAgentCollectionForServer();
+        await collection.getAgentSource(agentName);
+    } catch (error) {
+        return NextResponse.json({ error: 'Agent not found' }, { status: 404 });
+    }
+
+    const sessionId = crypto.randomUUID();
+
+    const stream = new ReadableStream({
+        start: async (controller) => {
+            const transport = new SSENextJsTransport(controller, sessionId);
+            sessions.set(sessionId, transport);
+
+            // Send endpoint event
+            // Construct the POST endpoint URL
+            // We assume the client can construct it or we send relative/absolute path
+            // The user wants: /agents/[agentName]/api/mcp
+            // We can send specific query param
+            const endpoint = `/agents/${agentName}/api/mcp?sessionId=${sessionId}`;
+            transport.sendEndpointEvent(endpoint);
+
+            // Initialize MCP Server
+            const server = new McpServer({
+                name: `Agent ${agentName}`,
+                version: '1.0.0',
+            });
+
+            // Register Chat Tool
+            server.tool(
+                'chat',
+                {
+                    messages: z.array(
+                        z.object({
+                            role: z.enum(['user', 'assistant', 'system']),
+                            content: z.string(),
+                        }),
+                    ),
+                    model: z.string().optional(),
+                },
+                async ({ messages, model }) => {
+                    try {
+                        const collection = await $provideAgentCollectionForServer();
+                        const agentSource = await collection.getAgentSource(agentName);
+
+                        const executionTools = await $provideExecutionToolsForServer();
+                        const agent = new Agent({
+                            agentSource,
+                            executionTools,
+                            isVerbose: true,
+                        });
+
+                        // Prepare thread and content
+                        const lastMessage = messages[messages.length - 1];
+                        const previousMessages = messages.slice(0, -1);
+
+                        const thread: ChatMessage[] = previousMessages.map((msg: TODO_any, index: number) => ({
+                            id: `msg-${index}`,
+                            from: msg.role === 'assistant' ? 'agent' : 'user', // Mapping standard roles
+                            content: msg.content,
+                            isComplete: true,
+                            date: new Date(),
+                        }));
+
+                        const prompt: Prompt = {
+                            title: 'MCP Chat Completion',
+                            content: lastMessage.content,
+                            modelRequirements: {
+                                modelVariant: 'CHAT',
+                            },
+                            parameters: {},
+                            thread,
+                        } as Prompt;
+
+                        const result = await agent.callChatModel(prompt);
+
+                        return {
+                            content: [
+                                {
+                                    type: 'text',
+                                    text: result.content,
+                                },
+                            ],
+                        };
+                    } catch (error) {
+                        return {
+                            content: [
+                                {
+                                    type: 'text',
+                                    text: `Error: ${(error as Error).message}`,
+                                },
+                            ],
+                            isError: true,
+                        };
+                    }
+                },
+            );
+
+            await server.connect(transport);
+
+            // Handle connection close
+            // In ReadableStream, verify if there is a way to detect close from client side in Next.js?
+            // Usually if client disconnects, the stream might be cancelled.
+            // But we don't have a direct hook here unless we return logic in 'cancel'.
+        },
+        cancel() {
+            sessions.delete(sessionId);
+        },
+    });
+
+    return new Response(stream, {
+        headers: {
+            'Content-Type': 'text/event-stream',
+            'Cache-Control': 'no-cache',
+            Connection: 'keep-alive',
+        },
+    });
+}
+
+export async function POST(
+    request: NextRequest,
+    { params }: { params: Promise<{ agentName: string }> },
+) {
+    const { searchParams } = new URL(request.url);
+    const sessionId = searchParams.get('sessionId');
+
+    if (!sessionId) {
+        return NextResponse.json({ error: 'Session ID required' }, { status: 400 });
+    }
+
+    const transport = sessions.get(sessionId);
+    if (!transport) {
+        return NextResponse.json({ error: 'Session not found' }, { status: 404 });
+    }
+
+    try {
+        const body = await request.json();
+        await transport.handlePostMessage(body);
+        return NextResponse.json({ success: true }); // Accepted
+    } catch (error) {
+        return NextResponse.json({ error: 'Invalid request' }, { status: 400 });
+    }
+}
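This route implements the SSE flavour of MCP: GET opens the event stream and immediately announces a session-scoped POST URL via an 'endpoint' event; JSON-RPC requests are then POSTed to that URL and responses come back as 'message' events. A browser-side sketch of that wire flow; the agent name and client info are placeholders, and a full MCP client would continue past the initialize handshake with tools/call against the registered 'chat' tool:

// Sketch of the wire-level flow against the new MCP endpoint.
const source = new EventSource('/agents/example-agent/api/mcp');

source.addEventListener('endpoint', async (event) => {
    // The server tells us where to POST JSON-RPC messages for this session.
    const postUrl = (event as MessageEvent).data as string; // e.g. /agents/example-agent/api/mcp?sessionId=...

    // First message of any MCP client: the initialize request (values are illustrative).
    await fetch(postUrl, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            jsonrpc: '2.0',
            id: 1,
            method: 'initialize',
            params: {
                protocolVersion: '2024-11-05',
                capabilities: {},
                clientInfo: { name: 'example-client', version: '0.0.1' },
            },
        }),
    });
});

source.addEventListener('message', (event) => {
    // JSON-RPC responses (initialize result, tool results, ...) arrive as SSE 'message' events.
    console.log(JSON.parse((event as MessageEvent).data));
});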
package/apps/agents-server/src/app/agents/[agentName]/api/modelRequirements/route.ts
CHANGED
@@ -1,4 +1,5 @@
 import { $provideAgentCollectionForServer } from '@/src/tools/$provideAgentCollectionForServer';
+import { resolveInheritedAgentSource } from '@/src/utils/resolveInheritedAgentSource';
 import { createAgentModelRequirements } from '@promptbook-local/core';
 import { serializeError } from '@promptbook-local/utils';
 import { assertsError } from '../../../../../../../../src/errors/assertsError';
@@ -12,7 +13,8 @@ export async function GET(request: Request, { params }: { params: Promise<{ agen
     try {
         const collection = await $provideAgentCollectionForServer();
         const agentSource = await collection.getAgentSource(agentName);
-        const modelRequirements = await createAgentModelRequirements(agentSource);
+        const effectiveAgentSource = await resolveInheritedAgentSource(agentSource, collection);
+        const modelRequirements = await createAgentModelRequirements(effectiveAgentSource);
 
         return new Response(
             JSON.stringify(
package/apps/agents-server/src/app/agents/[agentName]/api/modelRequirements/systemMessage/route.ts
CHANGED
@@ -1,4 +1,5 @@
 import { $provideAgentCollectionForServer } from '@/src/tools/$provideAgentCollectionForServer';
+import { resolveInheritedAgentSource } from '@/src/utils/resolveInheritedAgentSource';
 import { createAgentModelRequirements } from '@promptbook-local/core';
 import { serializeError } from '@promptbook-local/utils';
 import { assertsError } from '../../../../../../../../../src/errors/assertsError';
@@ -12,7 +13,8 @@ export async function GET(request: Request, { params }: { params: Promise<{ agen
     try {
         const collection = await $provideAgentCollectionForServer();
         const agentSource = await collection.getAgentSource(agentName);
-        const modelRequirements = await createAgentModelRequirements(agentSource);
+        const effectiveAgentSource = await resolveInheritedAgentSource(agentSource, collection);
+        const modelRequirements = await createAgentModelRequirements(effectiveAgentSource);
         const { systemMessage } = modelRequirements;
 
         return new Response(systemMessage, {
package/apps/agents-server/src/app/agents/[agentName]/api/openai/chat/completions/route.ts
CHANGED
@@ -1,176 +1,10 @@
-import { $provideAgentCollectionForServer } from '@/src/tools/$provideAgentCollectionForServer';
-import { $provideExecutionToolsForServer } from '@/src/tools/$provideExecutionToolsForServer';
-import { Agent } from '@promptbook-local/core';
-import { ChatMessage, ChatPromptResult, Prompt, TODO_any } from '@promptbook-local/types';
-import { NextRequest, NextResponse } from 'next/server';
+import { handleChatCompletion } from '@/src/utils/handleChatCompletion';
+import { NextRequest } from 'next/server';
 
 export async function POST(
     request: NextRequest,
     { params }: { params: Promise<{ agentName: string }> },
 ) {
     const { agentName } = await params;
-
-    // Note: Authentication is handled by middleware
-    // If we are here, the request is either authenticated or public access is allowed (but middleware blocks it if not)
-
-    try {
-        const body = await request.json();
-        const { messages, stream, model } = body;
-
-        if (!messages || !Array.isArray(messages) || messages.length === 0) {
-            return NextResponse.json(
-                { error: { message: 'Messages array is required and cannot be empty.', type: 'invalid_request_error' } },
-                { status: 400 },
-            );
-        }
-
-        const collection = await $provideAgentCollectionForServer();
-        let agentSource;
-        try {
-            agentSource = await collection.getAgentSource(agentName);
-        } catch (error) {
-            return NextResponse.json(
-                { error: { message: `Agent '${agentName}' not found.`, type: 'invalid_request_error' } },
-                { status: 404 },
-            );
-        }
-
-        if (!agentSource) {
-            return NextResponse.json(
-                { error: { message: `Agent '${agentName}' not found.`, type: 'invalid_request_error' } },
-                { status: 404 },
-            );
-        }
-
-        const executionTools = await $provideExecutionToolsForServer();
-        const agent = new Agent({
-            agentSource,
-            executionTools,
-            isVerbose: true, // or false
-        });
-
-        // Prepare thread and content
-        const lastMessage = messages[messages.length - 1];
-        const previousMessages = messages.slice(0, -1);
-
-        const thread: ChatMessage[] = previousMessages.map((msg: TODO_any, index: number) => ({
-            id: `msg-${index}`, // Placeholder ID
-            from: msg.role === 'assistant' ? 'agent' : 'user', // Mapping standard OpenAI roles
-            content: msg.content,
-            isComplete: true,
-            date: new Date(), // We don't have the real date, using current
-        }));
-
-        const prompt: Prompt = {
-            title: 'OpenAI API Chat Completion',
-            content: lastMessage.content,
-            modelRequirements: {
-                modelVariant: 'CHAT',
-                // We could pass 'model' from body if we wanted to enforce it, but Agent usually has its own config
-            },
-            parameters: {},
-            thread,
-        } as Prompt;
-        // Note: Casting as Prompt because the type definition might require properties we don't strictly use or that are optional but TS complains
-
-        if (stream) {
-            const encoder = new TextEncoder();
-            const readableStream = new ReadableStream({
-                async start(controller) {
-                    const runId = `chatcmpl-${Math.random().toString(36).substring(2, 15)}`;
-                    const created = Math.floor(Date.now() / 1000);
-
-                    let previousContent = '';
-
-                    try {
-                        await agent.callChatModelStream(prompt, (chunk: ChatPromptResult) => {
-                            const fullContent = chunk.content;
-                            const deltaContent = fullContent.substring(previousContent.length);
-                            previousContent = fullContent;
-
-                            if (deltaContent) {
-                                const chunkData = {
-                                    id: runId,
-                                    object: 'chat.completion.chunk',
-                                    created,
-                                    model: model || 'promptbook-agent',
-                                    choices: [
-                                        {
-                                            index: 0,
-                                            delta: {
-                                                content: deltaContent,
-                                            },
-                                            finish_reason: null,
-                                        },
-                                    ],
-                                };
-                                controller.enqueue(encoder.encode(`data: ${JSON.stringify(chunkData)}\n\n`));
-                            }
-                        });
-
-                        const doneChunkData = {
-                            id: runId,
-                            object: 'chat.completion.chunk',
-                            created,
-                            model: model || 'promptbook-agent',
-                            choices: [
-                                {
-                                    index: 0,
-                                    delta: {},
-                                    finish_reason: 'stop',
-                                },
-                            ],
-                        };
-                        controller.enqueue(encoder.encode(`data: ${JSON.stringify(doneChunkData)}\n\n`));
-                        controller.enqueue(encoder.encode('[DONE]'));
-                    } catch (error) {
-                        console.error('Error during streaming:', error);
-                        // OpenAI stream doesn't usually send error JSON in stream, just closes or sends error text?
-                        // But we should try to close gracefully or error.
-                        controller.error(error);
-                    }
-                    controller.close();
-                },
-            });
-
-            return new Response(readableStream, {
-                headers: {
-                    'Content-Type': 'text/event-stream',
-                    'Cache-Control': 'no-cache',
-                    Connection: 'keep-alive',
-                },
-            });
-        } else {
-            const result = await agent.callChatModel(prompt);
-
-            return NextResponse.json({
-                id: `chatcmpl-${Math.random().toString(36).substring(2, 15)}`,
-                object: 'chat.completion',
-                created: Math.floor(Date.now() / 1000),
-                model: model || 'promptbook-agent',
-                choices: [
-                    {
-                        index: 0,
-                        message: {
-                            role: 'assistant',
-                            content: result.content,
-                        },
-                        finish_reason: 'stop',
-                    },
-                ],
-                usage: {
-                    prompt_tokens: result.usage?.input?.tokensCount?.value || 0,
-                    completion_tokens: result.usage?.output?.tokensCount?.value || 0,
-                    total_tokens: (result.usage?.input?.tokensCount?.value || 0) + (result.usage?.output?.tokensCount?.value || 0),
-                },
-            });
-        }
-
-    } catch (error) {
-        console.error('Error in OpenAI API handler:', error);
-        return NextResponse.json(
-            { error: { message: (error as Error).message || 'Internal Server Error', type: 'server_error' } },
-            { status: 500 },
-        );
-    }
+    return handleChatCompletion(request, { agentName }, 'OpenAI API Chat Completion');
 }
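The inline handler moved into the shared handleChatCompletion utility, but the endpoint stays OpenAI-compatible and accepts messages, model, and stream as before. A minimal non-streaming call; host, agent name, and key are placeholders, and the bearer-style header is an assumption about how validateApiKey/middleware expect the key:

// Non-streaming OpenAI-style chat completion against a Promptbook agent endpoint.
async function askAgent(question: string): Promise<string> {
    const response = await fetch('https://example.com/agents/example-agent/api/openai/v1/chat/completions', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: 'Bearer <your-api-key>', // assumed auth scheme
        },
        body: JSON.stringify({
            model: 'example-agent',
            stream: false,
            messages: [{ role: 'user', content: question }],
        }),
    });
    const completion = await response.json();
    return completion.choices[0].message.content;
}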
package/apps/agents-server/src/app/agents/[agentName]/api/openai/models/route.ts
ADDED
@@ -0,0 +1,93 @@
+import { $provideAgentCollectionForServer } from '@/src/tools/$provideAgentCollectionForServer';
+import { validateApiKey } from '@/src/utils/validateApiKey';
+import { NextRequest, NextResponse } from 'next/server';
+
+/**
+ * GET /agents/[agentName]/api/openai/models
+ *
+ * Lists available models for the OpenAI-compatible API.
+ * This endpoint is required for OpenAI-compatible clients (like Jan, LM Studio, etc.)
+ * to discover available models.
+ */
+export async function GET(request: NextRequest, { params }: { params: Promise<{ agentName: string }> }) {
+    const { agentName } = await params;
+
+    // Validate API key explicitly (in addition to middleware)
+    const apiKeyValidation = await validateApiKey(request);
+    if (!apiKeyValidation.isValid) {
+        return NextResponse.json(
+            {
+                error: {
+                    message: apiKeyValidation.error || 'Invalid API key',
+                    type: 'authentication_error',
+                },
+            },
+            { status: 401 },
+        );
+    }
+
+    try {
+        const collection = await $provideAgentCollectionForServer();
+
+        let agentSource;
+        try {
+            agentSource = await collection.getAgentSource(agentName);
+        } catch (error) {
+            return NextResponse.json(
+                { error: { message: `Agent '${agentName}' not found.`, type: 'invalid_request_error' } },
+                { status: 404 },
+            );
+        }
+
+        if (!agentSource) {
+            return NextResponse.json(
+                { error: { message: `Agent '${agentName}' not found.`, type: 'invalid_request_error' } },
+                { status: 404 },
+            );
+        }
+
+        // Return the agent as a single model in OpenAI format
+        // The model ID is the agent name, which clients will use when making chat completion requests
+        const models = [
+            {
+                id: agentName,
+                object: 'model',
+                created: Math.floor(Date.now() / 1000),
+                owned_by: 'promptbook',
+                permission: [
+                    {
+                        id: `modelperm-${agentName}`,
+                        object: 'model_permission',
+                        created: Math.floor(Date.now() / 1000),
+                        allow_create_engine: false,
+                        allow_sampling: true,
+                        allow_logprobs: false,
+                        allow_search_indices: false,
+                        allow_view: true,
+                        allow_fine_tuning: false,
+                        organization: '*',
+                        group: null,
+                        is_blocking: false,
+                    },
+                ],
+                root: agentName,
+                parent: null,
+            },
+        ];
+
+        return NextResponse.json({
+            object: 'list',
+            data: models,
+        });
+    } catch (error) {
+        console.error('Error in models listing handler:', error);
+        return NextResponse.json(
+            { error: { message: (error as Error).message || 'Internal Server Error', type: 'server_error' } },
+            { status: 500 },
+        );
+    }
+}
+
+/**
+ * TODO: [🧠] Consider listing all available agents as models when agentName is a wildcard or special value
+ */
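With the models listing in place, OpenAI-compatible clients can auto-discover the agent; the single model ID returned is the agent name itself. A quick sketch of listing it, using the same placeholder host and assumed bearer key as above:

// List the models exposed by one agent's OpenAI-compatible API.
async function listAgentModels(agentName: string): Promise<string[]> {
    const response = await fetch(
        `https://example.com/agents/${encodeURIComponent(agentName)}/api/openai/models`,
        { headers: { Authorization: 'Bearer <your-api-key>' } }, // assumed auth scheme
    );
    const { data } = (await response.json()) as { data: Array<{ id: string }> };
    return data.map((model) => model.id); // e.g. ['example-agent']
}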
package/apps/agents-server/src/app/agents/[agentName]/api/openai/v1/chat/completions/route.ts
ADDED
@@ -0,0 +1,10 @@
+import { handleChatCompletion } from '@/src/utils/handleChatCompletion';
+import { NextRequest } from 'next/server';
+
+export async function POST(
+    request: NextRequest,
+    { params }: { params: Promise<{ agentName: string }> },
+) {
+    const { agentName } = await params;
+    return handleChatCompletion(request, { agentName }, 'OpenAI API Chat Completion');
+}