nitrostack 1.0.65 → 1.0.67
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -2
- package/src/studio/README.md +140 -0
- package/src/studio/app/api/auth/fetch-metadata/route.ts +71 -0
- package/src/studio/app/api/auth/register-client/route.ts +67 -0
- package/src/studio/app/api/chat/route.ts +250 -0
- package/src/studio/app/api/health/checks/route.ts +42 -0
- package/src/studio/app/api/health/route.ts +13 -0
- package/src/studio/app/api/init/route.ts +109 -0
- package/src/studio/app/api/ping/route.ts +13 -0
- package/src/studio/app/api/prompts/[name]/route.ts +21 -0
- package/src/studio/app/api/prompts/route.ts +13 -0
- package/src/studio/app/api/resources/[...uri]/route.ts +18 -0
- package/src/studio/app/api/resources/route.ts +13 -0
- package/src/studio/app/api/roots/route.ts +13 -0
- package/src/studio/app/api/sampling/route.ts +14 -0
- package/src/studio/app/api/tools/[name]/call/route.ts +41 -0
- package/src/studio/app/api/tools/route.ts +23 -0
- package/src/studio/app/api/widget-examples/route.ts +44 -0
- package/src/studio/app/auth/callback/page.tsx +175 -0
- package/src/studio/app/auth/page.tsx +560 -0
- package/src/studio/app/chat/page.tsx +1133 -0
- package/src/studio/app/chat/page.tsx.backup +390 -0
- package/src/studio/app/globals.css +486 -0
- package/src/studio/app/health/page.tsx +179 -0
- package/src/studio/app/layout.tsx +68 -0
- package/src/studio/app/logs/page.tsx +279 -0
- package/src/studio/app/page.tsx +351 -0
- package/src/studio/app/page.tsx.backup +346 -0
- package/src/studio/app/ping/page.tsx +209 -0
- package/src/studio/app/prompts/page.tsx +230 -0
- package/src/studio/app/resources/page.tsx +315 -0
- package/src/studio/app/settings/page.tsx +199 -0
- package/src/studio/branding.md +807 -0
- package/src/studio/components/EnlargeModal.tsx +138 -0
- package/src/studio/components/LogMessage.tsx +153 -0
- package/src/studio/components/MarkdownRenderer.tsx +410 -0
- package/src/studio/components/Sidebar.tsx +295 -0
- package/src/studio/components/ToolCard.tsx +139 -0
- package/src/studio/components/WidgetRenderer.tsx +346 -0
- package/src/studio/lib/api.ts +207 -0
- package/src/studio/lib/http-client-transport.ts +222 -0
- package/src/studio/lib/llm-service.ts +480 -0
- package/src/studio/lib/log-manager.ts +76 -0
- package/src/studio/lib/mcp-client.ts +258 -0
- package/src/studio/lib/store.ts +192 -0
- package/src/studio/lib/theme-provider.tsx +50 -0
- package/src/studio/lib/types.ts +107 -0
- package/src/studio/lib/widget-loader.ts +90 -0
- package/src/studio/middleware.ts +27 -0
- package/src/studio/next.config.js +38 -0
- package/src/studio/package.json +35 -0
- package/src/studio/postcss.config.mjs +10 -0
- package/src/studio/public/nitrocloud.png +0 -0
- package/src/studio/tailwind.config.ts +67 -0
- package/src/studio/tsconfig.json +42 -0
- package/templates/typescript-oauth/AI_AGENT_CLI_REFERENCE.md +0 -701
- package/templates/typescript-oauth/AI_AGENT_SDK_REFERENCE.md +0 -1260
- package/templates/typescript-oauth/package-lock.json +0 -4253
- package/templates/typescript-pizzaz/IMPLEMENTATION.md +0 -98
- package/templates/typescript-starter/AI_AGENT_CLI_REFERENCE.md +0 -701
- package/templates/typescript-starter/AI_AGENT_SDK_REFERENCE.md +0 -1260
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* HTTP Client Transport for MCP
|
|
3
|
+
*
|
|
4
|
+
* Implements HTTP-based communication with MCP servers using SSE for server-to-client messages
|
|
5
|
+
* and POST for client-to-server messages.
|
|
6
|
+
*
|
|
7
|
+
* Note: This uses EventSource which is browser-only. For server-side usage, you need eventsource polyfill.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import { Transport } from '@modelcontextprotocol/sdk/shared/transport.js';
|
|
11
|
+
import { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';
|
|
12
|
+
|
|
13
|
+
// EventSource type that works in both browser and Node.js.
// Extracts the instance type from the global EventSource constructor when the
// global exists; falls back to `any` in environments where it is absent.
type EventSourceType = typeof EventSource extends { prototype: infer T } ? T : any;

/** Configuration options for {@link HttpClientTransport}. */
export interface HttpClientTransportOptions {
  /**
   * Base URL of the MCP server (e.g., http://localhost:3000).
   * A trailing slash, if present, is stripped by the transport.
   */
  baseUrl: string;

  /**
   * Base path for MCP endpoints (default: '/mcp')
   */
  basePath?: string;

  /**
   * Optional headers to include in requests (e.g., Authorization).
   * NOTE(review): these are only applied to outgoing POST requests; the SSE
   * connection is opened via EventSource, which does not accept custom headers.
   */
  headers?: Record<string, string>;
}
|
|
32
|
+
|
|
33
|
+
/**
|
|
34
|
+
* HTTP Client Transport
|
|
35
|
+
*
|
|
36
|
+
* Connects to an MCP server over HTTP using:
|
|
37
|
+
* - SSE (Server-Sent Events) for receiving messages from server
|
|
38
|
+
* - HTTP POST for sending messages to server
|
|
39
|
+
*/
|
|
40
|
+
export class HttpClientTransport implements Transport {
|
|
41
|
+
private baseUrl: string;
|
|
42
|
+
private basePath: string;
|
|
43
|
+
private headers: Record<string, string>;
|
|
44
|
+
private eventSource: EventSourceType | null = null;
|
|
45
|
+
private clientId: string;
|
|
46
|
+
private messageHandler?: (message: JSONRPCMessage) => Promise<void>;
|
|
47
|
+
private closeHandler?: () => void;
|
|
48
|
+
private errorHandler?: (error: Error) => void;
|
|
49
|
+
private isConnected = false;
|
|
50
|
+
private EventSourceImpl: any;
|
|
51
|
+
|
|
52
|
+
constructor(options: HttpClientTransportOptions) {
|
|
53
|
+
this.baseUrl = options.baseUrl.replace(/\/$/, ''); // Remove trailing slash
|
|
54
|
+
this.basePath = options.basePath || '/mcp';
|
|
55
|
+
this.headers = options.headers || {};
|
|
56
|
+
this.clientId = `client_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
|
57
|
+
|
|
58
|
+
// Use native EventSource in browser, require polyfill in Node.js
|
|
59
|
+
if (typeof EventSource !== 'undefined') {
|
|
60
|
+
this.EventSourceImpl = EventSource;
|
|
61
|
+
} else {
|
|
62
|
+
// In Node.js environment, try to load eventsource package
|
|
63
|
+
try {
|
|
64
|
+
this.EventSourceImpl = require('eventsource');
|
|
65
|
+
} catch (e) {
|
|
66
|
+
throw new Error(
|
|
67
|
+
'EventSource is not available. In Node.js, install "eventsource" package: npm install eventsource'
|
|
68
|
+
);
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Start the transport by connecting to SSE endpoint
|
|
75
|
+
*/
|
|
76
|
+
async start(): Promise<void> {
|
|
77
|
+
if (this.isConnected) {
|
|
78
|
+
console.warn('⚠️ HTTP transport already connected');
|
|
79
|
+
return;
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
return new Promise((resolve, reject) => {
|
|
83
|
+
try {
|
|
84
|
+
const sseUrl = `${this.baseUrl}${this.basePath}/sse?clientId=${this.clientId}`;
|
|
85
|
+
console.log('🔌 Connecting to SSE endpoint:', sseUrl);
|
|
86
|
+
|
|
87
|
+
// Create EventSource for receiving server messages
|
|
88
|
+
this.eventSource = new this.EventSourceImpl(sseUrl) as EventSourceType;
|
|
89
|
+
|
|
90
|
+
this.eventSource.onopen = () => {
|
|
91
|
+
console.log('✅ SSE connection established');
|
|
92
|
+
this.isConnected = true;
|
|
93
|
+
resolve();
|
|
94
|
+
};
|
|
95
|
+
|
|
96
|
+
this.eventSource.onmessage = (event) => {
|
|
97
|
+
try {
|
|
98
|
+
const message = JSON.parse(event.data) as JSONRPCMessage;
|
|
99
|
+
console.log('📨 Received message from server:', message);
|
|
100
|
+
|
|
101
|
+
if (this.messageHandler) {
|
|
102
|
+
this.messageHandler(message).catch((error) => {
|
|
103
|
+
console.error('Error handling message:', error);
|
|
104
|
+
if (this.errorHandler) {
|
|
105
|
+
this.errorHandler(error);
|
|
106
|
+
}
|
|
107
|
+
});
|
|
108
|
+
}
|
|
109
|
+
} catch (error) {
|
|
110
|
+
console.error('Failed to parse SSE message:', error);
|
|
111
|
+
if (this.errorHandler) {
|
|
112
|
+
this.errorHandler(error as Error);
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
};
|
|
116
|
+
|
|
117
|
+
this.eventSource.onerror = (error) => {
|
|
118
|
+
console.error('❌ SSE connection error:', error);
|
|
119
|
+
this.isConnected = false;
|
|
120
|
+
|
|
121
|
+
if (this.errorHandler) {
|
|
122
|
+
this.errorHandler(new Error('SSE connection failed'));
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
if (!this.isConnected) {
|
|
126
|
+
// Connection failed during initial setup
|
|
127
|
+
reject(new Error('Failed to establish SSE connection'));
|
|
128
|
+
}
|
|
129
|
+
};
|
|
130
|
+
|
|
131
|
+
// Set a timeout for connection establishment
|
|
132
|
+
setTimeout(() => {
|
|
133
|
+
if (!this.isConnected) {
|
|
134
|
+
this.eventSource?.close();
|
|
135
|
+
reject(new Error('SSE connection timeout'));
|
|
136
|
+
}
|
|
137
|
+
}, 10000); // 10 second timeout
|
|
138
|
+
|
|
139
|
+
} catch (error) {
|
|
140
|
+
reject(error);
|
|
141
|
+
}
|
|
142
|
+
});
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
/**
|
|
146
|
+
* Send a message to the server
|
|
147
|
+
*/
|
|
148
|
+
async send(message: JSONRPCMessage): Promise<void> {
|
|
149
|
+
if (!this.isConnected) {
|
|
150
|
+
throw new Error('HTTP transport not connected');
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
try {
|
|
154
|
+
const url = `${this.baseUrl}${this.basePath}/message`;
|
|
155
|
+
console.log('📤 Sending message to server:', message);
|
|
156
|
+
|
|
157
|
+
const response = await fetch(url, {
|
|
158
|
+
method: 'POST',
|
|
159
|
+
headers: {
|
|
160
|
+
'Content-Type': 'application/json',
|
|
161
|
+
...this.headers,
|
|
162
|
+
},
|
|
163
|
+
body: JSON.stringify({
|
|
164
|
+
clientId: this.clientId,
|
|
165
|
+
message,
|
|
166
|
+
}),
|
|
167
|
+
});
|
|
168
|
+
|
|
169
|
+
if (!response.ok) {
|
|
170
|
+
const errorText = await response.text();
|
|
171
|
+
throw new Error(`HTTP ${response.status}: ${errorText}`);
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
console.log('✅ Message sent successfully');
|
|
175
|
+
} catch (error) {
|
|
176
|
+
console.error('Failed to send message:', error);
|
|
177
|
+
throw error;
|
|
178
|
+
}
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
/**
|
|
182
|
+
* Close the transport
|
|
183
|
+
*/
|
|
184
|
+
async close(): Promise<void> {
|
|
185
|
+
console.log('🛑 Closing HTTP transport...');
|
|
186
|
+
|
|
187
|
+
if (this.eventSource) {
|
|
188
|
+
this.eventSource.close();
|
|
189
|
+
this.eventSource = null;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
this.isConnected = false;
|
|
193
|
+
|
|
194
|
+
if (this.closeHandler) {
|
|
195
|
+
this.closeHandler();
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
console.log('✅ HTTP transport closed');
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
/**
|
|
202
|
+
* Set handler for incoming messages
|
|
203
|
+
*/
|
|
204
|
+
onmessage = (handler: (message: JSONRPCMessage) => Promise<void>): void => {
|
|
205
|
+
this.messageHandler = handler;
|
|
206
|
+
};
|
|
207
|
+
|
|
208
|
+
/**
|
|
209
|
+
* Set handler for connection close
|
|
210
|
+
*/
|
|
211
|
+
onclose = (handler: () => void): void => {
|
|
212
|
+
this.closeHandler = handler;
|
|
213
|
+
};
|
|
214
|
+
|
|
215
|
+
/**
|
|
216
|
+
* Set handler for errors
|
|
217
|
+
*/
|
|
218
|
+
onerror = (handler: (error: Error) => void): void => {
|
|
219
|
+
this.errorHandler = handler;
|
|
220
|
+
};
|
|
221
|
+
}
|
|
222
|
+
|
|
@@ -0,0 +1,480 @@
|
|
|
1
|
+
// LLM Service for Studio
|
|
2
|
+
// Supports OpenAI and Gemini
|
|
3
|
+
|
|
4
|
+
// Supported LLM backends.
export type LLMProvider = 'openai' | 'gemini';

/** A single message in a chat conversation (provider-agnostic shape). */
export interface ChatMessage {
  role: 'user' | 'assistant' | 'tool' | 'system';
  content: string;
  // Tool calls requested by an assistant message, if any.
  toolCalls?: ToolCall[];
  toolCallId?: string; // For tool responses - the ID of the call being responded to
  toolName?: string; // For tool responses - the name of the tool (required by Gemini)
}

/** A tool invocation requested by the model. */
export interface ToolCall {
  id: string;
  name: string;
  // Parsed JSON arguments; the shape depends on the tool's input schema.
  arguments: any;
}

/** Result of a single LLM round-trip. */
export interface ChatResponse {
  message: ChatMessage;
  // Convenience duplicate of message.toolCalls when the model requested tools.
  toolCalls?: ToolCall[];
  // Provider-reported stop reason (raw string from the provider's response).
  finishReason?: string;
}
|
|
25
|
+
|
|
26
|
+
export class LLMService {
|
|
27
|
+
// System prompt to improve tool usage (especially for Gemini)
|
|
28
|
+
private getSystemPrompt(tools: any[]): string {
|
|
29
|
+
return `You are an intelligent AI assistant with access to ${tools.length} powerful tools. Your goal is to help users accomplish their tasks efficiently and intelligently.
|
|
30
|
+
|
|
31
|
+
**CORE PRINCIPLES FOR TOOL USAGE:**
|
|
32
|
+
|
|
33
|
+
1. **Be Proactive & Infer Context**:
|
|
34
|
+
- Infer obvious information instead of asking (e.g., "Bangalore" → Karnataka, India; "New York" → NY, USA)
|
|
35
|
+
- Use common sense defaults when reasonable
|
|
36
|
+
- Don't ask for information you can deduce from context
|
|
37
|
+
|
|
38
|
+
2. **Chain Tools Intelligently**:
|
|
39
|
+
- Use multiple tools in sequence to accomplish complex tasks
|
|
40
|
+
- If a task requires multiple steps, execute them automatically
|
|
41
|
+
- Example: login → fetch data → process → display results
|
|
42
|
+
- Don't ask permission for each step in an obvious workflow
|
|
43
|
+
|
|
44
|
+
3. **Maintain Context Awareness**:
|
|
45
|
+
- Remember information from previous tool calls in THIS conversation
|
|
46
|
+
- Extract and reuse data (IDs, names, values) from previous tool results
|
|
47
|
+
- Example: If browse_products returned products with IDs, use those IDs for add_to_cart
|
|
48
|
+
- Match user requests to previous data (e.g., "apple" → find product with name "Apple" → use its ID)
|
|
49
|
+
- Track state across the conversation (logged in status, product IDs, order IDs, etc.)
|
|
50
|
+
- NEVER ask for information you already have from a previous tool call
|
|
51
|
+
|
|
52
|
+
4. **Use Smart Defaults**:
|
|
53
|
+
- Apply sensible default values when parameters are optional
|
|
54
|
+
- Common defaults: page=1, limit=10, sort=recent, etc.
|
|
55
|
+
- Only ask for clarification when truly ambiguous
|
|
56
|
+
|
|
57
|
+
5. **Minimize User Friction**:
|
|
58
|
+
- Don't ask for every detail - use inference and defaults
|
|
59
|
+
- Chain related operations seamlessly
|
|
60
|
+
- Provide concise summaries after multi-step operations
|
|
61
|
+
- Be conversational but efficient
|
|
62
|
+
|
|
63
|
+
6. **Handle Errors Gracefully**:
|
|
64
|
+
- If authentication required, guide user to login
|
|
65
|
+
- If prerequisite missing, suggest the required step
|
|
66
|
+
- If operation fails, explain why and suggest alternatives
|
|
67
|
+
- Always provide a helpful next step
|
|
68
|
+
|
|
69
|
+
7. **Tool Call Best Practices**:
|
|
70
|
+
- Read tool descriptions carefully to understand their purpose
|
|
71
|
+
- Use exact parameter names as specified in the schema
|
|
72
|
+
- Pay attention to required vs optional parameters
|
|
73
|
+
- If a tool says "Requires authentication" - CALL IT ANYWAY! Auth is handled automatically
|
|
74
|
+
- Don't ask for credentials preemptively - only if a tool explicitly fails
|
|
75
|
+
- Look for examples in tool schemas for guidance
|
|
76
|
+
|
|
77
|
+
**AUTHENTICATION HANDLING:**
|
|
78
|
+
|
|
79
|
+
- Authentication tokens are AUTOMATICALLY handled in the background
|
|
80
|
+
- If a tool says "Requires authentication", you can STILL call it directly - auth is transparent
|
|
81
|
+
- NEVER ask users for credentials unless a tool explicitly fails with an auth error
|
|
82
|
+
- If you get an auth error, THEN suggest the user login using the login tool
|
|
83
|
+
- Once a user logs in (or if already logged in), the session persists automatically
|
|
84
|
+
- You don't need to check if user is authenticated - just call the tool and let the system handle it
|
|
85
|
+
|
|
86
|
+
**EXAMPLES:**
|
|
87
|
+
|
|
88
|
+
**Authentication:**
|
|
89
|
+
User: "whoami" → Call whoami tool directly (don't ask for login)
|
|
90
|
+
User: "show my orders" → Call get_order_history directly (don't ask for login)
|
|
91
|
+
User: "what's in my cart" → Call view_cart directly (don't ask for login)
|
|
92
|
+
|
|
93
|
+
**Context Awareness:**
|
|
94
|
+
1. browse_products returns: [{id: "prod-3", name: "Apple", price: 0.99}, ...]
|
|
95
|
+
2. User: "add apple to cart" → Extract ID "prod-3" from previous result → Call add_to_cart({product_id: "prod-3", quantity: 1})
|
|
96
|
+
3. User: "add 2 more" → Remember prod-3 → Call add_to_cart({product_id: "prod-3", quantity: 2})
|
|
97
|
+
|
|
98
|
+
**NEVER do this:**
|
|
99
|
+
❌ User: "add apple to cart" → "What is the product ID?" (You already have it!)
|
|
100
|
+
❌ User: "add to cart" → "Which product?" (Look at conversation context!)
|
|
101
|
+
|
|
102
|
+
Only if tool returns auth error → THEN suggest: "Please login with your credentials"
|
|
103
|
+
|
|
104
|
+
**PRESENTING TOOL RESULTS:**
|
|
105
|
+
|
|
106
|
+
When you call a tool and receive results, you MUST present the information to the user in a clear, formatted way:
|
|
107
|
+
|
|
108
|
+
- For **list_resources**: Show each resource's URI, name, description, and mime type in a formatted list
|
|
109
|
+
- For **list_prompts**: Show each prompt's name, description, and required arguments
|
|
110
|
+
- For **read_resource**: Display the resource content in an appropriate format
|
|
111
|
+
- For **execute_prompt**: Show the prompt result clearly
|
|
112
|
+
- For ANY tool result: Extract and format the key information for the user to understand
|
|
113
|
+
|
|
114
|
+
**Example:**
|
|
115
|
+
User: "list all resources"
|
|
116
|
+
1. Call list_resources tool
|
|
117
|
+
2. Receive JSON array of resources
|
|
118
|
+
3. Format and present: "Here are the available resources:
|
|
119
|
+
- **Resource Name** (uri) - Description
|
|
120
|
+
- **Another Resource** (uri) - Description"
|
|
121
|
+
|
|
122
|
+
**NEVER** just say "I have the results" without showing them!
|
|
123
|
+
**ALWAYS** format and display the actual data you receive from tools!
|
|
124
|
+
|
|
125
|
+
**REMEMBER:**
|
|
126
|
+
|
|
127
|
+
- You have access to real, functional tools - use them!
|
|
128
|
+
- Call tools directly - don't ask for permission or credentials first
|
|
129
|
+
- **After calling a tool, ALWAYS present the results to the user clearly**
|
|
130
|
+
- Your goal is to be helpful, efficient, and reduce user friction
|
|
131
|
+
- Think through multi-step workflows and execute them seamlessly
|
|
132
|
+
- Use your intelligence to fill gaps rather than always asking questions`;
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
async chat(
|
|
136
|
+
provider: LLMProvider,
|
|
137
|
+
messages: ChatMessage[],
|
|
138
|
+
tools: any[],
|
|
139
|
+
apiKey: string
|
|
140
|
+
): Promise<ChatResponse> {
|
|
141
|
+
// Inject system prompt at the beginning if not already present
|
|
142
|
+
if (messages.length > 0 && messages[0].role !== 'system') {
|
|
143
|
+
messages = [
|
|
144
|
+
{
|
|
145
|
+
role: 'system',
|
|
146
|
+
content: this.getSystemPrompt(tools),
|
|
147
|
+
},
|
|
148
|
+
...messages,
|
|
149
|
+
];
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
if (provider === 'openai') {
|
|
153
|
+
return this.chatOpenAI(messages, tools, apiKey);
|
|
154
|
+
} else if (provider === 'gemini') {
|
|
155
|
+
return this.chatGemini(messages, tools, apiKey);
|
|
156
|
+
}
|
|
157
|
+
throw new Error(`Unsupported provider: ${provider}`);
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
private async chatOpenAI(
|
|
161
|
+
messages: ChatMessage[],
|
|
162
|
+
tools: any[],
|
|
163
|
+
apiKey: string
|
|
164
|
+
): Promise<ChatResponse> {
|
|
165
|
+
const formattedMessages = messages.map((msg) => {
|
|
166
|
+
if (msg.role === 'tool') {
|
|
167
|
+
return {
|
|
168
|
+
role: 'tool' as const,
|
|
169
|
+
content: msg.content,
|
|
170
|
+
tool_call_id: msg.toolCallId,
|
|
171
|
+
};
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
// Handle assistant messages with tool calls
|
|
175
|
+
if (msg.role === 'assistant' && msg.toolCalls && msg.toolCalls.length > 0) {
|
|
176
|
+
return {
|
|
177
|
+
role: 'assistant' as const,
|
|
178
|
+
content: msg.content || null,
|
|
179
|
+
tool_calls: msg.toolCalls.map(tc => ({
|
|
180
|
+
id: tc.id,
|
|
181
|
+
type: 'function' as const,
|
|
182
|
+
function: {
|
|
183
|
+
name: tc.name,
|
|
184
|
+
arguments: typeof tc.arguments === 'string' ? tc.arguments : JSON.stringify(tc.arguments),
|
|
185
|
+
},
|
|
186
|
+
})),
|
|
187
|
+
};
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
return {
|
|
191
|
+
role: msg.role,
|
|
192
|
+
content: msg.content,
|
|
193
|
+
};
|
|
194
|
+
});
|
|
195
|
+
|
|
196
|
+
console.log('Formatted messages for OpenAI:', JSON.stringify(formattedMessages, null, 2));
|
|
197
|
+
|
|
198
|
+
const requestBody: any = {
|
|
199
|
+
model: 'gpt-4-turbo-preview',
|
|
200
|
+
messages: formattedMessages,
|
|
201
|
+
};
|
|
202
|
+
|
|
203
|
+
if (tools.length > 0) {
|
|
204
|
+
requestBody.tools = tools.map((tool) => {
|
|
205
|
+
// Clean the schema for OpenAI - remove unsupported properties
|
|
206
|
+
const cleanSchema = { ...tool.inputSchema };
|
|
207
|
+
|
|
208
|
+
// Remove properties that OpenAI doesn't support
|
|
209
|
+
delete cleanSchema.$schema;
|
|
210
|
+
delete cleanSchema.additionalProperties;
|
|
211
|
+
|
|
212
|
+
// Ensure required properties for OpenAI
|
|
213
|
+
if (!cleanSchema.type) {
|
|
214
|
+
cleanSchema.type = 'object';
|
|
215
|
+
}
|
|
216
|
+
if (!cleanSchema.properties) {
|
|
217
|
+
cleanSchema.properties = {};
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
return {
|
|
221
|
+
type: 'function',
|
|
222
|
+
function: {
|
|
223
|
+
name: tool.name,
|
|
224
|
+
description: tool.description || 'No description provided',
|
|
225
|
+
parameters: cleanSchema,
|
|
226
|
+
},
|
|
227
|
+
};
|
|
228
|
+
});
|
|
229
|
+
|
|
230
|
+
console.log('Sending tools to OpenAI:', JSON.stringify(requestBody.tools, null, 2));
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
const response = await fetch('https://api.openai.com/v1/chat/completions', {
|
|
234
|
+
method: 'POST',
|
|
235
|
+
headers: {
|
|
236
|
+
'Content-Type': 'application/json',
|
|
237
|
+
Authorization: `Bearer ${apiKey}`,
|
|
238
|
+
},
|
|
239
|
+
body: JSON.stringify(requestBody),
|
|
240
|
+
});
|
|
241
|
+
|
|
242
|
+
if (!response.ok) {
|
|
243
|
+
const errorData = await response.json().catch(() => ({}));
|
|
244
|
+
console.error('OpenAI API error details:', errorData);
|
|
245
|
+
throw new Error(`OpenAI API error: ${response.statusText} - ${JSON.stringify(errorData)}`);
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
const data = await response.json();
|
|
249
|
+
const choice = data.choices[0];
|
|
250
|
+
|
|
251
|
+
const result: ChatResponse = {
|
|
252
|
+
message: {
|
|
253
|
+
role: 'assistant',
|
|
254
|
+
content: choice.message.content || '',
|
|
255
|
+
},
|
|
256
|
+
finishReason: choice.finish_reason,
|
|
257
|
+
};
|
|
258
|
+
|
|
259
|
+
if (choice.message.tool_calls) {
|
|
260
|
+
result.toolCalls = choice.message.tool_calls.map((tc: any) => ({
|
|
261
|
+
id: tc.id,
|
|
262
|
+
name: tc.function?.name || '',
|
|
263
|
+
arguments: JSON.parse(tc.function?.arguments || '{}'),
|
|
264
|
+
}));
|
|
265
|
+
result.message.toolCalls = result.toolCalls;
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
return result;
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
private async chatGemini(
|
|
272
|
+
messages: ChatMessage[],
|
|
273
|
+
tools: any[],
|
|
274
|
+
apiKey: string
|
|
275
|
+
): Promise<ChatResponse> {
|
|
276
|
+
// Convert messages to Gemini format
|
|
277
|
+
const contents: any[] = [];
|
|
278
|
+
let systemInstruction = '';
|
|
279
|
+
|
|
280
|
+
// Group consecutive tool messages together for Gemini
|
|
281
|
+
let i = 0;
|
|
282
|
+
while (i < messages.length) {
|
|
283
|
+
const msg = messages[i];
|
|
284
|
+
|
|
285
|
+
if (msg.role === 'system') {
|
|
286
|
+
systemInstruction = msg.content;
|
|
287
|
+
i++;
|
|
288
|
+
continue;
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
if (msg.role === 'tool') {
|
|
292
|
+
// Collect all consecutive tool messages and group them into ONE function response
|
|
293
|
+
const functionParts: any[] = [];
|
|
294
|
+
|
|
295
|
+
while (i < messages.length && messages[i].role === 'tool') {
|
|
296
|
+
const toolMsg = messages[i];
|
|
297
|
+
functionParts.push({
|
|
298
|
+
functionResponse: {
|
|
299
|
+
name: toolMsg.toolName || 'unknown', // Use toolName for Gemini!
|
|
300
|
+
response: {
|
|
301
|
+
content: toolMsg.content,
|
|
302
|
+
},
|
|
303
|
+
},
|
|
304
|
+
});
|
|
305
|
+
i++;
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
// Add all function responses as ONE entry with multiple parts
|
|
309
|
+
contents.push({
|
|
310
|
+
role: 'function',
|
|
311
|
+
parts: functionParts,
|
|
312
|
+
});
|
|
313
|
+
} else if (msg.role === 'assistant' && msg.toolCalls && msg.toolCalls.length > 0) {
|
|
314
|
+
// Assistant message with tool calls
|
|
315
|
+
const parts: any[] = [];
|
|
316
|
+
|
|
317
|
+
if (msg.content) {
|
|
318
|
+
parts.push({ text: msg.content });
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
for (const tc of msg.toolCalls) {
|
|
322
|
+
parts.push({
|
|
323
|
+
functionCall: {
|
|
324
|
+
name: tc.name,
|
|
325
|
+
args: tc.arguments,
|
|
326
|
+
},
|
|
327
|
+
});
|
|
328
|
+
}
|
|
329
|
+
|
|
330
|
+
contents.push({
|
|
331
|
+
role: 'model',
|
|
332
|
+
parts,
|
|
333
|
+
});
|
|
334
|
+
i++;
|
|
335
|
+
} else {
|
|
336
|
+
// Regular user or assistant message
|
|
337
|
+
contents.push({
|
|
338
|
+
role: msg.role === 'assistant' ? 'model' : 'user',
|
|
339
|
+
parts: [{ text: msg.content }],
|
|
340
|
+
});
|
|
341
|
+
i++;
|
|
342
|
+
}
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
// console.log('Formatted contents for Gemini:', JSON.stringify(contents, null, 2));
|
|
346
|
+
|
|
347
|
+
// Prepare request body
|
|
348
|
+
const requestBody: any = {
|
|
349
|
+
contents,
|
|
350
|
+
};
|
|
351
|
+
|
|
352
|
+
// Add system instruction if present
|
|
353
|
+
if (systemInstruction) {
|
|
354
|
+
requestBody.systemInstruction = {
|
|
355
|
+
parts: [{ text: systemInstruction }],
|
|
356
|
+
};
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
// Add tools if present
|
|
360
|
+
if (tools.length > 0) {
|
|
361
|
+
requestBody.tools = [{
|
|
362
|
+
functionDeclarations: tools.map((tool) => {
|
|
363
|
+
const cleanSchema = { ...tool.inputSchema };
|
|
364
|
+
|
|
365
|
+
// Remove unsupported properties
|
|
366
|
+
delete cleanSchema.$schema;
|
|
367
|
+
delete cleanSchema.additionalProperties;
|
|
368
|
+
|
|
369
|
+
// Convert to Gemini's expected format
|
|
370
|
+
const parameters: any = {
|
|
371
|
+
type: cleanSchema.type || 'OBJECT',
|
|
372
|
+
properties: {},
|
|
373
|
+
required: cleanSchema.required || [],
|
|
374
|
+
};
|
|
375
|
+
|
|
376
|
+
// Convert properties
|
|
377
|
+
if (cleanSchema.properties) {
|
|
378
|
+
for (const [key, value] of Object.entries(cleanSchema.properties)) {
|
|
379
|
+
const prop: any = value;
|
|
380
|
+
parameters.properties[key] = {
|
|
381
|
+
type: this.convertTypeToGemini(prop.type),
|
|
382
|
+
description: prop.description || '',
|
|
383
|
+
};
|
|
384
|
+
|
|
385
|
+
// Handle enums
|
|
386
|
+
if (prop.enum) {
|
|
387
|
+
parameters.properties[key].enum = prop.enum;
|
|
388
|
+
}
|
|
389
|
+
}
|
|
390
|
+
}
|
|
391
|
+
|
|
392
|
+
return {
|
|
393
|
+
name: tool.name,
|
|
394
|
+
description: tool.description || 'No description provided',
|
|
395
|
+
parameters,
|
|
396
|
+
};
|
|
397
|
+
}),
|
|
398
|
+
}];
|
|
399
|
+
|
|
400
|
+
// console.log('Sending tools to Gemini:', JSON.stringify(requestBody.tools, null, 2));
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
// Use Gemini 2.0 Flash Experimental (latest model with function calling)
|
|
404
|
+
// The v1beta API uses 'gemini-2.0-flash-exp' for the newest features
|
|
405
|
+
const model = 'gemini-2.0-flash-exp';
|
|
406
|
+
const response = await fetch(
|
|
407
|
+
`https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`,
|
|
408
|
+
{
|
|
409
|
+
method: 'POST',
|
|
410
|
+
headers: { 'Content-Type': 'application/json' },
|
|
411
|
+
body: JSON.stringify(requestBody),
|
|
412
|
+
}
|
|
413
|
+
);
|
|
414
|
+
|
|
415
|
+
if (!response.ok) {
|
|
416
|
+
const errorData = await response.json().catch(() => ({}));
|
|
417
|
+
console.error('Gemini API error details:', errorData);
|
|
418
|
+
throw new Error(`Gemini API error: ${response.statusText} - ${JSON.stringify(errorData)}`);
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
const data = await response.json();
|
|
422
|
+
console.log('Gemini response:', JSON.stringify(data, null, 2));
|
|
423
|
+
|
|
424
|
+
const candidate = data.candidates?.[0];
|
|
425
|
+
if (!candidate) {
|
|
426
|
+
throw new Error('No response from Gemini');
|
|
427
|
+
}
|
|
428
|
+
|
|
429
|
+
const parts = candidate.content?.parts || [];
|
|
430
|
+
|
|
431
|
+
// Extract text content
|
|
432
|
+
let content = '';
|
|
433
|
+
const toolCalls: ToolCall[] = [];
|
|
434
|
+
|
|
435
|
+
for (const part of parts) {
|
|
436
|
+
if (part.text) {
|
|
437
|
+
content += part.text;
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
if (part.functionCall) {
|
|
441
|
+
toolCalls.push({
|
|
442
|
+
id: `call_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
|
|
443
|
+
name: part.functionCall.name,
|
|
444
|
+
arguments: part.functionCall.args || {},
|
|
445
|
+
});
|
|
446
|
+
}
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
const result: ChatResponse = {
|
|
450
|
+
message: {
|
|
451
|
+
role: 'assistant',
|
|
452
|
+
content: content || '',
|
|
453
|
+
},
|
|
454
|
+
finishReason: candidate.finishReason,
|
|
455
|
+
};
|
|
456
|
+
|
|
457
|
+
if (toolCalls.length > 0) {
|
|
458
|
+
result.toolCalls = toolCalls;
|
|
459
|
+
result.message.toolCalls = toolCalls;
|
|
460
|
+
}
|
|
461
|
+
|
|
462
|
+
return result;
|
|
463
|
+
}
|
|
464
|
+
|
|
465
|
+
private convertTypeToGemini(type?: string): string {
|
|
466
|
+
if (!type) return 'STRING';
|
|
467
|
+
|
|
468
|
+
const typeMap: Record<string, string> = {
|
|
469
|
+
'string': 'STRING',
|
|
470
|
+
'number': 'NUMBER',
|
|
471
|
+
'integer': 'INTEGER',
|
|
472
|
+
'boolean': 'BOOLEAN',
|
|
473
|
+
'array': 'ARRAY',
|
|
474
|
+
'object': 'OBJECT',
|
|
475
|
+
};
|
|
476
|
+
|
|
477
|
+
return typeMap[type.toLowerCase()] || 'STRING';
|
|
478
|
+
}
|
|
479
|
+
}
|
|
480
|
+
|