@jupyterlite/ai 0.8.1 → 0.9.0-a0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/agent.d.ts +233 -0
- package/lib/agent.js +604 -0
- package/lib/chat-model.d.ts +195 -0
- package/lib/chat-model.js +590 -0
- package/lib/completion/completion-provider.d.ts +83 -0
- package/lib/completion/completion-provider.js +209 -0
- package/lib/completion/index.d.ts +1 -0
- package/lib/completion/index.js +1 -0
- package/lib/components/clear-button.d.ts +18 -0
- package/lib/components/clear-button.js +31 -0
- package/lib/components/index.d.ts +3 -0
- package/lib/components/index.js +3 -0
- package/lib/components/model-select.d.ts +19 -0
- package/lib/components/model-select.js +154 -0
- package/lib/components/stop-button.d.ts +3 -3
- package/lib/components/stop-button.js +8 -9
- package/lib/components/token-usage-display.d.ts +45 -0
- package/lib/components/token-usage-display.js +74 -0
- package/lib/components/tool-select.d.ts +27 -0
- package/lib/components/tool-select.js +130 -0
- package/lib/icons.d.ts +3 -1
- package/lib/icons.js +10 -13
- package/lib/index.d.ts +4 -5
- package/lib/index.js +322 -167
- package/lib/mcp/browser.d.ts +68 -0
- package/lib/mcp/browser.js +132 -0
- package/lib/models/settings-model.d.ts +69 -0
- package/lib/models/settings-model.js +295 -0
- package/lib/providers/built-in-providers.d.ts +9 -0
- package/lib/providers/built-in-providers.js +192 -0
- package/lib/providers/models.d.ts +37 -0
- package/lib/providers/models.js +28 -0
- package/lib/providers/provider-registry.d.ts +94 -0
- package/lib/providers/provider-registry.js +155 -0
- package/lib/tokens.d.ts +157 -86
- package/lib/tokens.js +16 -12
- package/lib/tools/commands.d.ts +11 -0
- package/lib/tools/commands.js +126 -0
- package/lib/tools/file.d.ts +27 -0
- package/lib/tools/file.js +262 -0
- package/lib/tools/notebook.d.ts +40 -0
- package/lib/tools/notebook.js +762 -0
- package/lib/tools/tool-registry.d.ts +35 -0
- package/lib/tools/tool-registry.js +55 -0
- package/lib/widgets/ai-settings.d.ts +39 -0
- package/lib/widgets/ai-settings.js +506 -0
- package/lib/widgets/chat-wrapper.d.ts +144 -0
- package/lib/widgets/chat-wrapper.js +390 -0
- package/lib/widgets/provider-config-dialog.d.ts +13 -0
- package/lib/widgets/provider-config-dialog.js +104 -0
- package/package.json +150 -41
- package/schema/settings-model.json +153 -0
- package/src/agent.ts +800 -0
- package/src/chat-model.ts +770 -0
- package/src/completion/completion-provider.ts +308 -0
- package/src/completion/index.ts +1 -0
- package/src/components/clear-button.tsx +56 -0
- package/src/components/index.ts +3 -0
- package/src/components/model-select.tsx +245 -0
- package/src/components/stop-button.tsx +11 -11
- package/src/components/token-usage-display.tsx +130 -0
- package/src/components/tool-select.tsx +218 -0
- package/src/icons.ts +12 -14
- package/src/index.ts +468 -238
- package/src/mcp/browser.ts +213 -0
- package/src/models/settings-model.ts +409 -0
- package/src/providers/built-in-providers.ts +216 -0
- package/src/providers/models.ts +79 -0
- package/src/providers/provider-registry.ts +189 -0
- package/src/tokens.ts +203 -90
- package/src/tools/commands.ts +151 -0
- package/src/tools/file.ts +307 -0
- package/src/tools/notebook.ts +964 -0
- package/src/tools/tool-registry.ts +63 -0
- package/src/types.d.ts +4 -0
- package/src/widgets/ai-settings.tsx +1100 -0
- package/src/widgets/chat-wrapper.tsx +543 -0
- package/src/widgets/provider-config-dialog.tsx +256 -0
- package/style/base.css +335 -14
- package/style/icons/jupyternaut-lite.svg +1 -1
- package/lib/base-completer.d.ts +0 -49
- package/lib/base-completer.js +0 -14
- package/lib/chat-handler.d.ts +0 -56
- package/lib/chat-handler.js +0 -201
- package/lib/completion-provider.d.ts +0 -34
- package/lib/completion-provider.js +0 -32
- package/lib/default-prompts.d.ts +0 -2
- package/lib/default-prompts.js +0 -31
- package/lib/default-providers/Anthropic/completer.d.ts +0 -12
- package/lib/default-providers/Anthropic/completer.js +0 -46
- package/lib/default-providers/Anthropic/settings-schema.json +0 -70
- package/lib/default-providers/ChromeAI/completer.d.ts +0 -12
- package/lib/default-providers/ChromeAI/completer.js +0 -56
- package/lib/default-providers/ChromeAI/instructions.d.ts +0 -6
- package/lib/default-providers/ChromeAI/instructions.js +0 -42
- package/lib/default-providers/ChromeAI/settings-schema.json +0 -18
- package/lib/default-providers/Gemini/completer.d.ts +0 -12
- package/lib/default-providers/Gemini/completer.js +0 -48
- package/lib/default-providers/Gemini/instructions.d.ts +0 -2
- package/lib/default-providers/Gemini/instructions.js +0 -9
- package/lib/default-providers/Gemini/settings-schema.json +0 -64
- package/lib/default-providers/MistralAI/completer.d.ts +0 -13
- package/lib/default-providers/MistralAI/completer.js +0 -52
- package/lib/default-providers/MistralAI/instructions.d.ts +0 -2
- package/lib/default-providers/MistralAI/instructions.js +0 -18
- package/lib/default-providers/MistralAI/settings-schema.json +0 -75
- package/lib/default-providers/Ollama/completer.d.ts +0 -12
- package/lib/default-providers/Ollama/completer.js +0 -43
- package/lib/default-providers/Ollama/instructions.d.ts +0 -2
- package/lib/default-providers/Ollama/instructions.js +0 -70
- package/lib/default-providers/Ollama/settings-schema.json +0 -143
- package/lib/default-providers/OpenAI/completer.d.ts +0 -12
- package/lib/default-providers/OpenAI/completer.js +0 -43
- package/lib/default-providers/OpenAI/settings-schema.json +0 -628
- package/lib/default-providers/WebLLM/completer.d.ts +0 -21
- package/lib/default-providers/WebLLM/completer.js +0 -127
- package/lib/default-providers/WebLLM/instructions.d.ts +0 -6
- package/lib/default-providers/WebLLM/instructions.js +0 -32
- package/lib/default-providers/WebLLM/settings-schema.json +0 -19
- package/lib/default-providers/index.d.ts +0 -2
- package/lib/default-providers/index.js +0 -179
- package/lib/provider.d.ts +0 -144
- package/lib/provider.js +0 -412
- package/lib/settings/base.json +0 -7
- package/lib/settings/index.d.ts +0 -3
- package/lib/settings/index.js +0 -3
- package/lib/settings/panel.d.ts +0 -226
- package/lib/settings/panel.js +0 -510
- package/lib/settings/textarea.d.ts +0 -2
- package/lib/settings/textarea.js +0 -18
- package/lib/settings/utils.d.ts +0 -2
- package/lib/settings/utils.js +0 -4
- package/lib/types/ai-model.d.ts +0 -24
- package/lib/types/ai-model.js +0 -5
- package/schema/chat.json +0 -28
- package/schema/provider-registry.json +0 -29
- package/schema/system-prompts.json +0 -22
- package/src/base-completer.ts +0 -75
- package/src/chat-handler.ts +0 -262
- package/src/completion-provider.ts +0 -64
- package/src/default-prompts.ts +0 -33
- package/src/default-providers/Anthropic/completer.ts +0 -59
- package/src/default-providers/ChromeAI/completer.ts +0 -73
- package/src/default-providers/ChromeAI/instructions.ts +0 -45
- package/src/default-providers/Gemini/completer.ts +0 -61
- package/src/default-providers/Gemini/instructions.ts +0 -9
- package/src/default-providers/MistralAI/completer.ts +0 -69
- package/src/default-providers/MistralAI/instructions.ts +0 -18
- package/src/default-providers/Ollama/completer.ts +0 -54
- package/src/default-providers/Ollama/instructions.ts +0 -70
- package/src/default-providers/OpenAI/completer.ts +0 -54
- package/src/default-providers/WebLLM/completer.ts +0 -151
- package/src/default-providers/WebLLM/instructions.ts +0 -33
- package/src/default-providers/index.ts +0 -211
- package/src/global.d.ts +0 -9
- package/src/provider.ts +0 -514
- package/src/settings/index.ts +0 -3
- package/src/settings/panel.tsx +0 -773
- package/src/settings/textarea.tsx +0 -33
- package/src/settings/utils.ts +0 -5
- package/src/types/ai-model.ts +0 -37
- package/src/types/service-worker.d.ts +0 -6
|
@@ -0,0 +1,590 @@
|
|
|
1
|
+
import { AbstractChatModel } from '@jupyter/chat';
|
|
2
|
+
import { UUID } from '@lumino/coreutils';
|
|
3
|
+
import { PathExt } from '@jupyterlab/coreutils';
|
|
4
|
+
import { AI_AVATAR } from './icons';
|
|
5
|
+
/**
|
|
6
|
+
* AI Chat Model implementation that provides chat functionality with OpenAI agents,
|
|
7
|
+
* tool integration, and MCP server support.
|
|
8
|
+
* Extends the base AbstractChatModel to provide AI-powered conversations.
|
|
9
|
+
*/
|
|
10
|
+
export class AIChatModel extends AbstractChatModel {
|
|
11
|
+
/**
|
|
12
|
+
* Constructs a new AIChatModel instance.
|
|
13
|
+
* @param options Configuration options for the chat model
|
|
14
|
+
*/
|
|
15
|
+
constructor(options) {
|
|
16
|
+
super({
|
|
17
|
+
activeCellManager: options.activeCellManager,
|
|
18
|
+
documentManager: options.documentManager,
|
|
19
|
+
config: {
|
|
20
|
+
enableCodeToolbar: true,
|
|
21
|
+
sendWithShiftEnter: options.settingsModel.config.sendWithShiftEnter
|
|
22
|
+
}
|
|
23
|
+
});
|
|
24
|
+
this._settingsModel = options.settingsModel;
|
|
25
|
+
this._user = options.user;
|
|
26
|
+
this._agentManager = options.agentManager;
|
|
27
|
+
// Listen for agent events
|
|
28
|
+
this._agentManager.agentEvent.connect(this._onAgentEvent, this);
|
|
29
|
+
// Listen for settings changes to update chat behavior
|
|
30
|
+
this._settingsModel.stateChanged.connect(this._onSettingsChanged, this);
|
|
31
|
+
}
|
|
32
|
+
/**
|
|
33
|
+
* Gets the current user information.
|
|
34
|
+
*/
|
|
35
|
+
get user() {
|
|
36
|
+
return this._user;
|
|
37
|
+
}
|
|
38
|
+
get tokenUsageChanged() {
|
|
39
|
+
return this._agentManager.tokenUsageChanged;
|
|
40
|
+
}
|
|
41
|
+
/**
|
|
42
|
+
* Creates a chat context for the current conversation.
|
|
43
|
+
*/
|
|
44
|
+
createChatContext() {
|
|
45
|
+
return {
|
|
46
|
+
name: this.name,
|
|
47
|
+
user: { username: 'me' },
|
|
48
|
+
users: [],
|
|
49
|
+
messages: this.messages,
|
|
50
|
+
stopStreaming: () => this.stopStreaming(),
|
|
51
|
+
clearMessages: () => this.clearMessages(),
|
|
52
|
+
agentManager: this._agentManager
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
/**
|
|
56
|
+
* Stops the current streaming response by aborting the request.
|
|
57
|
+
*/
|
|
58
|
+
stopStreaming = () => {
|
|
59
|
+
this._agentManager.stopStreaming();
|
|
60
|
+
};
|
|
61
|
+
/**
|
|
62
|
+
* Clears all messages from the chat and resets conversation state.
|
|
63
|
+
*/
|
|
64
|
+
clearMessages = () => {
|
|
65
|
+
this.messagesDeleted(0, this.messages.length);
|
|
66
|
+
this._pendingToolCalls.clear();
|
|
67
|
+
this._agentManager.clearHistory();
|
|
68
|
+
};
|
|
69
|
+
/**
|
|
70
|
+
* Sends a message to the AI and generates a response.
|
|
71
|
+
* @param message The user message to send
|
|
72
|
+
*/
|
|
73
|
+
async sendMessage(message) {
|
|
74
|
+
// Add user message to chat
|
|
75
|
+
const userMessage = {
|
|
76
|
+
body: message.body,
|
|
77
|
+
sender: this.user || { username: 'user', display_name: 'User' },
|
|
78
|
+
id: UUID.uuid4(),
|
|
79
|
+
time: Date.now() / 1000,
|
|
80
|
+
type: 'msg',
|
|
81
|
+
raw_time: false,
|
|
82
|
+
attachments: this.input.attachments
|
|
83
|
+
};
|
|
84
|
+
this.messageAdded(userMessage);
|
|
85
|
+
// Check if we have valid configuration
|
|
86
|
+
if (!this._agentManager.hasValidConfig()) {
|
|
87
|
+
const errorMessage = {
|
|
88
|
+
body: 'Please configure your AI settings first. Open the AI Settings to set your API key and model.',
|
|
89
|
+
sender: this._getAIUser(),
|
|
90
|
+
id: UUID.uuid4(),
|
|
91
|
+
time: Date.now() / 1000,
|
|
92
|
+
type: 'msg',
|
|
93
|
+
raw_time: false
|
|
94
|
+
};
|
|
95
|
+
this.messageAdded(errorMessage);
|
|
96
|
+
return;
|
|
97
|
+
}
|
|
98
|
+
try {
|
|
99
|
+
// Process attachments and add their content to the message
|
|
100
|
+
let enhancedMessage = message.body;
|
|
101
|
+
if (this.input.attachments.length > 0) {
|
|
102
|
+
const attachmentContents = await this._processAttachments(this.input.attachments);
|
|
103
|
+
if (attachmentContents.length > 0) {
|
|
104
|
+
enhancedMessage +=
|
|
105
|
+
'\n\n--- Attached Files ---\n' + attachmentContents.join('\n\n');
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
this.updateWriters([{ user: this._getAIUser() }]);
|
|
109
|
+
await this._agentManager.generateResponse(enhancedMessage);
|
|
110
|
+
// Clear attachments after processing
|
|
111
|
+
this.input.clearAttachments();
|
|
112
|
+
}
|
|
113
|
+
catch (error) {
|
|
114
|
+
const errorMessage = {
|
|
115
|
+
body: `Error generating AI response: ${error.message}`,
|
|
116
|
+
sender: this._getAIUser(),
|
|
117
|
+
id: UUID.uuid4(),
|
|
118
|
+
time: Date.now() / 1000,
|
|
119
|
+
type: 'msg',
|
|
120
|
+
raw_time: false
|
|
121
|
+
};
|
|
122
|
+
this.messageAdded(errorMessage);
|
|
123
|
+
}
|
|
124
|
+
finally {
|
|
125
|
+
this.updateWriters([]);
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
/**
|
|
129
|
+
* Approves a tool call and updates the UI accordingly.
|
|
130
|
+
* @param interruptionId The interruption ID to approve
|
|
131
|
+
* @param messageId Optional message ID for UI updates
|
|
132
|
+
*/
|
|
133
|
+
async approveToolCall(interruptionId, messageId) {
|
|
134
|
+
await this._agentManager.approveToolCall(interruptionId);
|
|
135
|
+
// Update the tool call box to show "Approved" status
|
|
136
|
+
if (messageId) {
|
|
137
|
+
this._updateToolCallBoxStatus(messageId, 'Approved', true);
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
/**
|
|
141
|
+
* Rejects a tool call and updates the UI accordingly.
|
|
142
|
+
* @param interruptionId The interruption ID to reject
|
|
143
|
+
* @param messageId Optional message ID for UI updates
|
|
144
|
+
*/
|
|
145
|
+
async rejectToolCall(interruptionId, messageId) {
|
|
146
|
+
await this._agentManager.rejectToolCall(interruptionId);
|
|
147
|
+
// Update the tool call box to show "Rejected" status
|
|
148
|
+
if (messageId) {
|
|
149
|
+
this._updateToolCallBoxStatus(messageId, 'Rejected', false);
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
/**
|
|
153
|
+
* Approves all tools in a group.
|
|
154
|
+
* @param groupId The group ID containing the tool calls
|
|
155
|
+
* @param interruptionIds Array of interruption IDs to approve
|
|
156
|
+
* @param messageId Optional message ID for UI updates
|
|
157
|
+
*/
|
|
158
|
+
async approveGroupedToolCalls(groupId, interruptionIds, messageId) {
|
|
159
|
+
await this._agentManager.approveGroupedToolCalls(groupId, interruptionIds);
|
|
160
|
+
// Update the grouped approval message to show approved status
|
|
161
|
+
if (messageId) {
|
|
162
|
+
this._updateGroupedApprovalStatus(messageId, 'Tools approved', true);
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
/**
|
|
166
|
+
* Rejects all tools in a group.
|
|
167
|
+
* @param groupId The group ID containing the tool calls
|
|
168
|
+
* @param interruptionIds Array of interruption IDs to reject
|
|
169
|
+
* @param messageId Optional message ID for UI updates
|
|
170
|
+
*/
|
|
171
|
+
async rejectGroupedToolCalls(groupId, interruptionIds, messageId) {
|
|
172
|
+
await this._agentManager.rejectGroupedToolCalls(groupId, interruptionIds);
|
|
173
|
+
// Update the grouped approval message to show rejected status
|
|
174
|
+
if (messageId) {
|
|
175
|
+
this._updateGroupedApprovalStatus(messageId, 'Tools rejected', false);
|
|
176
|
+
}
|
|
177
|
+
}
|
|
178
|
+
/**
|
|
179
|
+
* Gets the AI user information for system messages.
|
|
180
|
+
*/
|
|
181
|
+
_getAIUser() {
|
|
182
|
+
return {
|
|
183
|
+
username: 'ai-assistant',
|
|
184
|
+
display_name: 'Jupyternaut',
|
|
185
|
+
initials: 'JN',
|
|
186
|
+
color: '#2196F3',
|
|
187
|
+
avatar_url: AI_AVATAR
|
|
188
|
+
};
|
|
189
|
+
}
|
|
190
|
+
/**
|
|
191
|
+
* Handles settings changes and updates chat configuration accordingly.
|
|
192
|
+
*/
|
|
193
|
+
_onSettingsChanged() {
|
|
194
|
+
const config = this._settingsModel.config;
|
|
195
|
+
this.config = {
|
|
196
|
+
...config,
|
|
197
|
+
enableCodeToolbar: true
|
|
198
|
+
};
|
|
199
|
+
// Agent manager handles agent recreation automatically via its own settings listener
|
|
200
|
+
}
|
|
201
|
+
/**
|
|
202
|
+
* Handles events emitted by the agent manager.
|
|
203
|
+
* @param event The event data containing type and payload
|
|
204
|
+
*/
|
|
205
|
+
_onAgentEvent(_sender, event) {
|
|
206
|
+
switch (event.type) {
|
|
207
|
+
case 'message_start':
|
|
208
|
+
this._handleMessageStart(event);
|
|
209
|
+
break;
|
|
210
|
+
case 'message_chunk':
|
|
211
|
+
this._handleMessageChunk(event);
|
|
212
|
+
break;
|
|
213
|
+
case 'message_complete':
|
|
214
|
+
this._handleMessageComplete(event);
|
|
215
|
+
break;
|
|
216
|
+
case 'tool_call_start':
|
|
217
|
+
this._handleToolCallStartEvent(event);
|
|
218
|
+
break;
|
|
219
|
+
case 'tool_call_complete':
|
|
220
|
+
this._handleToolCallCompleteEvent(event);
|
|
221
|
+
break;
|
|
222
|
+
case 'tool_approval_required':
|
|
223
|
+
this._handleToolApprovalRequired(event);
|
|
224
|
+
break;
|
|
225
|
+
case 'grouped_approval_required':
|
|
226
|
+
this._handleGroupedApprovalRequired(event);
|
|
227
|
+
break;
|
|
228
|
+
case 'error':
|
|
229
|
+
this._handleErrorEvent(event);
|
|
230
|
+
break;
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
/**
|
|
234
|
+
* Handles the start of a new message from the AI agent.
|
|
235
|
+
* @param event Event containing the message start data
|
|
236
|
+
*/
|
|
237
|
+
_handleMessageStart(event) {
|
|
238
|
+
const aiMessage = {
|
|
239
|
+
body: '',
|
|
240
|
+
sender: this._getAIUser(),
|
|
241
|
+
id: event.data.messageId,
|
|
242
|
+
time: Date.now() / 1000,
|
|
243
|
+
type: 'msg',
|
|
244
|
+
raw_time: false
|
|
245
|
+
};
|
|
246
|
+
this._currentStreamingMessage = aiMessage;
|
|
247
|
+
this.messageAdded(aiMessage);
|
|
248
|
+
}
|
|
249
|
+
/**
|
|
250
|
+
* Handles streaming message chunks from the AI agent.
|
|
251
|
+
* @param event Event containing the message chunk data
|
|
252
|
+
*/
|
|
253
|
+
_handleMessageChunk(event) {
|
|
254
|
+
if (this._currentStreamingMessage &&
|
|
255
|
+
this._currentStreamingMessage.id === event.data.messageId) {
|
|
256
|
+
this._currentStreamingMessage.body = event.data.fullContent;
|
|
257
|
+
this.messageAdded(this._currentStreamingMessage);
|
|
258
|
+
}
|
|
259
|
+
}
|
|
260
|
+
/**
|
|
261
|
+
* Handles the completion of a message from the AI agent.
|
|
262
|
+
* @param event Event containing the message completion data
|
|
263
|
+
*/
|
|
264
|
+
_handleMessageComplete(event) {
|
|
265
|
+
if (this._currentStreamingMessage &&
|
|
266
|
+
this._currentStreamingMessage.id === event.data.messageId) {
|
|
267
|
+
this._currentStreamingMessage.body = event.data.content;
|
|
268
|
+
this.messageAdded(this._currentStreamingMessage);
|
|
269
|
+
this._currentStreamingMessage = null;
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
/**
|
|
273
|
+
* Handles the start of a tool call execution.
|
|
274
|
+
* @param event Event containing the tool call start data
|
|
275
|
+
*/
|
|
276
|
+
_handleToolCallStartEvent(event) {
|
|
277
|
+
const toolCallMessageId = UUID.uuid4();
|
|
278
|
+
const toolCallMessage = {
|
|
279
|
+
body: `<details class="jp-ai-tool-call jp-ai-tool-pending">
|
|
280
|
+
<summary class="jp-ai-tool-header">
|
|
281
|
+
<div class="jp-ai-tool-icon">⚡</div>
|
|
282
|
+
<div class="jp-ai-tool-title">${event.data.toolName}</div>
|
|
283
|
+
<div class="jp-ai-tool-status jp-ai-tool-status-pending">Running...</div>
|
|
284
|
+
</summary>
|
|
285
|
+
<div class="jp-ai-tool-body">
|
|
286
|
+
<div class="jp-ai-tool-section">
|
|
287
|
+
<div class="jp-ai-tool-label">Input</div>
|
|
288
|
+
<pre class="jp-ai-tool-code"><code>${JSON.stringify(event.data.input, null, 2)}</code></pre>
|
|
289
|
+
</div>
|
|
290
|
+
</div>
|
|
291
|
+
</details>`,
|
|
292
|
+
sender: this._getAIUser(),
|
|
293
|
+
id: toolCallMessageId,
|
|
294
|
+
time: Date.now() / 1000,
|
|
295
|
+
type: 'msg',
|
|
296
|
+
raw_time: false
|
|
297
|
+
};
|
|
298
|
+
if (event.data.callId) {
|
|
299
|
+
this._pendingToolCalls.set(event.data.callId, toolCallMessageId);
|
|
300
|
+
}
|
|
301
|
+
this.messageAdded(toolCallMessage);
|
|
302
|
+
}
|
|
303
|
+
/**
|
|
304
|
+
* Handles the completion of a tool call execution.
|
|
305
|
+
* @param event Event containing the tool call completion data
|
|
306
|
+
*/
|
|
307
|
+
_handleToolCallCompleteEvent(event) {
|
|
308
|
+
const messageId = this._pendingToolCalls.get(event.data.callId);
|
|
309
|
+
if (messageId) {
|
|
310
|
+
const existingMessageIndex = this.messages.findIndex(msg => msg.id === messageId);
|
|
311
|
+
if (existingMessageIndex !== -1) {
|
|
312
|
+
const existingMessage = this.messages[existingMessageIndex];
|
|
313
|
+
const inputJson = existingMessage.body.match(/<code>([\s\S]*?)<\/code>/)?.[1] || '';
|
|
314
|
+
const statusClass = event.data.isError
|
|
315
|
+
? 'jp-ai-tool-error'
|
|
316
|
+
: 'jp-ai-tool-completed';
|
|
317
|
+
const statusText = event.data.isError ? 'Error' : 'Completed';
|
|
318
|
+
const statusColor = event.data.isError
|
|
319
|
+
? 'jp-ai-tool-status-error'
|
|
320
|
+
: 'jp-ai-tool-status-completed';
|
|
321
|
+
const updatedMessage = {
|
|
322
|
+
...existingMessage,
|
|
323
|
+
body: `<details class="jp-ai-tool-call ${statusClass}">
|
|
324
|
+
<summary class="jp-ai-tool-header">
|
|
325
|
+
<div class="jp-ai-tool-icon">⚡</div>
|
|
326
|
+
<div class="jp-ai-tool-title">${event.data.toolName}</div>
|
|
327
|
+
<div class="jp-ai-tool-status ${statusColor}">${statusText}</div>
|
|
328
|
+
</summary>
|
|
329
|
+
<div class="jp-ai-tool-body">
|
|
330
|
+
<div class="jp-ai-tool-section">
|
|
331
|
+
<div class="jp-ai-tool-label">Input</div>
|
|
332
|
+
<pre class="jp-ai-tool-code"><code>${inputJson}</code></pre>
|
|
333
|
+
</div>
|
|
334
|
+
<div class="jp-ai-tool-section">
|
|
335
|
+
<div class="jp-ai-tool-label">${event.data.isError ? 'Error' : 'Result'}</div>
|
|
336
|
+
<pre class="jp-ai-tool-code"><code>${event.data.output}</code></pre>
|
|
337
|
+
</div>
|
|
338
|
+
</div>
|
|
339
|
+
</details>`
|
|
340
|
+
};
|
|
341
|
+
this.messageAdded(updatedMessage);
|
|
342
|
+
this._pendingToolCalls.delete(event.data.callId);
|
|
343
|
+
}
|
|
344
|
+
}
|
|
345
|
+
}
|
|
346
|
+
/**
|
|
347
|
+
* Handles tool approval requests from the AI agent.
|
|
348
|
+
* @param event Event containing the tool approval request data
|
|
349
|
+
*/
|
|
350
|
+
_handleToolApprovalRequired(event) {
|
|
351
|
+
// Handle single tool approval - either update existing tool call message or create new approval message
|
|
352
|
+
if (event.data.callId) {
|
|
353
|
+
const messageId = this._pendingToolCalls.get(event.data.callId);
|
|
354
|
+
if (messageId) {
|
|
355
|
+
const existingMessageIndex = this.messages.findIndex(msg => msg.id === messageId);
|
|
356
|
+
if (existingMessageIndex !== -1) {
|
|
357
|
+
const existingMessage = this.messages[existingMessageIndex];
|
|
358
|
+
const assistantName = this._getAIUser().display_name;
|
|
359
|
+
const updatedMessage = {
|
|
360
|
+
...existingMessage,
|
|
361
|
+
body: `<details class="jp-ai-tool-call jp-ai-tool-pending" open>
|
|
362
|
+
<summary class="jp-ai-tool-header">
|
|
363
|
+
<div class="jp-ai-tool-icon">⚡</div>
|
|
364
|
+
<div class="jp-ai-tool-title">${event.data.toolName}</div>
|
|
365
|
+
<div class="jp-ai-tool-status jp-ai-tool-status-pending">Needs Approval</div>
|
|
366
|
+
</summary>
|
|
367
|
+
<div class="jp-ai-tool-body">
|
|
368
|
+
<div class="jp-ai-tool-section">
|
|
369
|
+
<div class="jp-ai-tool-label">${assistantName} wants to execute this tool. Do you approve?</div>
|
|
370
|
+
<pre class="jp-ai-tool-code"><code>${JSON.stringify(event.data.toolInput, null, 2)}</code></pre>
|
|
371
|
+
</div>
|
|
372
|
+
[APPROVAL_BUTTONS:${event.data.interruptionId}]
|
|
373
|
+
</div>
|
|
374
|
+
</details>`
|
|
375
|
+
};
|
|
376
|
+
this.messageAdded(updatedMessage);
|
|
377
|
+
this.updateWriters([]);
|
|
378
|
+
return;
|
|
379
|
+
}
|
|
380
|
+
}
|
|
381
|
+
}
|
|
382
|
+
// Fallback: create separate approval message
|
|
383
|
+
const approvalMessageId = UUID.uuid4();
|
|
384
|
+
const assistantName = this._getAIUser().display_name;
|
|
385
|
+
const approvalMessage = {
|
|
386
|
+
body: `**🤖 Tool Approval Required: ${event.data.toolName}**
|
|
387
|
+
|
|
388
|
+
${assistantName} wants to execute this tool. Do you approve?
|
|
389
|
+
|
|
390
|
+
${JSON.stringify(event.data.toolInput, null, 2)}
|
|
391
|
+
|
|
392
|
+
[APPROVAL_BUTTONS:${event.data.interruptionId}]`,
|
|
393
|
+
sender: this._getAIUser(),
|
|
394
|
+
id: approvalMessageId,
|
|
395
|
+
time: Date.now() / 1000,
|
|
396
|
+
type: 'msg',
|
|
397
|
+
raw_time: false
|
|
398
|
+
};
|
|
399
|
+
this.messageAdded(approvalMessage);
|
|
400
|
+
this.updateWriters([]); // Stop showing "AI is writing"
|
|
401
|
+
}
|
|
402
|
+
/**
|
|
403
|
+
* Handles grouped tool approval requests from the AI agent.
|
|
404
|
+
* @param event Event containing the grouped tool approval request data
|
|
405
|
+
*/
|
|
406
|
+
_handleGroupedApprovalRequired(event) {
|
|
407
|
+
const assistantName = this._getAIUser().display_name;
|
|
408
|
+
const approvalMessageId = UUID.uuid4();
|
|
409
|
+
const toolsList = event.data.approvals
|
|
410
|
+
.map((info, index) => `**${index + 1}. ${info.toolName}**\n${JSON.stringify(info.toolInput, null, 2)}\n`)
|
|
411
|
+
.join('\n\n');
|
|
412
|
+
const approvalMessage = {
|
|
413
|
+
body: `**🤖 Multiple Tool Approvals Required**
|
|
414
|
+
|
|
415
|
+
${assistantName} wants to execute ${event.data.approvals.length} tools. Do you approve?
|
|
416
|
+
|
|
417
|
+
${toolsList}
|
|
418
|
+
|
|
419
|
+
[GROUP_APPROVAL_BUTTONS:${event.data.groupId}:${event.data.approvals.map(info => info.interruptionId).join(',')}]`,
|
|
420
|
+
sender: this._getAIUser(),
|
|
421
|
+
id: approvalMessageId,
|
|
422
|
+
time: Date.now() / 1000,
|
|
423
|
+
type: 'msg',
|
|
424
|
+
raw_time: false
|
|
425
|
+
};
|
|
426
|
+
this.messageAdded(approvalMessage);
|
|
427
|
+
this.updateWriters([]); // Stop showing "AI is writing"
|
|
428
|
+
}
|
|
429
|
+
/**
|
|
430
|
+
* Handles error events from the AI agent.
|
|
431
|
+
* @param event Event containing the error information
|
|
432
|
+
*/
|
|
433
|
+
_handleErrorEvent(event) {
|
|
434
|
+
const errorMessage = {
|
|
435
|
+
body: `Error generating response: ${event.data.error.message}`,
|
|
436
|
+
sender: this._getAIUser(),
|
|
437
|
+
id: UUID.uuid4(),
|
|
438
|
+
time: Date.now() / 1000,
|
|
439
|
+
type: 'msg',
|
|
440
|
+
raw_time: false
|
|
441
|
+
};
|
|
442
|
+
this.messageAdded(errorMessage);
|
|
443
|
+
}
|
|
444
|
+
/**
|
|
445
|
+
* Processes file attachments and returns their content as formatted strings.
|
|
446
|
+
* @param attachments Array of file attachments to process
|
|
447
|
+
* @returns Array of formatted attachment contents
|
|
448
|
+
*/
|
|
449
|
+
async _processAttachments(attachments) {
|
|
450
|
+
const contents = [];
|
|
451
|
+
for (const attachment of attachments) {
|
|
452
|
+
try {
|
|
453
|
+
const fileContent = await this._readFileAttachment(attachment);
|
|
454
|
+
if (fileContent) {
|
|
455
|
+
// Get file extension for syntax highlighting
|
|
456
|
+
const fileExtension = PathExt.extname(attachment.value).toLowerCase();
|
|
457
|
+
const language = fileExtension === '.ipynb' ? 'json' : '';
|
|
458
|
+
contents.push(`**File: ${attachment.value}**\n\`\`\`${language}\n${fileContent}\n\`\`\``);
|
|
459
|
+
}
|
|
460
|
+
}
|
|
461
|
+
catch (error) {
|
|
462
|
+
console.warn(`Failed to read attachment ${attachment.value}:`, error);
|
|
463
|
+
contents.push(`**File: ${attachment.value}** (Could not read file)`);
|
|
464
|
+
}
|
|
465
|
+
}
|
|
466
|
+
return contents;
|
|
467
|
+
}
|
|
468
|
+
/**
|
|
469
|
+
* Reads the content of a file attachment.
|
|
470
|
+
* @param attachment The file attachment to read
|
|
471
|
+
* @returns File content as string or null if unable to read
|
|
472
|
+
*/
|
|
473
|
+
async _readFileAttachment(attachment) {
|
|
474
|
+
// Handle both 'file' and 'notebook' types since both have a 'value' path
|
|
475
|
+
if (attachment.type !== 'file' && attachment.type !== 'notebook') {
|
|
476
|
+
return null;
|
|
477
|
+
}
|
|
478
|
+
try {
|
|
479
|
+
const model = await this.input.documentManager?.services.contents.get(attachment.value);
|
|
480
|
+
if (!model?.content) {
|
|
481
|
+
return null;
|
|
482
|
+
}
|
|
483
|
+
if (model.type === 'file') {
|
|
484
|
+
// Regular file content
|
|
485
|
+
return model.content;
|
|
486
|
+
}
|
|
487
|
+
else if (model.type === 'notebook') {
|
|
488
|
+
// Clear outputs from notebook cells before sending to LLM
|
|
489
|
+
// TODO: make this configurable?
|
|
490
|
+
const cells = model.content.cells.map((cell) => {
|
|
491
|
+
const cleanCell = { ...cell };
|
|
492
|
+
if (cleanCell.outputs) {
|
|
493
|
+
cleanCell.outputs = [];
|
|
494
|
+
}
|
|
495
|
+
if (cleanCell.execution_count) {
|
|
496
|
+
cleanCell.execution_count = null;
|
|
497
|
+
}
|
|
498
|
+
return cleanCell;
|
|
499
|
+
});
|
|
500
|
+
const notebookModel = {
|
|
501
|
+
cells,
|
|
502
|
+
metadata: model.metadata || {},
|
|
503
|
+
nbformat: model.nbformat || 4,
|
|
504
|
+
nbformat_minor: model.nbformat_minor || 4
|
|
505
|
+
};
|
|
506
|
+
return JSON.stringify(notebookModel);
|
|
507
|
+
}
|
|
508
|
+
return null;
|
|
509
|
+
}
|
|
510
|
+
catch (error) {
|
|
511
|
+
console.warn(`Failed to read file ${attachment.value}:`, error);
|
|
512
|
+
return null;
|
|
513
|
+
}
|
|
514
|
+
}
|
|
515
|
+
/**
|
|
516
|
+
* Updates the status display of a grouped approval message.
|
|
517
|
+
* @param messageId The message ID to update
|
|
518
|
+
* @param status The status text to display
|
|
519
|
+
* @param isSuccess Whether the action was successful
|
|
520
|
+
*/
|
|
521
|
+
_updateGroupedApprovalStatus(messageId, status, isSuccess) {
|
|
522
|
+
const existingMessageIndex = this.messages.findIndex(msg => msg.id === messageId);
|
|
523
|
+
if (existingMessageIndex !== -1) {
|
|
524
|
+
const existingMessage = this.messages[existingMessageIndex];
|
|
525
|
+
// Extract tool count and names from existing message
|
|
526
|
+
const toolCountMatch = existingMessage.body.match(/execute (\d+) tools/);
|
|
527
|
+
const toolCount = toolCountMatch ? toolCountMatch[1] : 'multiple';
|
|
528
|
+
const statusIcon = isSuccess ? '✅' : '❌';
|
|
529
|
+
const statusClass = isSuccess ? 'approved' : 'rejected';
|
|
530
|
+
const updatedMessage = {
|
|
531
|
+
...existingMessage,
|
|
532
|
+
body: `**${statusIcon} Group Tool Approval: ${status}**
|
|
533
|
+
|
|
534
|
+
The request to execute ${toolCount} tools has been **${statusClass}**.
|
|
535
|
+
|
|
536
|
+
<div class="jp-ai-group-approval-${statusClass}">
|
|
537
|
+
Status: ${status}
|
|
538
|
+
</div>`
|
|
539
|
+
};
|
|
540
|
+
this.messageAdded(updatedMessage);
|
|
541
|
+
}
|
|
542
|
+
}
|
|
543
|
+
/**
 * Updates the status display of a tool call box.
 *
 * Looks up the chat message with the given id, re-extracts the tool name
 * and input from that message's existing HTML body via regex, and emits a
 * rebuilt `<details>` box carrying the new status text and styling.
 *
 * @param messageId The message ID to update
 * @param status The status text to display
 * @param isSuccess Whether the action was successful
 */
_updateToolCallBoxStatus(messageId, status, isSuccess) {
    const existingMessageIndex = this.messages.findIndex(msg => msg.id === messageId);
    if (existingMessageIndex !== -1) {
        const existingMessage = this.messages[existingMessageIndex];
        // Extract tool name and input from existing message.
        // NOTE(review): this depends on the exact markup produced when the
        // tool-call box was first rendered; if that markup changes, these
        // regexes must be kept in sync.
        const toolNameMatch = existingMessage.body.match(/<div class="jp-ai-tool-title">([^<]+)<\/div>/);
        const toolName = toolNameMatch ? toolNameMatch[1] : 'Unknown Tool';
        // Non-greedy [\s\S]*? captures the shortest span inside the first
        // <code> element, i.e. the tool input as originally rendered.
        const codeMatch = existingMessage.body.match(/<code>([\s\S]*?)<\/code>/);
        const toolInput = codeMatch ? codeMatch[1] : '{}';
        // Determine styling based on status
        const statusClass = isSuccess
            ? 'jp-ai-tool-completed'
            : 'jp-ai-tool-error';
        const statusColor = isSuccess
            ? 'jp-ai-tool-status-completed'
            : 'jp-ai-tool-status-error';
        // Rebuild the collapsible tool-call box with the updated status,
        // keeping all other message properties (id, sender, time, ...) intact.
        const updatedMessage = {
            ...existingMessage,
            body: `<details class="jp-ai-tool-call ${statusClass}">
<summary class="jp-ai-tool-header">
<div class="jp-ai-tool-icon">⚡</div>
<div class="jp-ai-tool-title">${toolName}</div>
<div class="jp-ai-tool-status ${statusColor}">${status}</div>
</summary>
<div class="jp-ai-tool-body">
<div class="jp-ai-tool-section">
<div class="jp-ai-tool-label">Input</div>
<pre class="jp-ai-tool-code"><code>${toolInput}</code></pre>
</div>
</div>
</details>`
        };
        // NOTE(review): messageAdded is invoked with the same message id —
        // presumably the chat widget replaces the existing message in place
        // rather than appending; confirm against the chat model base class.
        this.messageAdded(updatedMessage);
    }
}
|
|
584
|
+
// Private fields
// The AI settings model (provider/model configuration).
_settingsModel;
// The user attributed to outgoing chat messages — TODO confirm against constructor.
_user;
// Pending tool-call entries awaiting approval/resolution; presumably keyed
// by message or tool-call id — verify against the approval handlers.
_pendingToolCalls = new Map();
// The agent manager; NOTE(review): presumably drives the agent loop — confirm.
_agentManager;
// The message currently being streamed, or null when no stream is active.
_currentStreamingMessage = null;
|
|
590
|
+
}
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
import { CompletionHandler, IInlineCompletionContext, IInlineCompletionList, IInlineCompletionProvider } from '@jupyterlab/completer';
|
|
2
|
+
import { AISettingsModel } from '../models/settings-model';
|
|
3
|
+
import type { ICompletionProviderRegistry } from '../tokens';
|
|
4
|
+
/**
 * Configuration interface for provider-specific completion behavior.
 *
 * All fields are optional; a provider supplies only the hooks it needs.
 */
export interface IProviderCompletionConfig {
    /**
     * Sampling temperature to use for this provider's completion requests.
     */
    temperature?: number;
    /**
     * Whether the provider supports fill-in-the-middle (FIM) completion.
     */
    supportsFillInMiddle?: boolean;
    /**
     * Whether to set `filterText` on completion items for this provider.
     */
    useFilterText?: boolean;
    /**
     * Custom prompt formatter for provider-specific requirements.
     *
     * @param prompt The prompt text (presumably the text before the cursor — confirm in the provider implementation).
     * @param suffix The suffix text (presumably the text after the cursor).
     * @returns The provider-specific formatted prompt.
     */
    customPromptFormat?: (prompt: string, suffix: string) => string;
    /**
     * Function to clean up provider-specific artifacts (e.g. sentinel or
     * stop tokens) from the raw completion text.
     *
     * @param completion The raw completion text returned by the provider.
     * @returns The cleaned-up completion text.
     */
    cleanupCompletion?: (completion: string) => string;
}
|
|
29
|
+
/**
 * The generic completion provider to register to the completion provider manager.
 */
export declare class AICompletionProvider implements IInlineCompletionProvider {
    /**
     * Construct a new completion provider.
     *
     * @param options The settings model and optional provider registry.
     */
    constructor(options: AICompletionProvider.IOptions);
    /**
     * The unique identifier of the provider.
     */
    readonly identifier = "@jupyterlite/ai:completer";
    /**
     * Get the current completer name based on settings.
     */
    get name(): string;
    /**
     * Get the system prompt for the completion.
     */
    get systemPrompt(): string;
    /**
     * Fetch completion items based on the request and context.
     *
     * @param request The completion request from the completion handler.
     * @param context Additional inline-completion context from the editor.
     * @returns A promise resolving to the list of inline completion items.
     */
    fetch(request: CompletionHandler.IRequest, context: IInlineCompletionContext): Promise<IInlineCompletionList>;
    /**
     * Update the language model based on current settings.
     */
    private _updateModel;
    /**
     * Extract context from notebook cells.
     */
    private _extractNotebookContext;
    /**
     * Get provider-specific completion configuration from registry.
     */
    private _getProviderCompletionConfig;
    // The AI settings model driving provider/model selection.
    private _settingsModel;
    // Optional registry used to look up provider-specific completion config.
    private _completionProviderRegistry?;
    // The language model instance in use — presumably set by `_updateModel`; confirm.
    private _model;
}
|
|
69
|
+
export declare namespace AICompletionProvider {
    /**
     * The options for the constructor of the completion provider.
     */
    interface IOptions {
        /**
         * The AI settings model.
         */
        settingsModel: AISettingsModel;
        /**
         * The completion provider registry. Optional; when omitted, no
         * provider-specific completion configuration is available.
         */
        completionProviderRegistry?: ICompletionProviderRegistry;
    }
}
|