openbot 0.2.14 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. package/dist/agents/openbot/index.js +76 -0
  2. package/dist/agents/openbot/middleware/approval.js +132 -0
  3. package/dist/agents/openbot/runtime.js +289 -0
  4. package/dist/agents/openbot/system-prompt.js +32 -0
  5. package/dist/agents/openbot/tools/delegation.js +78 -0
  6. package/dist/agents/openbot/tools/mcp.js +99 -0
  7. package/dist/agents/openbot/tools/shell.js +91 -0
  8. package/dist/agents/openbot/tools/storage.js +75 -0
  9. package/dist/agents/openbot/tools/ui.js +176 -0
  10. package/dist/agents/system.js +20 -93
  11. package/dist/app/cli.js +1 -1
  12. package/dist/app/config.js +4 -1
  13. package/dist/app/server.js +15 -8
  14. package/dist/bus/agent-package.js +1 -0
  15. package/dist/bus/plugin.js +1 -0
  16. package/dist/bus/services.js +711 -0
  17. package/dist/bus/types.js +1 -0
  18. package/dist/harness/context.js +250 -0
  19. package/dist/harness/event-normalizer.js +59 -0
  20. package/dist/harness/orchestrator.js +27 -227
  21. package/dist/harness/process.js +25 -3
  22. package/dist/harness/queue-processor.js +227 -0
  23. package/dist/harness/runtime-factory.js +103 -0
  24. package/dist/plugins/ai-sdk/index.js +37 -0
  25. package/dist/plugins/ai-sdk/runtime.js +402 -0
  26. package/dist/plugins/ai-sdk/system-prompt.js +3 -0
  27. package/dist/plugins/ai-sdk.js +277 -87
  28. package/dist/plugins/approval/index.js +159 -0
  29. package/dist/plugins/approval.js +163 -0
  30. package/dist/plugins/delegation/index.js +79 -0
  31. package/dist/plugins/delegation.js +67 -11
  32. package/dist/plugins/mcp/index.js +108 -0
  33. package/dist/plugins/memory/index.js +71 -0
  34. package/dist/plugins/shell/index.js +99 -0
  35. package/dist/plugins/shell.js +123 -0
  36. package/dist/plugins/storage-tools/index.js +85 -0
  37. package/dist/plugins/storage.js +240 -5
  38. package/dist/plugins/ui/index.js +184 -0
  39. package/dist/plugins/ui.js +185 -21
  40. package/dist/registry/agents.js +138 -0
  41. package/dist/registry/plugins.js +93 -50
  42. package/dist/services/agent-packages.js +103 -0
  43. package/dist/services/memory.js +152 -0
  44. package/dist/services/plugins.js +98 -0
  45. package/dist/services/storage.js +366 -94
  46. package/docs/agents.md +52 -65
  47. package/docs/architecture.md +1 -1
  48. package/docs/plugins.md +70 -58
  49. package/docs/templates/AGENT.example.md +57 -0
  50. package/package.json +8 -7
  51. package/src/app/cli.ts +1 -1
  52. package/src/app/config.ts +14 -4
  53. package/src/app/server.ts +23 -10
  54. package/src/app/types.ts +445 -16
  55. package/src/assets/icon.svg +4 -1
  56. package/src/bus/plugin.ts +67 -0
  57. package/src/bus/services.ts +786 -0
  58. package/src/bus/types.ts +160 -0
  59. package/src/harness/context.ts +293 -0
  60. package/src/harness/event-normalizer.ts +82 -0
  61. package/src/harness/orchestrator.ts +35 -273
  62. package/src/harness/process.ts +28 -4
  63. package/src/harness/queue-processor.ts +309 -0
  64. package/src/harness/runtime-factory.ts +125 -0
  65. package/src/plugins/ai-sdk/index.ts +44 -0
  66. package/src/plugins/ai-sdk/runtime.ts +484 -0
  67. package/src/plugins/ai-sdk/system-prompt.ts +4 -0
  68. package/src/plugins/approval/index.ts +228 -0
  69. package/src/plugins/delegation/index.ts +94 -0
  70. package/src/plugins/mcp/index.ts +128 -0
  71. package/src/plugins/memory/index.ts +85 -0
  72. package/src/plugins/shell/index.ts +123 -0
  73. package/src/plugins/storage-tools/index.ts +101 -0
  74. package/src/plugins/ui/index.ts +227 -0
  75. package/src/registry/plugins.ts +108 -55
  76. package/src/services/memory.ts +213 -0
  77. package/src/services/plugins.ts +133 -0
  78. package/src/services/storage.ts +472 -137
  79. package/src/agents/system.ts +0 -112
  80. package/src/plugins/ai-sdk.ts +0 -197
  81. package/src/plugins/delegation.ts +0 -60
  82. package/src/plugins/mcp.ts +0 -154
  83. package/src/plugins/storage.ts +0 -725
  84. package/src/plugins/ui.ts +0 -57
@@ -0,0 +1,125 @@
1
+ import { melony, MelonyPlugin, Runtime } from 'melony';
2
+ import { OpenBotEvent, OpenBotState } from '../app/types.js';
3
+ import type { Plugin, PluginContext, ToolDefinition } from '../bus/plugin.js';
4
+ import { resolvePlugin } from '../registry/plugins.js';
5
+ import { storageService } from '../services/storage.js';
6
+ import { busServicesPlugin } from '../bus/services.js';
7
+
8
+ /**
9
+ * Enhances the agent's instructions with a list of other available agents the
10
+ * orchestrator can hand off / delegate to. Agents that include the
11
+ * `delegation` plugin will surface peers; agents without it can ignore this.
12
+ */
13
+ export async function enhanceInstructions(state: OpenBotState) {
14
+ const { agentId, agentDetails } = state;
15
+ if (!agentDetails) return;
16
+
17
+ try {
18
+ const agents = await storageService.getAgents();
19
+ const otherAgents = agents.filter((a) => a.id !== agentId);
20
+ if (otherAgents.length === 0) return;
21
+
22
+ const agentsList = otherAgents
23
+ .map((a) => `- **${a.id}**${a.description ? `: ${a.description}` : ''}`)
24
+ .join('\n');
25
+
26
+ const header = '### Available Agents for Handoff/Delegation:';
27
+ if (!agentDetails.instructions.includes(header)) {
28
+ agentDetails.instructions +=
29
+ `\n\n${header}\n${agentsList}\n\n` +
30
+ 'Use `handoff` to transfer control to another agent. ' +
31
+ 'Use `delegate` when you need a sub-result from another agent and want to continue after it returns.';
32
+ }
33
+ } catch (error) {
34
+ console.warn('[agent] Failed to enhance instructions', error);
35
+ }
36
+ }
37
+
38
+ const composeMelonyPlugin = (
39
+ ...plugins: MelonyPlugin<OpenBotState, OpenBotEvent>[]
40
+ ): MelonyPlugin<OpenBotState, OpenBotEvent> => {
41
+ return (builder) => {
42
+ for (const plugin of plugins) {
43
+ plugin(builder);
44
+ }
45
+ };
46
+ };
47
+
48
+ /**
49
+ * Build the Melony runtime that drives a single agent run on the OpenBot bus.
50
+ *
51
+ * The runtime always wires:
52
+ * 1. `busServicesPlugin` — bus-level services (storage, channels, threads,
53
+ * plugin install/marketplace) shared by every agent.
54
+ * 2. Every Plugin referenced by the agent's `plugins[]` frontmatter, in
55
+ * order. Tool definitions from each plugin are merged into a single map
56
+ * and passed to every plugin via `PluginContext.tools`. Runtime plugins
57
+ * (those that handle `agent:invoke`) consume the merged map; tool plugins
58
+ * ignore it.
59
+ *
60
+ * Tool name collisions across plugins log a warning; the first plugin wins.
61
+ */
62
+ export async function createAgentRuntime(
63
+ state: OpenBotState,
64
+ ): Promise<Runtime<OpenBotState, OpenBotEvent>> {
65
+ await enhanceInstructions(state);
66
+
67
+ const runtime = melony<OpenBotState, OpenBotEvent>({
68
+ initialState: state,
69
+ });
70
+
71
+ runtime.use(busServicesPlugin({ storage: storageService }));
72
+
73
+ const refs = state.agentDetails?.pluginRefs || [];
74
+ if (refs.length === 0) {
75
+ console.warn(
76
+ `[agent] Agent "${state.agentId}" has no plugins; only bus services will be active.`,
77
+ );
78
+ return runtime.build();
79
+ }
80
+
81
+ // Resolve all plugins first so we can merge tool definitions before factory calls.
82
+ const resolved: Array<{ ref: { id: string; config?: Record<string, unknown> }; plugin: Plugin }> = [];
83
+ for (const ref of refs) {
84
+ const plugin = await resolvePlugin(ref.id);
85
+ if (!plugin) {
86
+ console.warn(
87
+ `[agent] Plugin "${ref.id}" for agent "${state.agentId}" could not be resolved.`,
88
+ );
89
+ continue;
90
+ }
91
+ resolved.push({ ref, plugin });
92
+ }
93
+
94
+ // Merge tool definitions; first plugin wins on collision.
95
+ const tools: Record<string, ToolDefinition> = {};
96
+ for (const { plugin } of resolved) {
97
+ if (!plugin.toolDefinitions) continue;
98
+ for (const [name, def] of Object.entries(plugin.toolDefinitions)) {
99
+ if (tools[name]) {
100
+ console.warn(
101
+ `[agent] Tool name collision for "${name}" while loading plugin "${plugin.id}"; keeping first registration.`,
102
+ );
103
+ continue;
104
+ }
105
+ tools[name] = def;
106
+ }
107
+ }
108
+
109
+ // Compose all plugin factories with the shared context.
110
+ const pluginPlugins: MelonyPlugin<OpenBotState, OpenBotEvent>[] = [];
111
+ for (const { ref, plugin } of resolved) {
112
+ const context: PluginContext = {
113
+ agentId: state.agentId,
114
+ agentDetails: state.agentDetails!,
115
+ config: ref.config || {},
116
+ storage: storageService,
117
+ tools,
118
+ };
119
+ pluginPlugins.push(plugin.factory(context));
120
+ }
121
+
122
+ runtime.use(composeMelonyPlugin(...pluginPlugins));
123
+
124
+ return runtime.build();
125
+ }
@@ -0,0 +1,44 @@
1
+ import type { Plugin } from '../../bus/plugin.js';
2
+ import { aiSdkRuntime } from './runtime.js';
3
+ import { AI_SDK_SYSTEM_PROMPT } from './system-prompt.js';
4
+
5
+ /**
6
+ * `ai-sdk` — generic LLM runtime plugin built on the Vercel AI SDK.
7
+ *
8
+ * Owns `agent:invoke` and consumes the merged `tools` map provided by the
9
+ * agent loader (collected from every tool plugin attached to the same agent).
10
+ * Pair with tool plugins like `shell`, `mcp`, `delegation`, etc.
11
+ */
12
+ export const aiSdkPlugin: Plugin = {
13
+ id: 'ai-sdk',
14
+ name: 'AI SDK Runtime',
15
+ description:
16
+ 'Generic LLM runtime built on the Vercel AI SDK. Consumes tools contributed by other plugins.',
17
+ defaultInstructions: AI_SDK_SYSTEM_PROMPT,
18
+ configSchema: {
19
+ type: 'object',
20
+ properties: {
21
+ model: {
22
+ type: 'string',
23
+ description:
24
+ 'Provider model string, e.g. openai/gpt-4o-mini, anthropic/claude-3-5-sonnet-20240620',
25
+ default: 'openai/gpt-4o-mini',
26
+ },
27
+ },
28
+ },
29
+ factory: ({ agentDetails, config, storage, tools }) => {
30
+ const model =
31
+ typeof config.model === 'string' && config.model
32
+ ? config.model
33
+ : 'openai/gpt-4o-mini';
34
+
35
+ return aiSdkRuntime({
36
+ model,
37
+ system: agentDetails.instructions || AI_SDK_SYSTEM_PROMPT,
38
+ storage,
39
+ toolDefinitions: tools,
40
+ });
41
+ },
42
+ };
43
+
44
+ export default aiSdkPlugin;
@@ -0,0 +1,484 @@
1
+ import { MelonyPlugin, RuntimeContext } from 'melony';
2
+ import { generateText, type LanguageModel, type ModelMessage } from 'ai';
3
+ import { openai } from '@ai-sdk/openai';
4
+ import { anthropic } from '@ai-sdk/anthropic';
5
+ import { OpenBotEvent, OpenBotState, ShortTermMessage } from '../../app/types.js';
6
+ import { Storage } from '../../bus/types.js';
7
+ import type { ToolDefinition } from '../../bus/plugin.js';
8
+ import { createDefaultContextEngine } from '../../harness/context.js';
9
+ import { saveConfig } from '../../app/config.js';
10
+
11
/** Options for the ai-sdk runtime plugin. All fields are optional. */
export interface AiSdkRuntimeOptions {
  /** Provider model string (e.g. `openai/gpt-4o-mini`, `anthropic/claude-3-5-sonnet-20240620`). */
  model?: string;
  /** Static or dynamic system prompt. The function form receives the runtime context and may be async. */
  system?: string | ((context: RuntimeContext) => string | Promise<string>);
  /** Bus storage used to persist short-term messages and saved API keys; when omitted, persistence is skipped. */
  storage?: Storage;
  /** Builds an additional system-prompt section from state; defaults to `createDefaultContextEngine()`. */
  contextEngine?: {
    buildContext: (state: OpenBotState, storage?: Storage) => Promise<string>;
  };
  /** Tool definitions merged from all tool plugins attached to this agent. */
  toolDefinitions?: Record<string, ToolDefinition>;
}
23
+
24
+ function resolveModel(modelString: string): LanguageModel {
25
+ const [provider, ...rest] = modelString.split('/');
26
+ const modelId = rest.join('/');
27
+ if (!modelId) {
28
+ throw new Error(`Invalid model string: "${modelString}". Expected "provider/model-id".`);
29
+ }
30
+ switch (provider) {
31
+ case 'openai':
32
+ return openai(modelId);
33
+ case 'anthropic':
34
+ return anthropic(modelId);
35
+ default:
36
+ throw new Error(`Unsupported AI provider: "${provider}"`);
37
+ }
38
+ }
39
+
40
+ const asRecord = (value: unknown): Record<string, unknown> =>
41
+ value && typeof value === 'object' && !Array.isArray(value)
42
+ ? (value as Record<string, unknown>)
43
+ : {};
44
+
45
+ /** Per-message hard cap (in characters) on tool-result payloads we feed back
46
+ * to the model. Prevents one huge tool output from eating the context window;
47
+ * the original event remains intact in storage. */
48
+ const TOOL_RESULT_MAX_CHARS = 8000;
49
+
50
+ /** Sliding window: max number of messages we replay to the model on each
51
+ * invocation. Older turns stay on disk but are not sent. Keeps both the
52
+ * recent prompts and the prompt token budget bounded. */
53
+ const MAX_WINDOW_MESSAGES = 80;
54
+
55
+ const truncateToolPayload = (raw: unknown): string => {
56
+ const serialized = typeof raw === 'string' ? raw : JSON.stringify(raw);
57
+ if (serialized.length <= TOOL_RESULT_MAX_CHARS) return serialized;
58
+ const dropped = serialized.length - TOOL_RESULT_MAX_CHARS;
59
+ return `${serialized.slice(0, TOOL_RESULT_MAX_CHARS)}\n…[truncated ${dropped} chars]`;
60
+ };
61
+
62
+ /**
63
+ * Trim the message history to a sliding window while preserving tool-call
64
+ * integrity. Drops any leading orphan `tool` messages whose matching
65
+ * assistant call was sliced off, since most providers reject that.
66
+ */
67
+ const buildMessageWindow = (messages: ShortTermMessage[]): ShortTermMessage[] => {
68
+ if (messages.length <= MAX_WINDOW_MESSAGES) return messages;
69
+ const tail = messages.slice(-MAX_WINDOW_MESSAGES);
70
+ const knownAssistantCallIds = new Set<string>();
71
+ for (const m of tail) {
72
+ if (m.role === 'assistant' && m.toolCalls) {
73
+ for (const tc of m.toolCalls) knownAssistantCallIds.add(tc.id);
74
+ }
75
+ }
76
+ return tail.filter((m) => m.role !== 'tool' || knownAssistantCallIds.has(m.toolCallId));
77
+ };
78
+
79
+ /**
80
+ * Self-healing pass: every assistant tool_call must have a matching tool
81
+ * result before the next user/assistant turn, or providers (OpenAI in
82
+ * particular) reject the request with "Tool result is missing for tool call".
83
+ *
84
+ * This can happen when a handler emits a `:result` event without `meta`
85
+ * (orphaning the call), the process restarts mid-run, or a tool handler
86
+ * crashes. Rather than refuse to continue, we inject synthetic tool messages
87
+ * with a clear error payload — the LLM can then explain the failure to the
88
+ * user and proceed.
89
+ */
90
+ const repairOpenToolCalls = (messages: ShortTermMessage[]): ShortTermMessage[] => {
91
+ const fulfilled = new Set<string>();
92
+ for (const m of messages) {
93
+ if (m.role === 'tool') fulfilled.add(m.toolCallId);
94
+ }
95
+
96
+ const repaired: ShortTermMessage[] = [];
97
+ for (const m of messages) {
98
+ repaired.push(m);
99
+ if (m.role !== 'assistant' || !m.toolCalls) continue;
100
+ for (const tc of m.toolCalls) {
101
+ if (fulfilled.has(tc.id)) continue;
102
+ repaired.push({
103
+ role: 'tool',
104
+ toolCallId: tc.id,
105
+ toolName: tc.function.name,
106
+ content: JSON.stringify({
107
+ success: false,
108
+ error: 'Tool result was lost (handler did not emit a matching :result event).',
109
+ }),
110
+ });
111
+ fulfilled.add(tc.id);
112
+ }
113
+ }
114
+ return repaired;
115
+ };
116
+
117
+ const readPersistedShortTermMessages = (state: OpenBotState): ShortTermMessage[] => {
118
+ const source = state.threadDetails?.state ?? state.channelDetails?.state;
119
+ const record = asRecord(source);
120
+ const raw = record.shortTermMessages;
121
+ return Array.isArray(raw) ? (raw as ShortTermMessage[]) : [];
122
+ };
123
+
124
+ const persistShortTermMessages = async (
125
+ state: OpenBotState,
126
+ storage: Storage | undefined,
127
+ ): Promise<void> => {
128
+ if (!storage) return;
129
+ const shortTermMessages = state.shortTermMessages ?? [];
130
+ if (state.threadId) {
131
+ await storage.patchThreadState({
132
+ channelId: state.channelId,
133
+ threadId: state.threadId,
134
+ state: { shortTermMessages },
135
+ });
136
+ return;
137
+ }
138
+ await storage.patchChannelState({
139
+ channelId: state.channelId,
140
+ state: { shortTermMessages },
141
+ });
142
+ };
143
+
144
+ async function buildSystemPrompt(
145
+ state: OpenBotState,
146
+ system?: string | ((context: RuntimeContext) => string | Promise<string>),
147
+ context?: RuntimeContext,
148
+ storage?: Storage,
149
+ contextEngine?: {
150
+ buildContext: (state: OpenBotState, storage?: Storage) => Promise<string>;
151
+ },
152
+ ): Promise<string> {
153
+ const sections: string[] = [];
154
+ if (system && typeof system === 'string') sections.push(system);
155
+ if (system && typeof system === 'function' && context) sections.push(await system(context));
156
+ if (contextEngine) sections.push(await contextEngine.buildContext(state, storage));
157
+ return sections.join('\n\n');
158
+ }
159
+
160
/**
 * Generic ai-sdk runtime plugin.
 *
 * Owns `agent:invoke`, runs the LLM, emits tool-call events, and stitches tool
 * results back into the conversation. Tools are supplied externally by the
 * loader (merged from every tool plugin attached to the same agent).
 *
 * Event flow: `agent:invoke` appends the incoming turn and calls the model;
 * each model tool call is emitted as an `action:<tool>` event; a wildcard
 * handler records every matching `*:result` event and re-invokes the model
 * once the last assistant turn's calls are all answered; a widget-response
 * handler captures a new API key when the provider rejects authentication.
 */
export const aiSdkRuntime =
  (options: AiSdkRuntimeOptions): MelonyPlugin<OpenBotState, OpenBotEvent> =>
  (builder) => {
    const {
      model: modelString = 'openai/gpt-4o-mini',
      system,
      storage,
      contextEngine = createDefaultContextEngine(),
      toolDefinitions = {},
    } = options;

    // Mutable so the api_key_request flow below can switch provider/model
    // mid-session without rebuilding the plugin.
    let currentModelString = modelString;
    let model = resolveModel(currentModelString);

    // Lazily hydrate the in-memory history from persisted thread/channel
    // state the first time a handler touches it in this run.
    const ensureShortTermMessages = (state: OpenBotState) => {
      if (!state.shortTermMessages || state.shortTermMessages.length === 0) {
        state.shortTermMessages = readPersistedShortTermMessages(state);
      }
    };

    // Convert the storage-friendly ShortTermMessage shape into ai-sdk
    // ModelMessage parts (tool calls/results become structured content).
    const mapToCoreMessages = (messages: ShortTermMessage[]): ModelMessage[] => {
      return messages.map((m): ModelMessage => {
        if (m.role === 'assistant' && m.toolCalls) {
          // Assistant turn that requested tools: one text part plus a
          // tool-call part per call (arguments are stored as a JSON string).
          return {
            role: 'assistant',
            content: [
              { type: 'text', text: m.content || '' },
              ...m.toolCalls.map((tc) => ({
                type: 'tool-call' as const,
                toolCallId: tc.id,
                toolName: tc.function.name,
                input: JSON.parse(tc.function.arguments),
              })),
            ],
          };
        }
        if (m.role === 'assistant') {
          return { role: 'assistant', content: m.content || '' };
        }
        if (m.role === 'tool') {
          return {
            role: 'tool',
            content: [
              {
                type: 'tool-result',
                toolCallId: m.toolCallId,
                toolName: m.toolName,
                output: { type: 'text', value: JSON.stringify(m.content) },
              },
            ],
          };
        }
        // user/system messages pass through unchanged.
        return m;
      });
    };

    // One model invocation: build the prompt, call generateText, record the
    // assistant turn, then yield action events (tool calls) and/or
    // agent:output (text).
    const runLLM = async function* (
      context: RuntimeContext<OpenBotState, OpenBotEvent>,
      threadId?: string,
    ): AsyncGenerator<OpenBotEvent> {
      ensureShortTermMessages(context.state);
      const systemPrompt = await buildSystemPrompt(
        context.state,
        system,
        context,
        storage,
        contextEngine,
      );

      // Repair orphaned tool calls, then clamp to the sliding window before
      // mapping into the provider message format.
      const coreMessages = mapToCoreMessages(
        buildMessageWindow(repairOpenToolCalls(context.state.shortTermMessages || [])),
      );

      try {
        const result = await generateText({
          model,
          system: systemPrompt,
          messages: coreMessages,
          tools: toolDefinitions as Record<string, { description: string; inputSchema: any }>,
        });

        const toolCalls = result.toolCalls ?? [];

        if (toolCalls.length > 0) {
          // Persist the assistant turn (with its tool calls) before emitting
          // action events, so a crash mid-dispatch can be repaired later by
          // repairOpenToolCalls.
          context.state.shortTermMessages = [
            ...(context.state.shortTermMessages ?? []),
            {
              role: 'assistant',
              content: result.text || '',
              toolCalls: toolCalls.map((tc) => ({
                id: tc.toolCallId,
                type: 'function',
                function: {
                  name: tc.toolName,
                  arguments: JSON.stringify(tc.input),
                },
              })),
            },
          ];
          await persistShortTermMessages(context.state, storage);

          // Each call becomes an `action:<tool>` event; the handling plugin
          // is expected to answer with `action:<tool>:result` carrying the
          // same meta.toolCallId.
          for (const toolCall of toolCalls) {
            yield {
              type: `action:${toolCall.toolName}` as OpenBotEvent['type'],
              data: toolCall.input,
              meta: {
                toolCallId: toolCall.toolCallId,
                agentId: context.state.agentId,
                threadId,
              },
            } as unknown as OpenBotEvent;
          }
        }

        if (result.text) {
          // Only persist a plain assistant message when there were no tool
          // calls — the tool-call branch above already recorded the text.
          if (toolCalls.length === 0) {
            context.state.shortTermMessages = [
              ...(context.state.shortTermMessages ?? []),
              { role: 'assistant', content: result.text },
            ];
            await persistShortTermMessages(context.state, storage);
          }

          yield {
            type: 'agent:output',
            data: { content: result.text },
            meta: { agentId: context.state.agentId, threadId },
          };
        }
      } catch (error: unknown) {
        const errorMessage = error instanceof Error ? error.message : String(error);
        // Heuristic match on auth failures; providers do not share a typed
        // error, so we sniff the message text.
        const isApiKeyError =
          errorMessage.includes('API key') ||
          errorMessage.includes('401') ||
          errorMessage.includes('Unauthorized') ||
          errorMessage.includes('authentication');

        if (isApiKeyError) {
          // Ask the user for provider/model/key via a UI form instead of
          // failing the run; the widget-response handler below completes it.
          const [currentProvider, ...rest] = currentModelString.split('/');
          const currentModelId = rest.join('/');
          yield {
            type: 'client:ui:widget',
            data: {
              kind: 'form',
              widgetId: `api_key_request_${Date.now()}`,
              title: `AI Provider API Key Required`,
              description: `The AI provider returned an authentication error. Select your provider, model, and provide a valid API key to continue. The key never leaves your local runtime.`,
              fields: [
                {
                  id: 'provider',
                  label: 'Provider',
                  type: 'select',
                  required: true,
                  options: [
                    { label: 'OpenAI', value: 'openai' },
                    { label: 'Anthropic', value: 'anthropic' },
                  ],
                  defaultValue: currentProvider === 'anthropic' ? 'anthropic' : 'openai',
                },
                {
                  id: 'model',
                  label: 'Model',
                  type: 'text',
                  description:
                    'Model name without the provider prefix (e.g. `gpt-4o-mini` or `claude-3-5-sonnet-20240620`).',
                  placeholder: 'gpt-4o-mini',
                  required: true,
                  defaultValue: currentModelId,
                },
                {
                  id: 'apiKey',
                  label: 'API Key',
                  type: 'text',
                  placeholder: `sk-...`,
                  required: true,
                },
              ],
              submitLabel: 'Save & Continue',
              metadata: {
                type: 'api_key_request',
              },
            },
            meta: { agentId: context.state.agentId, threadId },
          } as OpenBotEvent;
          return;
        }

        // Anything else is a genuine failure; let the harness surface it.
        throw error;
      }
    };

    // Entry point for a run: append the incoming turn and invoke the model.
    builder.on('agent:invoke', async function* (event, context) {
      // Events routed to a specific agent are ignored by everyone else.
      const routedTo = (event as { data?: { agentId?: string } }).data?.agentId;
      if (typeof routedTo === 'string' && routedTo && routedTo !== context.state.agentId) {
        return;
      }

      const threadId = event.meta?.threadId || context.state.threadId;

      ensureShortTermMessages(context.state);
      context.state.shortTermMessages = [
        ...(context.state.shortTermMessages ?? []),
        {
          role: event.data?.role || 'user',
          content: event?.data?.content || '',
        },
      ];
      await persistShortTermMessages(context.state, storage);

      yield* runLLM(context, threadId);
    });

    // Record every tool result addressed to this agent; once the last
    // assistant turn's calls are all answered, resume the model loop.
    builder.on('*', async function* (event, context) {
      if (!event.type.endsWith(':result')) return;
      if (event.meta?.agentId !== context.state.agentId) return;
      const toolCallId = event.meta?.toolCallId;
      if (!toolCallId) return;
      ensureShortTermMessages(context.state);

      // `action:shell:result` -> `shell`; payloads are clipped before they
      // re-enter the context window (full event stays in storage).
      const toolName = event.type.replace(/^action:/, '').replace(/:result$/, '');
      const resultData = (event as { data?: unknown }).data;
      const content = truncateToolPayload(resultData);

      context.state.shortTermMessages = [
        ...(context.state.shortTermMessages ?? []),
        { role: 'tool', content, toolCallId, toolName },
      ];
      await persistShortTermMessages(context.state, storage);

      // Find the most recent assistant turn that issued tool calls.
      const lastAssistant = [...(context.state.shortTermMessages ?? [])]
        .reverse()
        .find(
          (m): m is Extract<ShortTermMessage, { role: 'assistant' }> =>
            m.role === 'assistant' && Array.isArray(m.toolCalls) && m.toolCalls.length > 0,
        );

      if (lastAssistant && lastAssistant.toolCalls) {
        const allFulfilled = lastAssistant.toolCalls.every((tc) =>
          context.state.shortTermMessages?.some(
            (m) => m.role === 'tool' && m.toolCallId === tc.id,
          ),
        );

        if (allFulfilled) {
          // A handoff transfers control elsewhere; do not resume this agent.
          if (toolName === 'handoff') return;
          const threadId = event.meta?.threadId || context.state.threadId;
          yield* runLLM(context, threadId);
        }
      }
    });

    // Completes the api_key_request form emitted by runLLM's auth-error path:
    // saves the key, switches the live model, and confirms via UI.
    builder.on('client:ui:widget:response', async function* (event, context) {
      const { metadata, values } = event.data;
      if (metadata?.type !== 'api_key_request') return;
      if (!values?.apiKey || !values?.provider || !values?.model) return;

      const provider = String(values.provider);
      const modelId = String(values.model).trim();
      const apiKey = String(values.apiKey);

      if (provider !== 'openai' && provider !== 'anthropic') {
        yield {
          type: 'agent:output',
          data: { content: `Unsupported provider: ${provider}` },
          meta: { agentId: context.state.agentId },
        };
        return;
      }

      const envVar = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
      const newModelString = `${provider}/${modelId}`;

      // NOTE(review): without storage the submission is silently dropped —
      // confirm this is acceptable for storage-less deployments.
      if (!storage) return;
      try {
        // Persist as a secret variable and export for the current process so
        // the very next generateText call can authenticate.
        await storage.createVariable({ key: envVar, value: apiKey, secret: true });
        process.env[envVar] = apiKey;

        currentModelString = newModelString;
        model = resolveModel(currentModelString);
        try {
          saveConfig({ model: currentModelString });
        } catch {
          // best-effort: config persistence failure shouldn't block the conversation
        }

        yield {
          type: 'agent:output',
          data: {
            content: `Saved ${provider} API key and set model to \`${newModelString}\`.`,
          },
          meta: { agentId: context.state.agentId },
        };

        // Flip the originating form widget into a confirmation message.
        yield {
          type: 'client:ui:widget',
          data: {
            widgetId: event.data.widgetId,
            kind: 'message',
            title: 'API Key Saved',
            body: `Successfully saved ${provider} API key and selected model \`${newModelString}\`. You can now continue your conversation.`,
            state: 'submitted',
            actions: [{ id: 'ok', label: 'Got it', variant: 'primary' }],
          },
          meta: { agentId: context.state.agentId },
        };
      } catch (error) {
        yield {
          type: 'agent:output',
          data: {
            content: `Failed to save API key: ${
              error instanceof Error ? error.message : 'Unknown error'
            }`,
          },
          meta: { agentId: context.state.agentId },
        };
      }
    });
  };
@@ -0,0 +1,4 @@
1
+ export const AI_SDK_SYSTEM_PROMPT =
2
+ 'You are a helpful AI assistant on the OpenBot platform. ' +
3
+ 'Use the tools available to you to help the user. ' +
4
+ 'Be concise unless the user asks for depth.';