openbot 0.2.14 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. package/dist/agents/openbot/index.js +76 -0
  2. package/dist/agents/openbot/middleware/approval.js +132 -0
  3. package/dist/agents/openbot/runtime.js +289 -0
  4. package/dist/agents/openbot/system-prompt.js +32 -0
  5. package/dist/agents/openbot/tools/delegation.js +78 -0
  6. package/dist/agents/openbot/tools/mcp.js +99 -0
  7. package/dist/agents/openbot/tools/shell.js +91 -0
  8. package/dist/agents/openbot/tools/storage.js +75 -0
  9. package/dist/agents/openbot/tools/ui.js +176 -0
  10. package/dist/agents/system.js +20 -93
  11. package/dist/app/cli.js +1 -1
  12. package/dist/app/config.js +4 -1
  13. package/dist/app/server.js +15 -8
  14. package/dist/bus/agent-package.js +1 -0
  15. package/dist/bus/plugin.js +1 -0
  16. package/dist/bus/services.js +711 -0
  17. package/dist/bus/types.js +1 -0
  18. package/dist/harness/context.js +250 -0
  19. package/dist/harness/event-normalizer.js +59 -0
  20. package/dist/harness/orchestrator.js +27 -227
  21. package/dist/harness/process.js +25 -3
  22. package/dist/harness/queue-processor.js +227 -0
  23. package/dist/harness/runtime-factory.js +103 -0
  24. package/dist/plugins/ai-sdk/index.js +37 -0
  25. package/dist/plugins/ai-sdk/runtime.js +402 -0
  26. package/dist/plugins/ai-sdk/system-prompt.js +3 -0
  27. package/dist/plugins/ai-sdk.js +277 -87
  28. package/dist/plugins/approval/index.js +159 -0
  29. package/dist/plugins/approval.js +163 -0
  30. package/dist/plugins/delegation/index.js +79 -0
  31. package/dist/plugins/delegation.js +67 -11
  32. package/dist/plugins/mcp/index.js +108 -0
  33. package/dist/plugins/memory/index.js +71 -0
  34. package/dist/plugins/shell/index.js +99 -0
  35. package/dist/plugins/shell.js +123 -0
  36. package/dist/plugins/storage-tools/index.js +85 -0
  37. package/dist/plugins/storage.js +240 -5
  38. package/dist/plugins/ui/index.js +184 -0
  39. package/dist/plugins/ui.js +185 -21
  40. package/dist/registry/agents.js +138 -0
  41. package/dist/registry/plugins.js +93 -50
  42. package/dist/services/agent-packages.js +103 -0
  43. package/dist/services/memory.js +152 -0
  44. package/dist/services/plugins.js +98 -0
  45. package/dist/services/storage.js +366 -94
  46. package/docs/agents.md +52 -65
  47. package/docs/architecture.md +1 -1
  48. package/docs/plugins.md +70 -58
  49. package/docs/templates/AGENT.example.md +57 -0
  50. package/package.json +8 -7
  51. package/src/app/cli.ts +1 -1
  52. package/src/app/config.ts +14 -4
  53. package/src/app/server.ts +23 -10
  54. package/src/app/types.ts +445 -16
  55. package/src/assets/icon.svg +4 -1
  56. package/src/bus/plugin.ts +67 -0
  57. package/src/bus/services.ts +786 -0
  58. package/src/bus/types.ts +160 -0
  59. package/src/harness/context.ts +293 -0
  60. package/src/harness/event-normalizer.ts +82 -0
  61. package/src/harness/orchestrator.ts +35 -273
  62. package/src/harness/process.ts +28 -4
  63. package/src/harness/queue-processor.ts +309 -0
  64. package/src/harness/runtime-factory.ts +125 -0
  65. package/src/plugins/ai-sdk/index.ts +44 -0
  66. package/src/plugins/ai-sdk/runtime.ts +484 -0
  67. package/src/plugins/ai-sdk/system-prompt.ts +4 -0
  68. package/src/plugins/approval/index.ts +228 -0
  69. package/src/plugins/delegation/index.ts +94 -0
  70. package/src/plugins/mcp/index.ts +128 -0
  71. package/src/plugins/memory/index.ts +85 -0
  72. package/src/plugins/shell/index.ts +123 -0
  73. package/src/plugins/storage-tools/index.ts +101 -0
  74. package/src/plugins/ui/index.ts +227 -0
  75. package/src/registry/plugins.ts +108 -55
  76. package/src/services/memory.ts +213 -0
  77. package/src/services/plugins.ts +133 -0
  78. package/src/services/storage.ts +472 -137
  79. package/src/agents/system.ts +0 -112
  80. package/src/plugins/ai-sdk.ts +0 -197
  81. package/src/plugins/delegation.ts +0 -60
  82. package/src/plugins/mcp.ts +0 -154
  83. package/src/plugins/storage.ts +0 -725
  84. package/src/plugins/ui.ts +0 -57
@@ -0,0 +1,402 @@
1
+ import { generateText } from 'ai';
2
+ import { openai } from '@ai-sdk/openai';
3
+ import { anthropic } from '@ai-sdk/anthropic';
4
+ import { createDefaultContextEngine } from '../../harness/context.js';
5
+ import { saveConfig } from '../../app/config.js';
6
/**
 * Resolve a `"provider/model-id"` string into a concrete ai-sdk model handle.
 *
 * The part before the first `/` selects the provider factory; everything
 * after it (which may itself contain slashes) is passed through as the
 * model id. Throws when the model id is missing or the provider is unknown.
 */
function resolveModel(modelString) {
    const separatorIndex = modelString.indexOf('/');
    const provider = separatorIndex === -1 ? modelString : modelString.slice(0, separatorIndex);
    const modelId = separatorIndex === -1 ? '' : modelString.slice(separatorIndex + 1);
    if (modelId === '') {
        throw new Error(`Invalid model string: "${modelString}". Expected "provider/model-id".`);
    }
    if (provider === 'openai') {
        return openai(modelId);
    }
    if (provider === 'anthropic') {
        return anthropic(modelId);
    }
    throw new Error(`Unsupported AI provider: "${provider}"`);
}
21
/** Narrow an unknown value to a plain-object record; anything else (null,
 * arrays, primitives, undefined) collapses to an empty object. */
const asRecord = (value) => {
    const isPlainObject = typeof value === 'object' && value !== null && !Array.isArray(value);
    return isPlainObject ? value : {};
};
24
/** Per-message hard cap (in characters) on tool-result payloads we feed back
 * to the model. Prevents one huge tool output from eating the context window;
 * the original event remains intact in storage. */
const TOOL_RESULT_MAX_CHARS = 8000;
/** Sliding window: max number of messages we replay to the model on each
 * invocation. Older turns stay on disk but are not sent. Keeps both the
 * recent prompts and the prompt token budget bounded. */
const MAX_WINDOW_MESSAGES = 80;
/**
 * Serialize a tool-result payload and clamp it to TOOL_RESULT_MAX_CHARS,
 * appending a marker that records how many characters were dropped.
 *
 * Bug fix: JSON.stringify returns `undefined` (not a string) for `undefined`,
 * functions, and symbols, which previously crashed on `.length` when a tool
 * handler emitted a `:result` event with no data payload. Fall back to
 * String(raw) so the result is always a string.
 */
const truncateToolPayload = (raw) => {
    const serialized = typeof raw === 'string' ? raw : JSON.stringify(raw) ?? String(raw);
    if (serialized.length <= TOOL_RESULT_MAX_CHARS)
        return serialized;
    const dropped = serialized.length - TOOL_RESULT_MAX_CHARS;
    return `${serialized.slice(0, TOOL_RESULT_MAX_CHARS)}\n…[truncated ${dropped} chars]`;
};
/**
 * Trim the message history to a sliding window while preserving tool-call
 * integrity. Drops any leading orphan `tool` messages whose matching
 * assistant call was sliced off, since most providers reject that.
 */
const buildMessageWindow = (messages) => {
    if (messages.length <= MAX_WINDOW_MESSAGES)
        return messages;
    const tail = messages.slice(-MAX_WINDOW_MESSAGES);
    // Collect tool-call ids whose issuing assistant turn survived the slice.
    const knownAssistantCallIds = new Set();
    for (const m of tail) {
        if (m.role === 'assistant' && m.toolCalls) {
            for (const tc of m.toolCalls)
                knownAssistantCallIds.add(tc.id);
        }
    }
    // Keep every non-tool message; keep tool messages only when their call survived.
    return tail.filter((m) => m.role !== 'tool' || knownAssistantCallIds.has(m.toolCallId));
};
57
/**
 * Self-healing pass: every assistant tool_call must have a matching tool
 * result before the next user/assistant turn, or providers (OpenAI in
 * particular) reject the request with "Tool result is missing for tool call".
 *
 * This can happen when a handler emits a `:result` event without `meta`
 * (orphaning the call), the process restarts mid-run, or a tool handler
 * crashes. Rather than refuse to continue, we inject synthetic tool messages
 * with a clear error payload — the LLM can then explain the failure to the
 * user and proceed.
 */
const repairOpenToolCalls = (messages) => {
    // Every tool-call id that already has a real (or previously injected) result.
    const answeredCallIds = new Set(messages
        .filter((m) => m.role === 'tool')
        .map((m) => m.toolCallId));
    return messages.flatMap((message) => {
        if (message.role !== 'assistant' || !message.toolCalls) {
            return [message];
        }
        // Synthesize an error result immediately after the assistant turn for
        // each of its calls that never received one.
        const synthetic = [];
        for (const call of message.toolCalls) {
            if (answeredCallIds.has(call.id)) {
                continue;
            }
            answeredCallIds.add(call.id);
            synthetic.push({
                role: 'tool',
                toolCallId: call.id,
                toolName: call.function.name,
                content: JSON.stringify({
                    success: false,
                    error: 'Tool result was lost (handler did not emit a matching :result event).',
                }),
            });
        }
        return [message, ...synthetic];
    });
};
96
/**
 * Load previously persisted short-term messages from durable state.
 * Thread-level state takes precedence over channel-level state; anything
 * that is not an array of messages collapses to an empty history.
 */
const readPersistedShortTermMessages = (state) => {
    const persisted = state.threadDetails?.state ?? state.channelDetails?.state;
    // Inline record check (same narrowing as asRecord): only read off plain objects.
    const isRecord = typeof persisted === 'object' && persisted !== null && !Array.isArray(persisted);
    const stored = isRecord ? persisted.shortTermMessages : undefined;
    return Array.isArray(stored) ? stored : [];
};
102
/**
 * Mirror the in-memory short-term message buffer into durable storage.
 * Patches thread-scoped state when a threadId is present, otherwise the
 * channel-level state record. Silently does nothing without a storage service.
 */
const persistShortTermMessages = async (state, storage) => {
    if (!storage)
        return;
    const { channelId, threadId } = state;
    const shortTermMessages = state.shortTermMessages ?? [];
    if (threadId) {
        await storage.patchThreadState({
            channelId,
            threadId,
            state: { shortTermMessages },
        });
    }
    else {
        await storage.patchChannelState({
            channelId,
            state: { shortTermMessages },
        });
    }
};
119
/**
 * Assemble the system prompt from up to three sections, joined by blank
 * lines: a static `system` string, the awaited result of a dynamic `system`
 * function (only when a handler context is available to pass it), and the
 * context engine's rendered context for the current state.
 */
async function buildSystemPrompt(state, system, context, storage, contextEngine) {
    const parts = [];
    if (typeof system === 'string' && system) {
        parts.push(system);
    }
    if (typeof system === 'function' && context) {
        parts.push(await system(context));
    }
    if (contextEngine) {
        parts.push(await contextEngine.buildContext(state, storage));
    }
    return parts.join('\n\n');
}
129
/**
 * Generic ai-sdk runtime plugin.
 *
 * Owns `agent:invoke`, runs the LLM, emits tool-call events, and stitches tool
 * results back into the conversation. Tools are supplied externally by the
 * loader (merged from every tool plugin attached to the same agent).
 */
export const aiSdkRuntime = (options) => (builder) => {
    const { model: modelString = 'openai/gpt-4o-mini', system, storage, contextEngine = createDefaultContextEngine(), toolDefinitions = {}, } = options;
    // Mutable so the api-key-request flow below can swap provider/model at runtime.
    let currentModelString = modelString;
    let model = resolveModel(currentModelString);
    // Lazily hydrate the in-memory buffer from persisted thread/channel state
    // the first time a handler touches it (e.g. after a process restart).
    const ensureShortTermMessages = (state) => {
        if (!state.shortTermMessages || state.shortTermMessages.length === 0) {
            state.shortTermMessages = readPersistedShortTermMessages(state);
        }
    };
    // Convert our stored OpenAI-style message records into the ai-sdk
    // CoreMessage shape that generateText expects (structured content parts
    // for tool calls and tool results; plain strings elsewhere).
    const mapToCoreMessages = (messages) => {
        return messages.map((m) => {
            if (m.role === 'assistant' && m.toolCalls) {
                return {
                    role: 'assistant',
                    content: [
                        { type: 'text', text: m.content || '' },
                        ...m.toolCalls.map((tc) => ({
                            type: 'tool-call',
                            toolCallId: tc.id,
                            toolName: tc.function.name,
                            // NOTE(review): assumes tc.function.arguments is valid JSON
                            // (it is produced by JSON.stringify below) — malformed
                            // persisted state would throw here.
                            input: JSON.parse(tc.function.arguments),
                        })),
                    ],
                };
            }
            if (m.role === 'assistant') {
                return { role: 'assistant', content: m.content || '' };
            }
            if (m.role === 'tool') {
                return {
                    role: 'tool',
                    content: [
                        {
                            type: 'tool-result',
                            toolCallId: m.toolCallId,
                            toolName: m.toolName,
                            output: { type: 'text', value: JSON.stringify(m.content) },
                        },
                    ],
                };
            }
            // user/system messages pass through unchanged.
            return m;
        });
    };
    // Core LLM turn: build the prompt, replay the (repaired, windowed) history,
    // persist any new assistant turn, then yield tool-call and/or output events.
    const runLLM = async function* (context, threadId) {
        ensureShortTermMessages(context.state);
        const systemPrompt = await buildSystemPrompt(context.state, system, context, storage, contextEngine);
        // Order matters: repair orphaned tool calls first, then window, then map.
        const coreMessages = mapToCoreMessages(buildMessageWindow(repairOpenToolCalls(context.state.shortTermMessages || [])));
        try {
            const result = await generateText({
                model,
                system: systemPrompt,
                messages: coreMessages,
                tools: toolDefinitions,
            });
            const toolCalls = result.toolCalls ?? [];
            if (toolCalls.length > 0) {
                // Record the assistant turn (with its calls) BEFORE yielding the
                // action events, so a crash mid-dispatch still leaves a
                // repairable history on disk.
                context.state.shortTermMessages = [
                    ...(context.state.shortTermMessages ?? []),
                    {
                        role: 'assistant',
                        content: result.text || '',
                        toolCalls: toolCalls.map((tc) => ({
                            id: tc.toolCallId,
                            type: 'function',
                            function: {
                                name: tc.toolName,
                                arguments: JSON.stringify(tc.input),
                            },
                        })),
                    },
                ];
                await persistShortTermMessages(context.state, storage);
                // One `action:<toolName>` event per call; meta carries the ids the
                // '*' handler below needs to match the eventual `:result`.
                for (const toolCall of toolCalls) {
                    yield {
                        type: `action:${toolCall.toolName}`,
                        data: toolCall.input,
                        meta: {
                            toolCallId: toolCall.toolCallId,
                            agentId: context.state.agentId,
                            threadId,
                        },
                    };
                }
            }
            if (result.text) {
                // A text-only turn is persisted here; a text+tools turn was
                // already persisted above with its toolCalls attached.
                if (toolCalls.length === 0) {
                    context.state.shortTermMessages = [
                        ...(context.state.shortTermMessages ?? []),
                        { role: 'assistant', content: result.text },
                    ];
                    await persistShortTermMessages(context.state, storage);
                }
                yield {
                    type: 'agent:output',
                    data: { content: result.text },
                    meta: { agentId: context.state.agentId, threadId },
                };
            }
        }
        catch (error) {
            // Heuristic auth-failure detection across providers (message text,
            // HTTP 401, etc.); anything else is rethrown unchanged.
            const errorMessage = error instanceof Error ? error.message : String(error);
            const isApiKeyError = errorMessage.includes('API key') ||
                errorMessage.includes('401') ||
                errorMessage.includes('Unauthorized') ||
                errorMessage.includes('authentication');
            if (isApiKeyError) {
                const [currentProvider, ...rest] = currentModelString.split('/');
                const currentModelId = rest.join('/');
                // Ask the user for credentials via a UI form; the
                // 'client:ui:widget:response' handler below consumes the answer
                // (matched on metadata.type === 'api_key_request').
                yield {
                    type: 'client:ui:widget',
                    data: {
                        kind: 'form',
                        widgetId: `api_key_request_${Date.now()}`,
                        title: `AI Provider API Key Required`,
                        description: `The AI provider returned an authentication error. Select your provider, model, and provide a valid API key to continue. The key never leaves your local runtime.`,
                        fields: [
                            {
                                id: 'provider',
                                label: 'Provider',
                                type: 'select',
                                required: true,
                                options: [
                                    { label: 'OpenAI', value: 'openai' },
                                    { label: 'Anthropic', value: 'anthropic' },
                                ],
                                defaultValue: currentProvider === 'anthropic' ? 'anthropic' : 'openai',
                            },
                            {
                                id: 'model',
                                label: 'Model',
                                type: 'text',
                                description: 'Model name without the provider prefix (e.g. `gpt-4o-mini` or `claude-3-5-sonnet-20240620`).',
                                placeholder: 'gpt-4o-mini',
                                required: true,
                                defaultValue: currentModelId,
                            },
                            {
                                id: 'apiKey',
                                label: 'API Key',
                                type: 'text',
                                placeholder: `sk-...`,
                                required: true,
                            },
                        ],
                        submitLabel: 'Save & Continue',
                        metadata: {
                            type: 'api_key_request',
                        },
                    },
                    meta: { agentId: context.state.agentId, threadId },
                };
                return;
            }
            throw error;
        }
    };
    // Entry point: a user (or router) message addressed to this agent.
    builder.on('agent:invoke', async function* (event, context) {
        // Ignore invokes explicitly routed to a different agent.
        const routedTo = event.data?.agentId;
        if (typeof routedTo === 'string' && routedTo && routedTo !== context.state.agentId) {
            return;
        }
        const threadId = event.meta?.threadId || context.state.threadId;
        ensureShortTermMessages(context.state);
        context.state.shortTermMessages = [
            ...(context.state.shortTermMessages ?? []),
            {
                role: event.data?.role || 'user',
                content: event?.data?.content || '',
            },
        ];
        await persistShortTermMessages(context.state, storage);
        yield* runLLM(context, threadId);
    });
    // Tool-result collector: any `*:result` event carrying our agentId and a
    // toolCallId gets appended as a tool message; once every call in the most
    // recent tool-bearing assistant turn is answered, the LLM runs again.
    builder.on('*', async function* (event, context) {
        if (!event.type.endsWith(':result'))
            return;
        if (event.meta?.agentId !== context.state.agentId)
            return;
        const toolCallId = event.meta?.toolCallId;
        if (!toolCallId)
            return;
        ensureShortTermMessages(context.state);
        // Strip the `action:` prefix and `:result` suffix to recover the tool name.
        const toolName = event.type.replace(/^action:/, '').replace(/:result$/, '');
        const resultData = event.data;
        const content = truncateToolPayload(resultData);
        context.state.shortTermMessages = [
            ...(context.state.shortTermMessages ?? []),
            { role: 'tool', content, toolCallId, toolName },
        ];
        await persistShortTermMessages(context.state, storage);
        // Find the latest assistant turn that issued tool calls and check
        // whether every one of them now has a matching tool message.
        const lastAssistant = [...(context.state.shortTermMessages ?? [])]
            .reverse()
            .find((m) => m.role === 'assistant' && Array.isArray(m.toolCalls) && m.toolCalls.length > 0);
        if (lastAssistant && lastAssistant.toolCalls) {
            const allFulfilled = lastAssistant.toolCalls.every((tc) => context.state.shortTermMessages?.some((m) => m.role === 'tool' && m.toolCallId === tc.id));
            if (allFulfilled) {
                // A handoff result means another agent takes over — don't re-run.
                if (toolName === 'handoff')
                    return;
                const threadId = event.meta?.threadId || context.state.threadId;
                yield* runLLM(context, threadId);
            }
        }
    });
    // Companion to the api-key form emitted in runLLM's auth-error path:
    // validates the submitted values, stores the key, hot-swaps the model,
    // and best-effort persists the choice to config.
    builder.on('client:ui:widget:response', async function* (event, context) {
        const { metadata, values } = event.data;
        if (metadata?.type !== 'api_key_request')
            return;
        if (!values?.apiKey || !values?.provider || !values?.model)
            return;
        const provider = String(values.provider);
        const modelId = String(values.model).trim();
        const apiKey = String(values.apiKey);
        if (provider !== 'openai' && provider !== 'anthropic') {
            yield {
                type: 'agent:output',
                data: { content: `Unsupported provider: ${provider}` },
                meta: { agentId: context.state.agentId },
            };
            return;
        }
        const envVar = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
        const newModelString = `${provider}/${modelId}`;
        if (!storage)
            return;
        try {
            await storage.createVariable({ key: envVar, value: apiKey, secret: true });
            // Set the env var in-process so the ai-sdk provider picks it up
            // immediately, without a restart.
            process.env[envVar] = apiKey;
            currentModelString = newModelString;
            model = resolveModel(currentModelString);
            try {
                saveConfig({ model: currentModelString });
            }
            catch {
                // best-effort: config persistence failure shouldn't block the conversation
            }
            yield {
                type: 'agent:output',
                data: {
                    content: `Saved ${provider} API key and set model to \`${newModelString}\`.`,
                },
                meta: { agentId: context.state.agentId },
            };
            yield {
                type: 'client:ui:widget',
                data: {
                    widgetId: event.data.widgetId,
                    kind: 'message',
                    title: 'API Key Saved',
                    body: `Successfully saved ${provider} API key and selected model \`${newModelString}\`. You can now continue your conversation.`,
                    state: 'submitted',
                    actions: [{ id: 'ok', label: 'Got it', variant: 'primary' }],
                },
                meta: { agentId: context.state.agentId },
            };
        }
        catch (error) {
            yield {
                type: 'agent:output',
                data: {
                    content: `Failed to save API key: ${error instanceof Error ? error.message : 'Unknown error'}`,
                },
                meta: { agentId: context.state.agentId },
            };
        }
    });
};
@@ -0,0 +1,3 @@
1
/** Default system prompt for the ai-sdk runtime plugin. */
export const AI_SDK_SYSTEM_PROMPT = [
    'You are a helpful AI assistant on the OpenBot platform. ',
    'Use the tools available to you to help the user. ',
    'Be concise unless the user asks for depth.',
].join('');