@minded-ai/mindedjs 2.0.0 → 2.0.1-beta-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/dist/agent.js +2 -2
  2. package/dist/agent.js.map +1 -1
  3. package/dist/browserTask/README.md +419 -0
  4. package/dist/browserTask/browserAgent.py +632 -0
  5. package/dist/browserTask/captcha_isolated.png +0 -0
  6. package/dist/browserTask/executeBrowserTask.ts +79 -0
  7. package/dist/browserTask/requirements.txt +8 -0
  8. package/dist/browserTask/setup.sh +144 -0
  9. package/dist/internalTools/retell.d.ts +12 -0
  10. package/dist/internalTools/retell.d.ts.map +1 -0
  11. package/dist/internalTools/retell.js +54 -0
  12. package/dist/internalTools/retell.js.map +1 -0
  13. package/dist/internalTools/sendPlaceholderMessage.d.ts +14 -0
  14. package/dist/internalTools/sendPlaceholderMessage.d.ts.map +1 -0
  15. package/dist/internalTools/sendPlaceholderMessage.js +61 -0
  16. package/dist/internalTools/sendPlaceholderMessage.js.map +1 -0
  17. package/dist/interrupts/BaseInterruptSessionManager.d.ts +1 -1
  18. package/dist/interrupts/BaseInterruptSessionManager.d.ts.map +1 -1
  19. package/dist/nodes/addPromptNode.d.ts.map +1 -1
  20. package/dist/nodes/addPromptNode.js +250 -93
  21. package/dist/nodes/addPromptNode.js.map +1 -1
  22. package/dist/utils/extractStateMemoryResponse.d.ts +5 -0
  23. package/dist/utils/extractStateMemoryResponse.d.ts.map +1 -0
  24. package/dist/utils/extractStateMemoryResponse.js +91 -0
  25. package/dist/utils/extractStateMemoryResponse.js.map +1 -0
  26. package/docs/low-code-editor/tools.md +37 -43
  27. package/docs/sdk/events.md +40 -38
  28. package/docs/sdk/memory.md +27 -24
  29. package/package.json +1 -1
  30. package/src/agent.ts +2 -2
  31. package/src/interrupts/BaseInterruptSessionManager.ts +1 -1
  32. package/src/nodes/addPromptNode.ts +283 -94
@@ -58,134 +58,323 @@ export const addPromptNode = async ({ graph, node, llm, tools, emit, agent }: Ad
58
58
  state.messages.unshift(systemMessage);
59
59
  }
60
60
 
61
- const startTime = Date.now();
62
- const result: AIMessage = await llmToUse.bindTools(scopedTools).invoke(state.messages);
63
- const endTime = Date.now();
64
- await agent.interruptSessionManager.checkQueueAndInterrupt(state.sessionId);
65
- logger.debug({
66
- msg: '[Model] Model execution time',
67
- executionTimeMs: endTime - startTime,
68
- sessionId: state.sessionId,
69
- node: node.displayName,
70
- });
71
- // Check if the result contains tool calls
72
- if (result.tool_calls && result.tool_calls.length > 0) {
73
- // Execute the tools
74
- const toolResults = [];
75
-
76
- for (const toolCall of result.tool_calls) {
77
- const matchedTool = scopedTools.find((t) => t.name === toolCall.name);
78
- logger.info({
79
- msg: `[Model] Calling tool inside prompt node`,
80
- tool: matchedTool?.name,
61
+ // Check if we should use the agentic loop (only when humanInTheLoop is true)
62
+ const useAgenticLoop = node.humanInTheLoop === true;
63
+
64
+ /**
65
+ * When humanInTheLoop is true: Run an agentic loop that continues invoking the LLM
66
+ * after tool calls until we get a final AI message without tool calls.
67
+ *
68
+ * When humanInTheLoop is false: Return immediately after tool execution without
69
+ * additional LLM invocations.
70
+ */
71
+ const newMessages: any[] = [];
72
+ const newHistory: any[] = [];
73
+ let currentMessages = [...state.messages];
74
+ let finalAIMessage: AIMessage | null = null;
75
+ let loopCount = 0;
76
+ const MAX_LOOP_ITERATIONS = 10;
77
+
78
+ while (true) {
79
+ loopCount++;
80
+
81
+ // Break if we've reached the maximum number of iterations
82
+ if (loopCount > MAX_LOOP_ITERATIONS) {
83
+ logger.warn({
84
+ msg: '[Model] Reached maximum loop iterations, breaking agentic loop',
85
+ loopCount,
81
86
  sessionId: state.sessionId,
82
87
  node: node.displayName,
83
88
  });
84
- if (matchedTool) {
85
- try {
86
- // Invoke the LangChain tool directly
87
- const startTime = Date.now();
88
- const toolResult = await matchedTool.invoke(toolCall);
89
- const endTime = Date.now();
90
- logger.debug({
91
- msg: `[Tool] Tool result inside prompt node`,
92
- tool: matchedTool?.name,
93
- result: toolResult,
94
- executionTimeMs: endTime - startTime,
95
- sessionId: state.sessionId,
96
- node: node.displayName,
97
- });
98
- //check for queue after tool call
99
- const systemMessageId = uuidv4();
100
-
101
- await agent.interruptSessionManager.checkQueueAndInterrupt(state.sessionId, {
102
- messages: [
103
- result,
104
- toolResult,
105
- new SystemMessage({
106
- id: systemMessageId,
107
- content:
108
- 'you called tool when the user send a new message, Consider calling the function again after user message is processed',
109
- }),
110
- ],
111
- history: [
89
+ break;
90
+ }
91
+ const startTime = Date.now();
92
+
93
+ const result: AIMessage = await llmToUse.bindTools(scopedTools).invoke([new SystemMessage(compiledPrompt), ...currentMessages]);
94
+
95
+ // Always pass accumulated state to interrupt manager
96
+ // Pass the accumulated messages from this prompt node execution
97
+ // If empty, pass undefined to avoid empty state updates
98
+ const interruptState =
99
+ newMessages.length > 0 || newHistory.length > 0
100
+ ? {
101
+ ...state,
102
+ messages: [...state.messages, ...newMessages],
103
+ history: [...state.history, ...newHistory],
104
+ }
105
+ : undefined;
106
+ await agent.interruptSessionManager.checkQueueAndInterrupt(state.sessionId, interruptState);
107
+
108
+ const endTime = Date.now();
109
+
110
+ logger.debug({
111
+ msg: '[Model] Model execution time',
112
+ executionTimeMs: endTime - startTime,
113
+ sessionId: state.sessionId,
114
+ node: node.displayName,
115
+ });
116
+
117
+ // Check if the result contains tool calls
118
+ if (result.tool_calls && result.tool_calls.length > 0) {
119
+ // Add the tool call message
120
+ newMessages.push(result);
121
+ newHistory.push(
122
+ createHistoryStep<HistoryStep>(state.history, {
123
+ type: NodeType.TOOL,
124
+ nodeId: node.name,
125
+ nodeDisplayName: node.displayName,
126
+ raw: result,
127
+ messageIds: [result.id!],
128
+ }),
129
+ );
130
+
131
+ // Execute the tools
132
+ // Track how many tool responses we've added for validation
133
+ const toolResponseStartIndex = newMessages.length;
134
+
135
+ // Ensure we process ALL tool calls, even if some fail
136
+ for (const toolCall of result.tool_calls) {
137
+ const matchedTool = scopedTools.find((t) => t.name === toolCall.name);
138
+ logger.info({
139
+ msg: `[Model] Calling tool inside prompt node`,
140
+ tool: matchedTool?.name || toolCall.name,
141
+ toolCallId: toolCall.id,
142
+ sessionId: state.sessionId,
143
+ node: node.displayName,
144
+ });
145
+
146
+ if (matchedTool) {
147
+ try {
148
+ // Invoke the LangChain tool directly
149
+ const toolStartTime = Date.now();
150
+ const toolResult = await matchedTool.invoke(toolCall);
151
+ const toolEndTime = Date.now();
152
+
153
+ logger.debug({
154
+ msg: `[Tool] Tool result inside prompt node`,
155
+ tool: matchedTool?.name,
156
+ result: toolResult,
157
+ executionTimeMs: toolEndTime - toolStartTime,
158
+ sessionId: state.sessionId,
159
+ node: node.displayName,
160
+ });
161
+
162
+ // Add the tool result directly to allMessages
163
+ newMessages.push(toolResult);
164
+
165
+ // Check for queue after tool call
166
+ const systemMessageId = uuidv4();
167
+ await agent.interruptSessionManager.checkQueueAndInterrupt(state.sessionId, {
168
+ ...state,
169
+ messages: [
170
+ ...state.messages,
171
+ ...newMessages,
172
+ new SystemMessage({
173
+ id: systemMessageId,
174
+ content:
175
+ 'you called tool when the user send a new message, Consider calling the function again after user message is processed',
176
+ }),
177
+ ],
178
+ history: [
179
+ ...state.history,
180
+ ...newHistory,
181
+ createHistoryStep<HistoryStep>(state.history, {
182
+ type: NodeType.TOOL,
183
+ nodeId: node.name,
184
+ nodeDisplayName: node.displayName,
185
+ raw: toolResult,
186
+ messageIds: [toolResult.id!, systemMessageId],
187
+ }),
188
+ ],
189
+ });
190
+
191
+ // In v2.0, tools update state by reference, no need to extract state updates
192
+
193
+ newHistory.push(
112
194
  createHistoryStep<HistoryStep>(state.history, {
113
195
  type: NodeType.TOOL,
114
196
  nodeId: node.name,
115
197
  nodeDisplayName: node.displayName,
116
198
  raw: toolResult,
117
- messageIds: [toolResult.id!, systemMessageId],
199
+ messageIds: [toolResult.id!],
118
200
  }),
119
- ],
120
- });
121
- toolResults.push(toolResult);
122
- } catch (err: any) {
123
- if (err?.name === 'GraphInterrupt') throw err;
201
+ );
202
+ } catch (err: any) {
203
+ if (err?.name === 'GraphInterrupt') throw err;
204
+ logger.error({
205
+ msg: `[Tool] Error executing tool inside prompt node`,
206
+ tool: toolCall.name,
207
+ err,
208
+ sessionId: state.sessionId,
209
+ node: node.displayName,
210
+ });
211
+ const errorMessage = new ToolMessage({
212
+ content: JSON.stringify({ error: err instanceof Error ? err.message : String(err) }),
213
+ tool_call_id: toolCall.id!,
214
+ });
215
+
216
+ // Add the error message directly to allMessages
217
+ newMessages.push(errorMessage);
218
+
219
+ // Check for queue after tool error
220
+ const errorSystemMessageId = uuidv4();
221
+ await agent.interruptSessionManager.checkQueueAndInterrupt(state.sessionId, {
222
+ ...state,
223
+ messages: [
224
+ ...state.messages,
225
+ ...newMessages,
226
+ new SystemMessage({
227
+ id: errorSystemMessageId,
228
+ content: 'Tool execution failed. Consider handling the error or calling another function if needed.',
229
+ }),
230
+ ],
231
+ history: [
232
+ ...state.history,
233
+ ...newHistory,
234
+ createHistoryStep<HistoryStep>(state.history, {
235
+ type: NodeType.TOOL,
236
+ nodeId: node.name,
237
+ nodeDisplayName: node.displayName,
238
+ raw: errorMessage,
239
+ messageIds: [errorMessage.id!, errorSystemMessageId],
240
+ }),
241
+ ],
242
+ });
243
+ newHistory.push(
244
+ createHistoryStep<HistoryStep>(state.history, {
245
+ type: NodeType.TOOL,
246
+ nodeId: node.name,
247
+ nodeDisplayName: node.displayName,
248
+ raw: errorMessage,
249
+ messageIds: [errorMessage.id!],
250
+ }),
251
+ );
252
+ }
253
+ } else {
124
254
  logger.error({
125
- msg: `[Tool] Error executing tool inside prompt node`,
255
+ msg: `[Tool] Model called tool but it was not found inside prompt node`,
126
256
  tool: toolCall.name,
127
- err,
257
+ toolCallId: toolCall.id,
128
258
  sessionId: state.sessionId,
129
259
  node: node.displayName,
130
260
  });
131
- const errorMessage = new ToolMessage({
132
- content: JSON.stringify({ error: err instanceof Error ? err.message : String(err) }),
133
- tool_call_id: toolCall.id!,
261
+
262
+ // IMPORTANT: Create an error message for the missing tool with the correct tool_call_id
263
+ const missingToolMessage = new ToolMessage({
264
+ content: JSON.stringify({ error: `Tool '${toolCall.name}' not found` }),
265
+ tool_call_id: toolCall.id!, // This MUST match the tool_call_id from the AI message
266
+ name: toolCall.name,
134
267
  });
135
- toolResults.push(errorMessage);
268
+
269
+ newMessages.push(missingToolMessage);
270
+ newHistory.push(
271
+ createHistoryStep<HistoryStep>(state.history, {
272
+ type: NodeType.TOOL,
273
+ nodeId: node.name,
274
+ nodeDisplayName: node.displayName,
275
+ raw: missingToolMessage,
276
+ messageIds: [missingToolMessage.id!],
277
+ }),
278
+ );
136
279
  }
137
- } else {
280
+ }
281
+
282
+ // Validate that we have responses for ALL tool calls
283
+ const toolResponseCount = newMessages.length - toolResponseStartIndex;
284
+ if (toolResponseCount !== result.tool_calls.length) {
138
285
  logger.error({
139
- msg: `[Tool] Model called tool but it was not found inside prompt node`,
140
- tool: toolCall.name,
286
+ msg: '[Model] Tool response count mismatch',
287
+ expectedCount: result.tool_calls.length,
288
+ actualCount: toolResponseCount,
289
+ toolCallIds: result.tool_calls.map((tc) => tc.id),
141
290
  sessionId: state.sessionId,
142
291
  node: node.displayName,
143
292
  });
144
293
  }
294
+
295
+ // If humanInTheLoop is false, return immediately after tool execution
296
+ if (!useAgenticLoop) {
297
+ // In v2.0, modify state directly
298
+ state.messages.push(...newMessages);
299
+ state.history.push(...newHistory);
300
+ return state;
301
+ }
302
+
303
+ // Otherwise, prepare for next iteration
304
+ // allMessages now contains all the new messages from this iteration
305
+ currentMessages = [...state.messages, ...newMessages];
306
+
307
+ // Continue the loop to see what the model wants to do next
308
+ continue;
309
+ } else {
310
+ // We got an AI message without tool calls - this is our final response
311
+ finalAIMessage = result;
312
+ break;
313
+ }
314
+ }
315
+
316
+ // Process the final AI message if we have one
317
+ if (finalAIMessage) {
318
+ // Add the final AI message to our messages first
319
+ newMessages.push(finalAIMessage);
320
+
321
+ if (finalAIMessage.getType() === 'ai') {
322
+ logger.info({
323
+ msg: `[Model] Response`,
324
+ content: finalAIMessage.content,
325
+ sessionId: state.sessionId,
326
+ node: node.displayName,
327
+ });
328
+
329
+ // First add all messages to state
330
+ state.messages.push(...newMessages);
331
+
332
+ // In v2.0, emit AI_MESSAGE for handlers to process
333
+ // Handlers will modify state directly by reference
334
+ await emit(AgentEvents.AI_MESSAGE, {
335
+ message: finalAIMessage.content as string,
336
+ state: state,
337
+ });
145
338
  }
146
339
 
147
- state.messages.push(result, ...toolResults);
148
- state.history.push(
340
+ // Set goto to null
341
+ state.goto = null;
342
+
343
+ newHistory.push(
149
344
  createHistoryStep<HistoryStep>(state.history, {
150
- type: NodeType.TOOL,
345
+ type: NodeType.PROMPT_NODE,
151
346
  nodeId: node.name,
152
347
  nodeDisplayName: node.displayName,
153
- raw: result,
154
- messageIds: [result.id!],
348
+ raw: finalAIMessage.content,
349
+ messageIds: [finalAIMessage.id!],
155
350
  }),
156
- ...toolResults.map((toolResult) =>
157
- createHistoryStep<HistoryStep>(state.history, {
158
- type: NodeType.TOOL,
159
- nodeId: node.name,
160
- nodeDisplayName: node.displayName,
161
- raw: toolResult,
162
- messageIds: [toolResult.id!],
163
- }),
164
- ),
165
351
  );
352
+
353
+ // History is added to state
354
+ state.history.push(...newHistory);
355
+
166
356
  return state;
167
357
  }
168
358
 
169
- // Model text response
170
- state.goto = null;
171
- state.messages.push(result);
359
+ // If we hit the loop limit without a final AI message, return what we have
360
+ if (loopCount > MAX_LOOP_ITERATIONS && newMessages.length > 0) {
361
+ logger.warn({
362
+ msg: '[Model] Returning accumulated messages after hitting loop limit',
363
+ messageCount: newMessages.length,
364
+ sessionId: state.sessionId,
365
+ node: node.displayName,
366
+ });
172
367
 
173
- if (result.getType() === 'ai') {
174
- logger.info({ msg: `[Model] Response`, content: result.content, sessionId: state.sessionId, node: node.displayName });
175
- await emit(AgentEvents.AI_MESSAGE, { message: result.content as string, state });
368
+ state.goto = null;
369
+ // In v2.0, modify state directly
370
+ state.messages.push(...newMessages);
371
+ state.history.push(...newHistory);
372
+
373
+ return state;
176
374
  }
177
375
 
178
- state.history.push(
179
- createHistoryStep<HistoryStep>(state.history, {
180
- type: NodeType.PROMPT_NODE,
181
- nodeId: node.name,
182
- nodeDisplayName: node.displayName,
183
- raw: result.content,
184
- messageIds: [result.id!],
185
- }),
186
- );
187
-
188
- return state;
376
+ // This should only be reached in unexpected cases
377
+ throw new Error('Unexpected state: no messages generated in prompt node');
189
378
  };
190
379
  graph.addNode(node.name, callback);
191
380
  };