@minded-ai/mindedjs 1.0.98 → 1.0.99

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/dist/agent.js +11 -11
  2. package/dist/agent.js.map +1 -1
  3. package/dist/checkpointer/checkpointSaverFactory.js +1 -1
  4. package/dist/checkpointer/checkpointSaverFactory.js.map +1 -1
  5. package/dist/edges/createDirectEdge.d.ts.map +1 -1
  6. package/dist/edges/createDirectEdge.js +1 -1
  7. package/dist/edges/createDirectEdge.js.map +1 -1
  8. package/dist/edges/createLogicalRouter.d.ts.map +1 -1
  9. package/dist/edges/createLogicalRouter.js +19 -6
  10. package/dist/edges/createLogicalRouter.js.map +1 -1
  11. package/dist/edges/createPromptRouter.d.ts.map +1 -1
  12. package/dist/edges/createPromptRouter.js +8 -5
  13. package/dist/edges/createPromptRouter.js.map +1 -1
  14. package/dist/index.d.ts +4 -0
  15. package/dist/index.d.ts.map +1 -1
  16. package/dist/index.js +6 -1
  17. package/dist/index.js.map +1 -1
  18. package/dist/llm/createLlmInstance.d.ts +1 -1
  19. package/dist/llm/createLlmInstance.d.ts.map +1 -1
  20. package/dist/llm/createLlmInstance.js +18 -1
  21. package/dist/llm/createLlmInstance.js.map +1 -1
  22. package/dist/nodes/addAppToolNode.js +2 -2
  23. package/dist/nodes/addAppToolNode.js.map +1 -1
  24. package/dist/nodes/addBrowserTaskNode.d.ts.map +1 -1
  25. package/dist/nodes/addBrowserTaskNode.js +5 -5
  26. package/dist/nodes/addBrowserTaskNode.js.map +1 -1
  27. package/dist/nodes/addHumanInTheLoopNode.js +1 -1
  28. package/dist/nodes/addHumanInTheLoopNode.js.map +1 -1
  29. package/dist/nodes/addPromptNode.d.ts.map +1 -1
  30. package/dist/nodes/addPromptNode.js +6 -5
  31. package/dist/nodes/addPromptNode.js.map +1 -1
  32. package/dist/nodes/addToolNode.js +1 -1
  33. package/dist/nodes/addToolNode.js.map +1 -1
  34. package/dist/nodes/addToolRunNode.js +1 -1
  35. package/dist/nodes/addToolRunNode.js.map +1 -1
  36. package/dist/nodes/addTriggerNode.js +1 -1
  37. package/dist/nodes/addTriggerNode.js.map +1 -1
  38. package/dist/platform/models/mindedChatOpenAI.d.ts +20 -0
  39. package/dist/platform/models/mindedChatOpenAI.d.ts.map +1 -0
  40. package/dist/platform/{mindedChatOpenAI.js → models/mindedChatOpenAI.js} +10 -1
  41. package/dist/platform/models/mindedChatOpenAI.js.map +1 -0
  42. package/dist/platform/models/parallelWrapper.d.ts +17 -0
  43. package/dist/platform/models/parallelWrapper.d.ts.map +1 -0
  44. package/dist/platform/models/parallelWrapper.js +105 -0
  45. package/dist/platform/models/parallelWrapper.js.map +1 -0
  46. package/dist/types/LLM.types.d.ts.map +1 -1
  47. package/dist/types/LLM.types.js +1 -1
  48. package/dist/types/LLM.types.js.map +1 -1
  49. package/dist/voice/voiceSession.d.ts.map +1 -1
  50. package/dist/voice/voiceSession.js +16 -17
  51. package/dist/voice/voiceSession.js.map +1 -1
  52. package/docs/SUMMARY.md +1 -0
  53. package/docs/platform/parallel-llm.md +242 -0
  54. package/package.json +2 -2
  55. package/src/agent.ts +11 -11
  56. package/src/checkpointer/checkpointSaverFactory.ts +1 -1
  57. package/src/edges/createDirectEdge.ts +1 -2
  58. package/src/edges/createLogicalRouter.ts +18 -6
  59. package/src/edges/createPromptRouter.ts +8 -5
  60. package/src/index.ts +6 -0
  61. package/src/llm/createLlmInstance.ts +25 -2
  62. package/src/nodes/addAppToolNode.ts +2 -2
  63. package/src/nodes/addBrowserTaskNode.ts +16 -10
  64. package/src/nodes/addHumanInTheLoopNode.ts +1 -1
  65. package/src/nodes/addPromptNode.ts +6 -5
  66. package/src/nodes/addToolNode.ts +1 -1
  67. package/src/nodes/addToolRunNode.ts +1 -1
  68. package/src/nodes/addTriggerNode.ts +1 -1
  69. package/src/platform/models/mindedChatOpenAI.ts +49 -0
  70. package/src/platform/models/parallelWrapper.ts +141 -0
  71. package/src/types/LLM.types.ts +5 -5
  72. package/src/voice/voiceSession.ts +16 -17
  73. package/dist/platform/mindedChatOpenAI.d.ts +0 -5
  74. package/dist/platform/mindedChatOpenAI.d.ts.map +0 -1
  75. package/dist/platform/mindedChatOpenAI.js.map +0 -1
  76. package/src/platform/mindedChatOpenAI.ts +0 -19
package/docs/platform/parallel-llm.md ADDED
@@ -0,0 +1,242 @@
# Parallel LLM Requests

Parallel LLM requests can significantly reduce latency by sending multiple identical requests and using the fastest response. This feature can reduce response times by 30-50% in scenarios with variable network conditions.

## Quick Start

The easiest way to enable parallel requests is through your `minded.json` configuration:

```json
{
  "flows": ["./src/flows"],
  "tools": ["./src/tools"],
  "agent": "./src/agent.ts",
  "llm": {
    "name": "MindedChatOpenAI",
    "properties": {
      "model": "gpt-4o",
      "numParallelRequests": 3,
      "logTimings": true
    }
  }
}
```

Your agent will automatically use parallel requests for all LLM calls:

```typescript
import { Agent } from '@minded-ai/mindedjs';
import memorySchema from './agentMemorySchema';
import config from '../minded.json';
import tools from './tools';

const agent = new Agent({
  memorySchema,
  config, // Parallel configuration is automatically applied
  tools,
});
```

## Configuration Options

### MindedChatOpenAI (Recommended)

For agents running on the Minded platform, use `MindedChatOpenAI` with parallel configuration:

```json
{
  "llm": {
    "name": "MindedChatOpenAI",
    "properties": {
      "model": "gpt-4o",
      "numParallelRequests": 3,
      "logTimings": true,
      "temperature": 0.7
    }
  }
}
```

### AzureChatOpenAI

For Azure OpenAI deployments:

```json
{
  "llm": {
    "name": "AzureChatOpenAI",
    "properties": {
      "model": "gpt-4o",
      "numParallelRequests": 3,
      "logTimings": true,
      "azureOpenAIApiVersion": "2024-02-01"
    }
  }
}
```

Required environment variables:

```env
AZURE_OPENAI_API_KEY=your_azure_key
AZURE_OPENAI_API_INSTANCE_NAME=your_instance_name
AZURE_OPENAI_API_DEPLOYMENT_NAME=your_deployment_name
```

### ChatOpenAI

For the standard OpenAI API:

```json
{
  "llm": {
    "name": "ChatOpenAI",
    "properties": {
      "model": "gpt-4o",
      "numParallelRequests": 3,
      "logTimings": true,
      "openAIApiKey": "${OPENAI_API_KEY}"
    }
  }
}
```

## Configuration Parameters

| Parameter             | Type    | Default | Description                                   |
| --------------------- | ------- | ------- | --------------------------------------------- |
| `numParallelRequests` | number  | 1       | Number of parallel requests (2-5 recommended) |
| `logTimings`          | boolean | false   | Enable detailed timing logs                   |
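
Both parameters surface in the `BaseParallelChatFields` type that the package re-exports (see the `src/index.ts` diff below). As a reference, a minimal sketch of that options shape; field optionality here is an assumption inferred from the defaults above, not taken from the actual declaration:

```typescript
// Sketch of the parallel options shape (BaseParallelChatFields).
// Optionality and defaults are assumptions based on the table above.
interface BaseParallelChatFields {
  numParallelRequests?: number; // default 1, i.e. no parallelism
  logTimings?: boolean; // default false
}
```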

## Performance Notes

- **Optimal Range**: 2-3 parallel requests usually provide the best latency/cost balance
- **Cost Impact**: You pay for all parallel requests made
- **Best Use Cases**: Variable network conditions, consistency requirements
- **Latency Reduction**: Typically 30-50% faster response times

## Monitoring Performance

When `logTimings: true` is enabled, you'll see detailed performance logs:

```
[Model] Fastest request completed { requestTime: 1.234, numParallelRequests: 3 }
[Model] Time saved using parallel requests {
  fastestRequestTime: 1.234,
  secondFastestRequestTime: 1.567,
  allFinishTime: 2.345,
  timeSaved: 1.111,
  timeSavedFromSecond: 0.333
}
```
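
Reading the sample above, `timeSaved` matches the gap between the slowest and fastest completion (2.345 − 1.234 ≈ 1.111 s), and `timeSavedFromSecond` the gap between the second-fastest and fastest (1.567 − 1.234 ≈ 0.333 s). Treat this reading as an inference from the sample numbers rather than documented semantics.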

## Advanced Usage

### Dynamic Configuration

You can adjust parallel requests based on environment:

```json
{
  "llm": {
    "name": "MindedChatOpenAI",
    "properties": {
      "model": "gpt-4o",
      "numParallelRequests": "${NODE_ENV === 'production' ? 3 : 1}",
      "logTimings": "${NODE_ENV === 'development'}"
    }
  }
}
```
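
The `${...}` expressions above depend on Minded's config interpolation. If you assemble the configuration in code rather than in `minded.json`, the same effect can be had directly; a sketch, with property names taken from the examples above:

```typescript
// Environment-dependent LLM properties built in code instead of
// relying on ${...} interpolation inside minded.json.
const isProd = process.env.NODE_ENV === 'production';

const llmConfig = {
  name: 'MindedChatOpenAI',
  properties: {
    model: 'gpt-4o',
    numParallelRequests: isProd ? 3 : 1, // fewer parallel calls in dev
    logTimings: !isProd, // verbose timing logs during development
  },
};
```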

### Manual Instantiation with createParallelWrapper

For advanced use cases where you need direct control over the LLM instance, you can manually apply parallel wrapping:

```typescript
import { createParallelWrapper } from '@minded-ai/mindedjs';
import { ChatOpenAI, AzureChatOpenAI } from '@langchain/openai';

// Manual wrapping for ChatOpenAI
const parallelOpenAI = createParallelWrapper(
  new ChatOpenAI({
    openAIApiKey: process.env.OPENAI_API_KEY,
    model: 'gpt-4o',
    temperature: 0.7,
  }),
  {
    numParallelRequests: 3,
    logTimings: true,
  },
);

// Manual wrapping for AzureChatOpenAI
const parallelAzure = createParallelWrapper(
  new AzureChatOpenAI({
    azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
    azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_INSTANCE!,
    azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_DEPLOYMENT!,
    azureOpenAIApiVersion: '2024-02-01',
  }),
  {
    numParallelRequests: 2,
    logTimings: false,
  },
);

// Use directly with Agent (bypassing configuration)
const agent = new Agent({
  memorySchema,
  config: {
    flows: ['./flows'],
    tools: [],
    llm: parallelOpenAI as any, // Direct LLM instance
  },
  tools: [],
});
```

**Note**: The configuration-based approach is recommended for most use cases as it's simpler and more maintainable. Use manual instantiation only when you need specific control over LLM creation or are integrating with existing LangChain workflows.

## How It Works

### MindedChatOpenAI (Backend Processing)

- Parallel requests are handled on the Minded platform backend
- Multiple requests sent to Azure OpenAI from the backend
- Fastest response returned to your agent
- Optimal for production deployments

### Other LLM Providers (Client-Side Processing)

- Parallel requests handled in your application (sketched below)
- Multiple requests sent directly to the LLM provider
- Good for development and custom deployments
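
To illustrate the client-side mechanism, here is a minimal sketch of the race-the-requests idea; it is not the package's actual `parallelWrapper` implementation, which also produces the timing logs shown earlier:

```typescript
// Fire n identical requests and resolve with whichever finishes first.
// Losing responses are discarded client-side but still billed by the provider.
async function raceRequests<T>(makeRequest: () => Promise<T>, n: number): Promise<T> {
  const attempts = Array.from({ length: n }, () => makeRequest());
  return Promise.race(attempts);
}

// Hypothetical usage with any LangChain-style chat model:
// const reply = await raceRequests(() => llm.invoke(messages), 3);
```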

## Best Practices

1. **Start Small**: Begin with 2-3 parallel requests
2. **Monitor Costs**: Each parallel request counts toward your usage
3. **Enable Logging**: Use `logTimings: true` during development to measure improvements
4. **Environment-Specific**: Use fewer parallel requests in development
5. **Configuration Over Code**: Prefer `minded.json` configuration over manual instantiation

## Troubleshooting

### No Performance Improvement

- Check network latency variability
- Ensure `numParallelRequests > 1`
- Verify timing logs are showing multiple requests

### Increased Costs

- Reduce `numParallelRequests`
- Consider cost vs. latency trade-offs
- Monitor usage patterns

### Rate Limiting

- Lower `numParallelRequests`
- Implement backoff strategies (see the sketch below)
- Contact provider about rate limits
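
For the backoff suggestion above, a minimal retry sketch (illustrative only; the package does not ship this helper):

```typescript
// Retry a rate-limited call with exponential backoff: 0.5s, 1s, 2s, ...
async function withBackoff<T>(fn: () => Promise<T>, maxRetries = 3): Promise<T> {
  for (let attempt = 0; ; attempt++) {
    try {
      return await fn();
    } catch (err) {
      if (attempt >= maxRetries) throw err;
      await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 500));
    }
  }
}
```
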
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@minded-ai/mindedjs",
-  "version": "1.0.98",
+  "version": "1.0.99",
   "description": "MindedJS is a TypeScript library for building agents.",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -66,4 +66,4 @@
     "uuid": "^11.1.0",
     "ws": "^8.15.1"
   }
-}
+}
package/src/agent.ts CHANGED
@@ -447,7 +447,7 @@ export class Agent {
     const handlerResult = results.find((r) => r !== undefined);
     if (handlerResult) {
       if (!handlerResult.isQualified) {
-        logger.info(`Trigger ${triggerName} was disqualified`);
+        logger.debug({ msg: '[Trigger] Disqualified', triggerName, triggerBody, sessionId });
         return;
       }
       memoryUpdate = handlerResult.memory || {};
@@ -457,7 +457,7 @@
       }
     }

-    logger.info({ message: 'Invoking trigger', triggerName, sessionId, triggerBody });
+    logger.info({ msg: '[Trigger] Received', triggerName, triggerBody, sessionId });
     const langraphConfig = this.getLangraphConfig(sessionId || uuidv4());
     const state = await this.compiledGraph.getState(langraphConfig);
     const suffixes = Object.values(internalNodesSuffix);
@@ -510,7 +510,7 @@
       return res;
     } catch (error: any) {
       logger.error({
-        message: 'Invoke error',
+        msg: '[Trigger] Error',
         errorMessage: error.message,
         stack: error.stack,
         sessionId,
@@ -744,7 +744,7 @@
     // Skip secret loading in local development
     const { runLocally } = getConfig();
     if (runLocally) {
-      logger.info('Running locally - skipping secret loading');
+      logger.debug({ msg: '[Agent] Secrets loaded from local .env file' });
       return {};
     }
     if (!mindedConnection.isConnected()) {
@@ -771,10 +771,10 @@
     const secrets = response.secrets || {};

     // Load secrets into process.env
-    logger.debug(`Loading ${Object.keys(secrets).length} secrets into environment variables`);
     Object.entries(secrets).forEach(([key, value]) => {
       process.env[key] = value;
     });
+    logger.debug(`[Agent] Loaded ${Object.keys(secrets).length} secrets from platform`);

     // Cache the secrets for future requests
     this.secretsCache = secrets;
@@ -790,7 +790,7 @@
   }

   private setupVoice(): void {
-    logger.info('Setting up voice');
+    logger.info('[Voice] Setting up voice');
     if (!mindedConnection.isConnected()) {
       throw new Error('Minded connection is required');
     }
@@ -813,7 +813,7 @@
         voiceSession.sendAudio(audioMessage.audioData);
       } else {
         logger.trace({
-          message: 'Audio received; voice session not found for sessionId',
+          message: '[Voice] Audio received; voice session not found for sessionId',
           sessionId: audioMessage.sessionId,
           activeSessions: Array.from(this.voiceSessions.keys()),
         });
@@ -823,13 +823,13 @@
     // Hangup / end session handler
     connection.on(mindedConnectionSocketMessageType.DASHBOARD_VOICE_SESSION_END, (message) => {
       const hangup = message as BaseVoiceMessage;
-      logger.debug({ message: 'Dashboard eneded voice session', sessionId: hangup.sessionId });
+      logger.debug({ message: '[Voice] Dashboard eneded voice session', sessionId: hangup.sessionId });
       const voiceSession = this.voiceSessions.get(hangup.sessionId);
       if (voiceSession) {
         voiceSession.hangup();
       } else {
         logger.trace({
-          message: 'Session ended; voice session not found for sessionId',
+          message: '[Voice] Session ended; voice session not found for sessionId',
           sessionId: hangup.sessionId,
           activeSessions: this.voiceSessions.keys(),
         });
@@ -856,7 +856,7 @@
       voiceId: voiceTrigger.voiceId,
     });
     await voiceSession.init();
-    logger.debug({ message: 'Voice session initialized', sessionId: params.sessionId });
+    logger.debug({ message: '[Voice] Voice session initialized', sessionId: params.sessionId });
     this.voiceSessions.set(params.sessionId, voiceSession);

     // Emit voice session start event
@@ -884,7 +884,7 @@
     To be used by the Lambda wrapper to restore checkpoints
   */
   public async restoreCheckpoint(sessionId: string, checkpointId: string): Promise<void> {
-    logger.info({ message: 'Restoring checkpoint', sessionId, checkpointId });
+    logger.info({ msg: '[Agent]Restoring checkpoint', sessionId, checkpointId });
     const langraphConfig = this.getLangraphConfig(sessionId, checkpointId);
     await this.compiledGraph.invoke(
       new Command({
package/src/checkpointer/checkpointSaverFactory.ts CHANGED
@@ -13,6 +13,6 @@ export function createCheckpointSaver(): BaseCheckpointSaver {
     return new MemorySaver();
   }

-  logger.debug('Using remote checkpoint saver');
+  logger.debug('[Agent] Using remote checkpoint saver');
   return new MindedCheckpointSaver();
 }
package/src/edges/createDirectEdge.ts CHANGED
@@ -10,8 +10,7 @@ export const createDirectEdge = (edge: StepForwardEdge) => {
     }
     // For direct edges, we just return the target of the first edge
     // since there's no conditional logic needed
-    logger.info(`Executing edge ${JSON.stringify(edge)}`);
-
+    logger.info({ msg: `[Router] Direct edge`, target: edge.target });
     return edge.target;
   };
 };
package/src/edges/createLogicalRouter.ts CHANGED
@@ -8,7 +8,7 @@ const CONDITION_TIMEOUT = 5000; // 5 seconds

 export const createLogicalRouter = ({ edges }: { edges: LogicalConditionEdge[] }) => {
   return async (state: typeof stateAnnotation.State) => {
-    logger.debug(`Evaluating logical conditions for ${edges.length} edges`);
+    logger.debug({ msg: `[Router] Evaluating logical conditions for ${edges.length} edges` });

     if (state.goto) {
       console.log('Jumping to node', state.goto);
@@ -64,7 +64,16 @@ export const createLogicalRouter = ({ edges }: { edges: LogicalConditionEdge[] }
       ]);

       if (result === true) {
-        logger.info(`Condition matched for edge ${edge.source} ${edge.target}`);
+        if (edge.source == edge.target) {
+          logger.info({ msg: `[Router] Stay at node ${edge.source}`, node: edge.source, condition: edge.condition });
+        } else {
+          logger.info({
+            msg: `[Router] Logical condition matched`,
+            transitionFrom: edge.source,
+            transitionTo: edge.target,
+            condition: edge.condition,
+          });
+        }
         return edge.target;
       }
     } catch (error) {
@@ -73,7 +82,7 @@ export const createLogicalRouter = ({ edges }: { edges: LogicalConditionEdge[] }
       const conditionPreview = edge.condition.length > 100 ? `${edge.condition.substring(0, 100)}...` : edge.condition;

       logger.error({
-        msg: `Error evaluating condition for edge ${edge.source} → ${edge.target}`,
+        msg: `[Router] Error evaluating condition for edge ${edge.source} → ${edge.target}`,
         condition: conditionPreview,
         error: errorMessage,
         edgeIndex: edges.indexOf(edge),
@@ -88,15 +97,18 @@ export const createLogicalRouter = ({ edges }: { edges: LogicalConditionEdge[] }

     // If no regular conditions matched, check for "else" conditions
     if (elseEdges.length > 0) {
-      logger.info(`No regular conditions matched, evaluating ${elseEdges.length} else condition(s)`);
       // Return the first "else" condition's target
       const elseEdge = elseEdges[0];
-      logger.info(`Else condition matched for edge ${elseEdge.source} → ${elseEdge.target}`);
+      logger.info({
+        msg: `[Router] Else condition matched`,
+        transitionFrom: elseEdge.source,
+        transitionTo: elseEdge.target,
+      });
       return elseEdge.target;
     }

     // If no conditions matched or all failed, return to the source node
-    logger.info('No conditions matched');
+    logger.info({ msg: `[Router] Stay at node ${edges[0].source}, no conditions matched`, node: edges[0].source });
     return null;
   };
 };
package/src/edges/createPromptRouter.ts CHANGED
@@ -70,7 +70,7 @@ export const createPromptRouter = ({
   currentNodeName?: string;
 }) => {
   return async (state: typeof stateAnnotation.State) => {
-    logger.info(`Executing prompt router. Edges: ${JSON.stringify(edges)}`);
+    logger.debug({ msg: `[Router] Executing prompt router`, edges: JSON.stringify(edges) });

     if (state.goto) {
       console.log('Jumping to node', state.goto);
@@ -79,7 +79,10 @@ export const createPromptRouter = ({

     // If canStayInCurrentNode is true and there are no edges, return current node immediately
     if (canStayInCurrentNode && edges.length === 0 && currentNodeName) {
-      logger.info(`No edges available and canStayInCurrentNode is true, staying in current node: ${currentNodeName}`);
+      logger.info({
+        msg: `[Router] Stay at node, No edges available and canStayInCurrentNode==true`,
+        node: currentNodeName,
+      });
       return currentNodeName;
     }

@@ -179,13 +182,13 @@ export const createPromptRouter = ({

       const decision = validatedResponse.nextNodeId === currentNodeName ? 'stay in current node' : validatedResponse.nextNodeId;
       const reasoning = includeReasoning && 'reasoning' in validatedResponse ? ` - Reasoning: ${validatedResponse.reasoning}` : '';
-      logger.info({ msg: `Router decision: ${decision}`, reasoning });
+      logger.debug({ msg: `[Router] Decision: ${decision}`, reasoning });

       return validatedResponse.nextNodeId;
     } catch (error) {
       lastError = error instanceof Error ? error : new Error(String(error));
       logger.warn({
-        message: `Prompt router attempt ${attempts} failed`,
+        msg: `[Router] Prompt router attempt ${attempts} failed`,
         error: lastError.message,
         attempt: attempts,
         maxRetries,
@@ -195,7 +198,7 @@ export const createPromptRouter = ({

     // If all retries failed, return the first available edge as fallback
     const fallbackNode = edges[0]?.target;
     logger.error({
-      message: 'Prompt router reached max retries, using fallback',
+      msg: '[Router] Prompt router reached max retries, using fallback',
       fallbackNode,
       lastError: lastError.message,
     });
package/src/index.ts CHANGED
@@ -7,6 +7,12 @@ import { resetTimer, cancelTimer, onTimer } from './internalTools/timer';
 export type { ElevenLabsContext } from './types/Voice.types';
 export { Agent, events, logger, sendPlaceholderMessage, resetTimer, cancelTimer, onTimer };

+// Export LLM implementations
+export { MindedChatOpenAI } from './platform/models/mindedChatOpenAI';
+export { createParallelWrapper } from './platform/models/parallelWrapper';
+export type { MindedChatOpenAIFields, BaseParallelChatFields } from './platform/models/mindedChatOpenAI';
+export type { BaseParallelChatFields as ParallelWrapperFields } from './platform/models/parallelWrapper';
+
 // HTTP module for PII masking - only public API
 export type { PIIGatewayInstance, HttpRequestConfig, HttpResponse } from './platform/piiGateway';
package/src/llm/createLlmInstance.ts CHANGED
@@ -1,4 +1,5 @@
-import { LLMConfig, LLMProviders, LLMProvider } from "../types/LLM.types";
+import { LLMConfig, LLMProviders, LLMProvider } from '../types/LLM.types';
+import { createParallelWrapper, BaseParallelChatFields } from '../platform/models/parallelWrapper';

 export const createLlmInstance = (llmConfig: LLMConfig) => {
   const { name, properties } = llmConfig;
@@ -6,5 +7,27 @@ export const createLlmInstance = (llmConfig: LLMConfig) => {
   if (!LLMClass) {
     throw new Error(`Unsupported LLM provider: ${name}`);
   }
-  return new LLMClass(properties);
+
+  // Create the base LLM instance
+  const llmInstance = new LLMClass(properties);
+
+  // Check if parallel configuration is present
+  const hasParallelConfig = properties.numParallelRequests && properties.numParallelRequests > 1;
+
+  // For MindedChatOpenAI, parallel functionality is handled on the backend
+  if (name === 'MindedChatOpenAI') {
+    return llmInstance;
+  }
+
+  // For other LLM providers, apply client-side parallel wrapper if configured
+  if (hasParallelConfig) {
+    const parallelOptions: BaseParallelChatFields = {
+      numParallelRequests: properties.numParallelRequests,
+      logTimings: properties.logTimings,
+    };
+
+    return createParallelWrapper(llmInstance, parallelOptions);
+  }
+
+  return llmInstance;
 };
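
To make the factory's branching concrete, a hypothetical pair of calls (`createLlmInstance` is internal to the package; the config shape follows `LLMConfig` as used in the diff above):

```typescript
// ChatOpenAI with numParallelRequests > 1 gets the client-side wrapper.
const racing = createLlmInstance({
  name: 'ChatOpenAI',
  properties: { model: 'gpt-4o', numParallelRequests: 3, logTimings: true },
});

// MindedChatOpenAI is returned unwrapped; parallelism runs on the backend.
const platform = createLlmInstance({
  name: 'MindedChatOpenAI',
  properties: { model: 'gpt-4o', numParallelRequests: 3 },
});
```
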
package/src/nodes/addAppToolNode.ts CHANGED
@@ -26,14 +26,14 @@ export const addAppToolNode = async ({
   const cleanedParameters = Object.fromEntries(Object.entries(node.parameters || {}).filter(([, value]) => value !== ''));
   const appRunnerTool = getAppActionRunnerTool(node.displayName!);
   const callback: RunnableLike = async (state: typeof stateAnnotation.State) => {
-    logger.info(`Executing tool node ${appRunnerTool.name}`);
+    logger.debug({ msg: `[Node] Executing tool node`, node: appRunnerTool.name });

     const executeWrapper = async (input: z.infer<typeof appRunnerTool.input>) => {
       try {
         const response = await appRunnerTool.execute({ input, state, agent });
         return response || {};
       } catch (error) {
-        logger.error({ msg: 'Error executing tool', error });
+        logger.error({ msg: `[Node] Error executing tool`, error, node: node.name });
         throw error;
       }
     };
package/src/nodes/addBrowserTaskNode.ts CHANGED
@@ -17,7 +17,7 @@ type AddBrowserTaskNodeParams = {

 export const addBrowserTaskNode = async ({ graph, node, agent }: AddBrowserTaskNodeParams) => {
   const callback: RunnableLike = async (state: typeof stateAnnotation.State) => {
-    logger.info({ message: `Executing browser task node ${node.displayName}`, prompt: node.prompt });
+    logger.info({ msg: `Executing browser task node ${node.displayName}`, prompt: node.prompt });

     const prompt = `
     # Task instructions:\n${node.prompt}\n\n
@@ -42,14 +42,20 @@ export const addBrowserTaskNode = async ({ graph, node, agent }: AddBrowserTaskNodeParams) => {
       additional_kwargs: {
         mindedMetadata: {
           nodeType: NodeType.BROWSER_TASK,
-        }
+        },
       },
     });
     try {
-      const result = await executeBrowserTask(`Follow the instructions. Any retrieved data should be printed as string and not saved to a file.\n${node.prompt}`);
-      await agent.compiledGraph.updateState(agent.getLangraphConfig(state.sessionId), {
-        messages: [toolCallingMessage],
-      }, node.name);
+      const result = await executeBrowserTask(
+        `Follow the instructions. Any retrieved data should be printed as string and not saved to a file.\n${node.prompt}`,
+      );
+      await agent.compiledGraph.updateState(
+        agent.getLangraphConfig(state.sessionId),
+        {
+          messages: [toolCallingMessage],
+        },
+        node.name,
+      );
       logger.debug({ message: 'Browser task result', result });
       const toolCall = new ToolMessage({
         id: uuidv4(),
@@ -62,7 +68,7 @@ export const addBrowserTaskNode = async ({ graph, node, agent }: AddBrowserTaskNodeParams) => {
       additional_kwargs: {
         mindedMetadata: {
           nodeType: NodeType.BROWSER_TASK,
-        }
+        },
       },
     });
     return {
@@ -80,7 +86,7 @@ export const addBrowserTaskNode = async ({ graph, node, agent }: AddBrowserTaskNodeParams) => {
       const toolCall = new ToolMessage({
         id: uuidv4(),
         content: JSON.stringify({
-          error: error instanceof Error ? error.message : error.toString()
+          error: error instanceof Error ? error.message : error.toString(),
         }),
         status: 'error',
         name: 'browser-task',
@@ -88,7 +94,7 @@ export const addBrowserTaskNode = async ({ graph, node, agent }: AddBrowserTaskNodeParams) => {
       additional_kwargs: {
         mindedMetadata: {
           nodeType: NodeType.BROWSER_TASK,
-        }
+        },
       },
     });
     return {
@@ -100,7 +106,7 @@ export const addBrowserTaskNode = async ({ graph, node, agent }: AddBrowserTaskNodeParams) => {
         messageIds: [toolCallingMessageId],
       }),
       messages: [toolCallingMessage, toolCall],
-    }
+    };
   }
 };
 graph.addNode(node.name, callback);
package/src/nodes/addHumanInTheLoopNode.ts CHANGED
@@ -13,7 +13,7 @@ export const buildHumanInTheLoopNodeName = (nodeName: string) => `${nodeName}${i

 export const addHumanInTheLoopNode = async ({ graph, attachedToNodeName }: AddHumanInTheLoopNodeParams) => {
   const callback: RunnableLike = async (state: typeof stateAnnotation.State) => {
-    logger.info(`Executing "human in the loop" node for the attached node${attachedToNodeName}`);
+    logger.debug({ msg: `[Node] Waiting for human input`, node: attachedToNodeName });

     if (state.messages[state.messages.length - 1].getType() === 'ai') {
       const value = interrupt('input from human in the loop');
package/src/nodes/addPromptNode.ts CHANGED
@@ -26,7 +26,7 @@ type AddPromptNodeParams = {

 export const addPromptNode = async ({ graph, node, llm, tools, emit, agent }: AddPromptNodeParams) => {
   const callback: RunnableLike = async (state: typeof stateAnnotation.State) => {
-    logger.info(`Executing prompt node ${node.name}`);
+    logger.debug({ msg: `[Node] Executing prompt node`, node: node.displayName });
     const llmToUse = node.llmConfig ? createLlmInstance(node.llmConfig) : llm;

     const globalTools = tools
@@ -61,11 +61,12 @@ export const addPromptNode = async ({ graph, node, llm, tools, emit, agent }: AddPromptNodeParams) => {

     for (const toolCall of result.tool_calls) {
       const matchedTool = globalTools.find((t) => t.name === toolCall.name);
-      logger.info({ msg: `Model called tool ${matchedTool?.name}` });
+      logger.info({ msg: `[Model] Calling tool`, tool: matchedTool?.name });
       if (matchedTool) {
         try {
           // Invoke the LangChain tool directly
           const toolResult = await matchedTool.invoke(toolCall);
+          logger.debug({ msg: `[Tool] Tool result`, tool: matchedTool?.name, result: toolResult });
           const toolStateUpdate = extractToolStateResponse(toolResult);
           // Properly merge memory and other state updates
           stateUpdates = {
@@ -75,7 +76,7 @@ export const addPromptNode = async ({ graph, node, llm, tools, emit, agent }: AddPromptNodeParams) => {
           };
           toolResults.push(toolResult);
         } catch (error) {
-          logger.error({ msg: `Error executing tool ${toolCall.name}:`, error });
+          logger.error({ msg: `[Tool] Error executing tool`, tool: toolCall.name, error });
           const errorMessage = new ToolMessage({
             content: JSON.stringify({ error: error instanceof Error ? error.message : String(error) }),
             tool_call_id: toolCall.id!,
@@ -83,7 +84,7 @@ export const addPromptNode = async ({ graph, node, llm, tools, emit, agent }: AddPromptNodeParams) => {
           toolResults.push(errorMessage);
         }
       } else {
-        logger.error({ msg: `Model tried to call tool ${toolCall.name} but it was not found` });
+        logger.error({ msg: `[Tool] Model called tool but it was not found`, tool: toolCall.name });
       }
     }

@@ -99,7 +100,7 @@ export const addPromptNode = async ({ graph, node, llm, tools, emit, agent }: AddPromptNodeParams) => {
         message: result.content as string,
         state,
       });
-      console.log('AI Message', result.content);
+      logger.info({ msg: `[Model] Response`, content: result.content });
     }

     return {