jaf-py 2.4.3-py3-none-any.whl → 2.4.5-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
jaf/core/__init__.py CHANGED
@@ -13,6 +13,14 @@ from .agent_tool import (
     get_current_run_config,
     set_current_run_config,
 )
+from .parallel_agents import (
+    ParallelAgentGroup,
+    ParallelExecutionConfig,
+    create_parallel_agents_tool,
+    create_simple_parallel_tool,
+    create_language_specialists_tool,
+    create_domain_experts_tool,
+)
 from .proxy import ProxyConfig, ProxyAuth, create_proxy_config, get_default_proxy_config
 
 __all__ = [
@@ -23,6 +31,8 @@ __all__ = [
     "Message",
     "ModelConfig",
     "ModelProvider",
+    "ParallelAgentGroup",
+    "ParallelExecutionConfig",
     "ProxyAuth",
     "ProxyConfig",
     "RunConfig",
@@ -41,9 +51,13 @@ __all__ = [
     "create_agent_tool",
     "create_conditional_enabler",
     "create_default_output_extractor",
+    "create_domain_experts_tool",
     "create_json_output_extractor",
+    "create_language_specialists_tool",
+    "create_parallel_agents_tool",
     "create_proxy_config",
     "create_run_id",
+    "create_simple_parallel_tool",
     "create_trace_id",
     "get_current_run_config",
     "get_default_proxy_config",
jaf/core/engine.py CHANGED
@@ -174,16 +174,18 @@ async def run(
     from .agent_tool import set_current_run_config
     set_current_run_config(config)
 
+    state_with_memory = await _load_conversation_history(initial_state, config)
+
+    # Emit RunStartEvent AFTER loading conversation history so we have complete context
     if config.on_event:
         config.on_event(RunStartEvent(data=to_event_data(RunStartEventData(
             run_id=initial_state.run_id,
             trace_id=initial_state.trace_id,
             session_id=config.conversation_id,
-            context=initial_state.context,
-            messages=initial_state.messages
+            context=state_with_memory.context,
+            messages=state_with_memory.messages,  # Now includes full conversation history
+            agent_name=state_with_memory.current_agent_name
         ))))
-
-    state_with_memory = await _load_conversation_history(initial_state, config)
 
     # Load approvals from storage if configured
     if config.approval_storage:
@@ -514,12 +516,15 @@ async def _run_internal(
     if len(partial_tool_calls) > 0:
         message_tool_calls = []
         for i, tc in enumerate(partial_tool_calls):
+            arguments = tc["function"]["arguments"]
+            if isinstance(arguments, str):
+                arguments = _normalize_tool_call_arguments(arguments)
             message_tool_calls.append({
                 "id": tc["id"] or f"call_{i}",
                 "type": "function",
                 "function": {
                     "name": tc["function"]["name"] or "",
-                    "arguments": tc["function"]["arguments"]
+                    "arguments": arguments
                 }
             })
 
@@ -532,7 +537,7 @@
                 type="function",
                 function=ToolCallFunction(
                     name=mc["function"]["name"],
-                    arguments=mc["function"]["arguments"],
+                    arguments=_normalize_tool_call_arguments(mc["function"]["arguments"])
                 ),
             ) for mc in message_tool_calls
         ],
@@ -551,12 +556,15 @@
     if len(partial_tool_calls) > 0:
         final_tool_calls = []
         for i, tc in enumerate(partial_tool_calls):
+            arguments = tc["function"]["arguments"]
+            if isinstance(arguments, str):
+                arguments = _normalize_tool_call_arguments(arguments)
             final_tool_calls.append({
                 "id": tc["id"] or f"call_{i}",
                 "type": "function",
                 "function": {
                     "name": tc["function"]["name"] or "",
-                    "arguments": tc["function"]["arguments"]
+                    "arguments": arguments
                 }
             })
 
@@ -842,12 +850,33 @@ def _convert_tool_calls(tool_calls: Optional[List[Dict[str, Any]]]) -> Optional[
             type='function',
             function=ToolCallFunction(
                 name=tc['function']['name'],
-                arguments=tc['function']['arguments']
+                arguments=_normalize_tool_call_arguments(tc['function']['arguments'])
             )
         )
         for tc in tool_calls
    ]
 
+
+def _normalize_tool_call_arguments(arguments: Any) -> Any:
+    """Strip trailing streaming artifacts so arguments remain valid JSON strings."""
+    if not arguments or not isinstance(arguments, str):
+        return arguments
+
+    decoder = json.JSONDecoder()
+    try:
+        obj, end = decoder.raw_decode(arguments)
+    except json.JSONDecodeError:
+        return arguments
+
+    remainder = arguments[end:].strip()
+    if remainder:
+        try:
+            return json.dumps(obj)
+        except (TypeError, ValueError):
+            return arguments
+
+    return arguments
+
 async def _execute_tool_calls(
     tool_calls: List[ToolCall],
     agent: Agent[Ctx, Any],
@@ -863,7 +892,8 @@ async def _execute_tool_calls(
             tool_name=tool_call.function.name,
             args=_try_parse_json(tool_call.function.arguments),
             trace_id=state.trace_id,
-            run_id=state.run_id
+            run_id=state.run_id,
+            call_id=tool_call.id
         ))))
 
     try:
@@ -889,7 +919,8 @@
             trace_id=state.trace_id,
             run_id=state.run_id,
             status='error',
-            tool_result={'error': 'tool_not_found'}
+            tool_result={'error': 'tool_not_found'},
+            call_id=tool_call.id
         ))))
 
         return {
@@ -923,7 +954,8 @@
             trace_id=state.trace_id,
             run_id=state.run_id,
             status='error',
-            tool_result={'error': 'validation_error', 'details': e.errors()}
+            tool_result={'error': 'validation_error', 'details': e.errors()},
+            call_id=tool_call.id
         ))))
 
         return {
@@ -1017,7 +1049,7 @@
     else:
         timeout = None
     if timeout is None:
-        timeout = config.default_tool_timeout if config.default_tool_timeout is not None else 30.0
+        timeout = config.default_tool_timeout if config.default_tool_timeout is not None else 300.0
 
     # Merge additional context if provided through approval
     additional_context = approval_status.additional_context if approval_status else None
@@ -1061,7 +1093,8 @@
             trace_id=state.trace_id,
             run_id=state.run_id,
             status='timeout',
-            tool_result={'error': 'timeout_error'}
+            tool_result={'error': 'timeout_error'},
+            call_id=tool_call.id
         ))))
 
         return {
@@ -1113,7 +1146,8 @@
             trace_id=state.trace_id,
             run_id=state.run_id,
             tool_result=tool_result,
-            status='success'
+            status='success',
+            call_id=tool_call.id
         ))))
 
     # Check for handoff
@@ -1151,7 +1185,8 @@
             trace_id=state.trace_id,
             run_id=state.run_id,
             status='error',
-            tool_result={'error': 'execution_error', 'detail': str(error)}
+            tool_result={'error': 'execution_error', 'detail': str(error)},
+            call_id=tool_call.id
         ))))
 
         return {
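
The new _normalize_tool_call_arguments helper defends against streamed tool-call arguments that carry trailing fragments after the first complete JSON document. A small illustration of the behavior it implements (the payloads are made-up examples):

    import json

    clean = '{"city": "Paris"}'
    garbled = '{"city": "Paris"}{"ci'  # trailing streaming artifact

    # raw_decode parses the first complete JSON value and reports where it ended.
    obj, end = json.JSONDecoder().raw_decode(garbled)
    assert garbled[end:].strip()        # non-empty remainder -> re-serialize
    assert json.dumps(obj) == clean     # normalized back to a valid JSON string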
jaf/core/parallel_agents.py ADDED
@@ -0,0 +1,339 @@
+"""
+Parallel Agent Execution for JAF Framework.
+
+This module provides functionality to execute multiple sub-agents in parallel groups,
+allowing for coordinated parallel execution with configurable grouping and result aggregation.
+"""
+
+import asyncio
+import json
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Union, Callable, TypeVar
+
+from .types import (
+    Agent,
+    Tool,
+    ToolSchema,
+    ToolSource,
+    RunConfig,
+    RunState,
+    RunResult,
+    Message,
+    ContentRole,
+    generate_run_id,
+    generate_trace_id,
+)
+from .agent_tool import create_agent_tool, AgentToolInput
+
+Ctx = TypeVar('Ctx')
+Out = TypeVar('Out')
+
+
+@dataclass
+class ParallelAgentGroup:
+    """Configuration for a group of agents to be executed in parallel."""
+    name: str
+    agents: List[Agent[Ctx, Out]]
+    shared_input: bool = True  # Whether all agents receive the same input
+    result_aggregation: str = "combine"  # "combine", "first", "majority", "custom"
+    custom_aggregator: Optional[Callable[[List[str]], str]] = None
+    timeout: Optional[float] = None
+    metadata: Optional[Dict[str, Any]] = None
+
+
+@dataclass
+class ParallelExecutionConfig:
+    """Configuration for parallel agent execution."""
+    groups: List[ParallelAgentGroup]
+    inter_group_execution: str = "sequential"  # "sequential" or "parallel"
+    global_timeout: Optional[float] = None
+    preserve_session: bool = False
+
+
+class ParallelAgentsTool:
+    """Tool that executes multiple agent groups in parallel."""
+
+    def __init__(
+        self,
+        config: ParallelExecutionConfig,
+        tool_name: str = "execute_parallel_agents",
+        tool_description: str = "Execute multiple agents in parallel groups"
+    ):
+        self.config = config
+        self.tool_name = tool_name
+        self.tool_description = tool_description
+
+        # Create tool schema
+        self.schema = ToolSchema(
+            name=tool_name,
+            description=tool_description,
+            parameters=AgentToolInput,
+            timeout=config.global_timeout
+        )
+        self.source = ToolSource.NATIVE
+        self.metadata = {"source": "parallel_agents", "groups": len(config.groups)}
+
+    async def execute(self, args: AgentToolInput, context: Ctx) -> str:
+        """Execute all configured agent groups."""
+        try:
+            if self.config.inter_group_execution == "parallel":
+                # Execute all groups in parallel
+                group_results = await asyncio.gather(*[
+                    self._execute_group(group, args.input, context)
+                    for group in self.config.groups
+                ])
+            else:
+                # Execute groups sequentially
+                group_results = []
+                for group in self.config.groups:
+                    result = await self._execute_group(group, args.input, context)
+                    group_results.append(result)
+
+            # Combine results from all groups
+            final_result = {
+                "parallel_execution_results": {
+                    group.name: result for group, result in zip(self.config.groups, group_results)
+                },
+                "execution_mode": self.config.inter_group_execution,
+                "total_groups": len(self.config.groups)
+            }
+
+            return json.dumps(final_result, indent=2)
+
+        except Exception as e:
+            return json.dumps({
+                "error": "parallel_execution_failed",
+                "message": f"Failed to execute parallel agents: {str(e)}",
+                "groups_attempted": len(self.config.groups)
+            })
+
+    async def _execute_group(
+        self,
+        group: ParallelAgentGroup,
+        input_text: str,
+        context: Ctx
+    ) -> Dict[str, Any]:
+        """Execute a single group of agents in parallel."""
+        try:
+            # Create agent tools for all agents in the group
+            agent_tools = []
+            for agent in group.agents:
+                tool = create_agent_tool(
+                    agent=agent,
+                    tool_name=f"run_{agent.name.lower().replace(' ', '_')}",
+                    tool_description=f"Execute the {agent.name} agent",
+                    timeout=group.timeout,
+                    preserve_session=self.config.preserve_session
+                )
+                agent_tools.append((agent.name, tool))
+
+            # Execute all agents in the group in parallel
+            if group.shared_input:
+                # All agents get the same input
+                tasks = [
+                    tool.execute(AgentToolInput(input=input_text), context)
+                    for _, tool in agent_tools
+                ]
+            else:
+                # This could be extended to support different inputs per agent
+                tasks = [
+                    tool.execute(AgentToolInput(input=input_text), context)
+                    for _, tool in agent_tools
+                ]
+
+            # Execute with timeout if specified
+            if group.timeout:
+                results = await asyncio.wait_for(
+                    asyncio.gather(*tasks, return_exceptions=True),
+                    timeout=group.timeout
+                )
+            else:
+                results = await asyncio.gather(*tasks, return_exceptions=True)
+
+            # Process results
+            agent_results = {}
+            for (agent_name, _), result in zip(agent_tools, results):
+                if isinstance(result, Exception):
+                    agent_results[agent_name] = {
+                        "error": True,
+                        "message": str(result),
+                        "type": type(result).__name__
+                    }
+                else:
+                    agent_results[agent_name] = {
+                        "success": True,
+                        "result": result
+                    }
+
+            # Apply result aggregation
+            aggregated_result = self._aggregate_results(group, agent_results)
+
+            return {
+                "group_name": group.name,
+                "agent_count": len(group.agents),
+                "individual_results": agent_results,
+                "aggregated_result": aggregated_result,
+                "execution_time_ms": None  # Could be added with timing
+            }
+
+        except asyncio.TimeoutError:
+            return {
+                "group_name": group.name,
+                "error": "timeout",
+                "message": f"Group {group.name} execution timed out after {group.timeout} seconds",
+                "agent_count": len(group.agents)
+            }
+        except Exception as e:
+            return {
+                "group_name": group.name,
+                "error": "execution_failed",
+                "message": str(e),
+                "agent_count": len(group.agents)
+            }
+
+    def _aggregate_results(
+        self,
+        group: ParallelAgentGroup,
+        agent_results: Dict[str, Any]
+    ) -> Union[str, Dict[str, Any]]:
+        """Aggregate results from parallel agent execution."""
+        successful_results = [
+            result["result"] for result in agent_results.values()
+            if result.get("success") and "result" in result
+        ]
+
+        if not successful_results:
+            return {"error": "no_successful_results", "message": "All agents failed"}
+
+        if group.result_aggregation == "first":
+            return successful_results[0]
+        elif group.result_aggregation == "combine":
+            return {
+                "combined_results": successful_results,
+                "result_count": len(successful_results)
+            }
+        elif group.result_aggregation == "majority":
+            # Simple majority logic - could be enhanced
+            if len(successful_results) >= len(group.agents) // 2 + 1:
+                return successful_results[0]  # Return first as majority representative
+            else:
+                return {"error": "no_majority", "results": successful_results}
+        elif group.result_aggregation == "custom" and group.custom_aggregator:
+            try:
+                return group.custom_aggregator(successful_results)
+            except Exception as e:
+                return {"error": "custom_aggregation_failed", "message": str(e)}
+        else:
+            return {"combined_results": successful_results}
+
+
+def create_parallel_agents_tool(
+    groups: List[ParallelAgentGroup],
+    tool_name: str = "execute_parallel_agents",
+    tool_description: str = "Execute multiple agents in parallel groups",
+    inter_group_execution: str = "sequential",
+    global_timeout: Optional[float] = None,
+    preserve_session: bool = False
+) -> Tool:
+    """
+    Create a tool that executes multiple agent groups in parallel.
+
+    Args:
+        groups: List of parallel agent groups to execute
+        tool_name: Name of the tool
+        tool_description: Description of the tool
+        inter_group_execution: How to execute groups ("sequential" or "parallel")
+        global_timeout: Global timeout for all executions
+        preserve_session: Whether to preserve session across agent calls
+
+    Returns:
+        A Tool that can execute parallel agent groups
+    """
+    config = ParallelExecutionConfig(
+        groups=groups,
+        inter_group_execution=inter_group_execution,
+        global_timeout=global_timeout,
+        preserve_session=preserve_session
+    )
+
+    return ParallelAgentsTool(config, tool_name, tool_description)
+
+
+def create_simple_parallel_tool(
+    agents: List[Agent],
+    group_name: str = "parallel_group",
+    tool_name: str = "execute_parallel_agents",
+    shared_input: bool = True,
+    result_aggregation: str = "combine",
+    timeout: Optional[float] = None
+) -> Tool:
+    """
+    Create a simple parallel agents tool from a list of agents.
+
+    Args:
+        agents: List of agents to execute in parallel
+        group_name: Name for the parallel group
+        tool_name: Name of the tool
+        shared_input: Whether all agents receive the same input
+        result_aggregation: How to aggregate results ("combine", "first", "majority")
+        timeout: Timeout for parallel execution
+
+    Returns:
+        A Tool that executes all agents in parallel
+    """
+    group = ParallelAgentGroup(
+        name=group_name,
+        agents=agents,
+        shared_input=shared_input,
+        result_aggregation=result_aggregation,
+        timeout=timeout
+    )
+
+    return create_parallel_agents_tool([group], tool_name=tool_name)
+
+
+# Convenience functions for common parallel execution patterns
+
+def create_language_specialists_tool(
+    language_agents: Dict[str, Agent],
+    tool_name: str = "consult_language_specialists",
+    timeout: Optional[float] = 300.0
+) -> Tool:
+    """Create a tool that consults multiple language specialists in parallel."""
+    group = ParallelAgentGroup(
+        name="language_specialists",
+        agents=list(language_agents.values()),
+        shared_input=True,
+        result_aggregation="combine",
+        timeout=timeout,
+        metadata={"languages": list(language_agents.keys())}
+    )
+
+    return create_parallel_agents_tool(
+        [group],
+        tool_name=tool_name,
+        tool_description="Consult multiple language specialists in parallel"
+    )
+
+
+def create_domain_experts_tool(
+    expert_agents: Dict[str, Agent],
+    tool_name: str = "consult_domain_experts",
+    result_aggregation: str = "combine",
+    timeout: Optional[float] = 60.0
+) -> Tool:
+    """Create a tool that consults multiple domain experts in parallel."""
+    group = ParallelAgentGroup(
+        name="domain_experts",
+        agents=list(expert_agents.values()),
+        shared_input=True,
+        result_aggregation=result_aggregation,
+        timeout=timeout,
+        metadata={"domains": list(expert_agents.keys())}
+    )
+
+    return create_parallel_agents_tool(
+        [group],
+        tool_name=tool_name,
+        tool_description="Consult multiple domain experts in parallel"
+    )
jaf/core/streaming.py CHANGED
@@ -209,20 +209,37 @@ async def run_streaming(
         trace_id=initial_state.trace_id
     )
 
-    tool_call_ids = {}  # To map tool calls to their IDs
+    tool_call_ids: Dict[str, str] = {}  # Map call_id -> tool_name for in-flight tool calls
 
     def event_handler(event: TraceEvent) -> None:
         """Handle trace events and put them into the queue."""
         nonlocal tool_call_ids
         streaming_event = None
+        payload = event.data
+
+        def _get_event_value(keys: List[str]) -> Any:
+            for key in keys:
+                if isinstance(payload, dict) and key in payload:
+                    return payload[key]
+                if hasattr(payload, key):
+                    return getattr(payload, key)
+            return None
+
         if event.type == 'tool_call_start':
-            # Generate a unique ID for the tool call
-            call_id = f"call_{uuid.uuid4().hex[:8]}"
-            tool_call_ids[event.data.tool_name] = call_id
-
+            tool_name = _get_event_value(['tool_name', 'toolName']) or 'unknown'
+            args = _get_event_value(['args', 'arguments'])
+            call_id = _get_event_value(['call_id', 'tool_call_id', 'toolCallId'])
+
+            if not call_id:
+                call_id = f"call_{uuid.uuid4().hex[:8]}"
+                if isinstance(payload, dict):
+                    payload['call_id'] = call_id
+
+            tool_call_ids[call_id] = tool_name
+
             tool_call = StreamingToolCall(
-                tool_name=event.data.tool_name,
-                arguments=event.data.args,
+                tool_name=tool_name,
+                arguments=args,
                 call_id=call_id,
                 status='started'
             )
@@ -233,18 +250,26 @@
                 trace_id=initial_state.trace_id
             )
         elif event.type == 'tool_call_end':
-            if event.data.tool_name not in tool_call_ids:
-                raise RuntimeError(
-                    f"Tool call end event received for unknown tool '{event.data.tool_name}'. "
-                    f"Known tool calls: {list(tool_call_ids.keys())}. "
-                    f"This may indicate a missing tool_call_start event or a bug in the streaming implementation."
-                )
-            call_id = tool_call_ids[event.data.tool_name]
+            tool_name = _get_event_value(['tool_name', 'toolName']) or 'unknown'
+            call_id = _get_event_value(['call_id', 'tool_call_id', 'toolCallId'])
+
+            if not call_id:
+                # Fallback to locate a pending tool call with the same tool name
+                matching_call_id = next((cid for cid, name in tool_call_ids.items() if name == tool_name), None)
+                if matching_call_id:
+                    call_id = matching_call_id
+                else:
+                    raise RuntimeError(
+                        f"Tool call end event received for unknown tool '{tool_name}'. "
+                        f"Pending call IDs: {list(tool_call_ids.keys())}."
+                    )
+
+            tool_call_ids.pop(call_id, None)
             tool_result = StreamingToolResult(
-                tool_name=event.data.tool_name,
+                tool_name=tool_name,
                 call_id=call_id,
-                result=event.data.result,
-                status=event.data.status or 'completed'
+                result=_get_event_value(['result']),
+                status=_get_event_value(['status']) or 'completed'
             )
             streaming_event = StreamingEvent(
                 type=StreamingEventType.TOOL_RESULT,
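
The nested _get_event_value helper lets the handler read payloads uniformly whether an event carries a plain dict or a dataclass instance. The same lookup pattern in isolation (payload shapes are illustrative):

    from dataclasses import dataclass
    from typing import Any, List

    @dataclass
    class ToolStartPayload:
        tool_name: str
        call_id: str

    def get_event_value(payload: Any, keys: List[str]) -> Any:
        # Try dict access first, then attribute access, for each candidate key.
        for key in keys:
            if isinstance(payload, dict) and key in payload:
                return payload[key]
            if hasattr(payload, key):
                return getattr(payload, key)
        return None

    assert get_event_value({"toolName": "search"}, ["tool_name", "toolName"]) == "search"
    assert get_event_value(ToolStartPayload("search", "call_1"), ["tool_name"]) == "search"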
jaf/core/tracing.py CHANGED
@@ -10,6 +10,7 @@ import json
 import time
 from datetime import datetime
 from typing import Any, Dict, List, Optional, Protocol
+import uuid
 
 from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
@@ -419,11 +420,24 @@ class LangfuseTraceCollector:
         for i in range(len(messages) - 1, -1, -1):
             msg = messages[i]
 
+            # Extract message data comprehensively
+            msg_data = {}
+
             if isinstance(msg, dict):
                 role = msg.get("role")
                 content = msg.get("content", "")
+                # Capture all additional fields from dict messages
+                msg_data = {
+                    "role": role,
+                    "content": content,
+                    "tool_calls": msg.get("tool_calls"),
+                    "tool_call_id": msg.get("tool_call_id"),
+                    "name": msg.get("name"),
+                    "function_call": msg.get("function_call"),
+                    "timestamp": msg.get("timestamp", datetime.now().isoformat())
+                }
             elif hasattr(msg, 'role'):
-                role = msg.role
+                role = getattr(msg, 'role', None)
                 content = getattr(msg, 'content', "")
                 # Handle both string content and complex content structures
                 if not isinstance(content, str):
@@ -436,9 +450,25 @@
                         content = str(content)
                 else:
                     content = str(content)
+
+                # Capture all additional fields from object messages
+                msg_data = {
+                    "role": role,
+                    "content": content,
+                    "tool_calls": getattr(msg, 'tool_calls', None),
+                    "tool_call_id": getattr(msg, 'tool_call_id', None),
+                    "name": getattr(msg, 'name', None),
+                    "function_call": getattr(msg, 'function_call', None),
+                    "timestamp": getattr(msg, 'timestamp', datetime.now().isoformat())
+                }
             else:
+                # Handle messages that don't have expected structure
+                print(f"[LANGFUSE DEBUG] Skipping message with unexpected structure: {type(msg)}")
                 continue
 
+            # Clean up None values from msg_data
+            msg_data = {k: v for k, v in msg_data.items() if v is not None}
+
             # If we haven't found the current user message yet and this is a user message
             if not current_user_message_found and (role == "user" or role == 'user'):
                 user_query = content
@@ -446,15 +476,29 @@
                 print(f"[LANGFUSE DEBUG] Found current user query: {user_query}")
             elif current_user_message_found:
                 # Add to conversation history (excluding the current user message)
-                conversation_history.insert(0, {
-                    "role": role,
-                    "content": content,
-                    "timestamp": datetime.now().isoformat() if not hasattr(msg, 'timestamp') else getattr(msg, 'timestamp', datetime.now().isoformat())
-                })
+                # Include ALL message types: assistant, tool, system, function, etc.
+                conversation_history.insert(0, msg_data)
+                print(f"[LANGFUSE DEBUG] Added to conversation history: role={role}, content_length={len(str(content))}, has_tool_calls={bool(msg_data.get('tool_calls'))}")
 
         print(f"[LANGFUSE DEBUG] Final extracted - user_query: {user_query}, user_id: {user_id}")
         print(f"[LANGFUSE DEBUG] Conversation history length: {len(conversation_history)}")
 
+        # Debug: Log the roles and types captured in conversation history
+        if conversation_history:
+            roles_summary = {}
+            for msg in conversation_history:
+                role = msg.get("role", "unknown")
+                roles_summary[role] = roles_summary.get(role, 0) + 1
+            print(f"[LANGFUSE DEBUG] Conversation history roles breakdown: {roles_summary}")
+
+            # Log first few messages for verification
+            for i, msg in enumerate(conversation_history[:3]):
+                role = msg.get("role", "unknown")
+                content_preview = str(msg.get("content", ""))[:100]
+                has_tool_calls = bool(msg.get("tool_calls"))
+                has_tool_call_id = bool(msg.get("tool_call_id"))
+                print(f"[LANGFUSE DEBUG] History msg {i}: role={role}, content='{content_preview}...', tool_calls={has_tool_calls}, tool_call_id={has_tool_call_id}")
+
         # Create comprehensive input data for the trace
         trace_input = {
             "user_query": user_query,
@@ -495,6 +539,7 @@
         print(f"[LANGFUSE] Ending trace for run: {trace_id}")
 
         # Update the trace metadata with final tool calls and results
+        conversation_history = getattr(self.trace_spans[trace_id], '_conversation_history', [])
         final_metadata = {
             "framework": "jaf",
             "event_type": "run_end",
@@ -502,7 +547,7 @@
             "user_query": getattr(self.trace_spans[trace_id], '_user_query', None),
             "user_id": getattr(self.trace_spans[trace_id], '_user_id', None),
             "agent_name": event.data.get("agent_name", "analytics_agent_jaf"),
-            "conversation_history": getattr(self.trace_spans[trace_id], '_conversation_history', []),
+            "conversation_history": conversation_history,
             "tool_calls": self.trace_tool_calls.get(trace_id, []),
             "tool_results": self.trace_tool_results.get(trace_id, [])
         }
@@ -608,28 +653,36 @@
         # Start a span for tool calls with detailed input information
         tool_name = event.data.get('tool_name', 'unknown')
         tool_args = event.data.get("args", {})
+        call_id = event.data.get("call_id")
+        if not call_id:
+            call_id = f"{tool_name}-{uuid.uuid4().hex[:8]}"
+            try:
+                event.data["call_id"] = call_id
+            except TypeError:
+                # event.data may be immutable; log and rely on synthetic ID tracking downstream
+                print(f"[LANGFUSE] Generated synthetic call_id for tool start: {call_id}")
 
-        print(f"[LANGFUSE] Starting span for tool call: {tool_name}")
+        print(f"[LANGFUSE] Starting span for tool call: {tool_name} ({call_id})")
 
         # Track this tool call for the trace
         tool_call_data = {
             "tool_name": tool_name,
             "arguments": tool_args,
-            "call_id": event.data.get("call_id"),
+            "call_id": call_id,
             "timestamp": datetime.now().isoformat()
         }
 
         # Ensure trace_id exists in tracking
         if trace_id not in self.trace_tool_calls:
             self.trace_tool_calls[trace_id] = []
-
+
         self.trace_tool_calls[trace_id].append(tool_call_data)
 
         # Create comprehensive input data for the tool call
         tool_input = {
             "tool_name": tool_name,
            "arguments": tool_args,
-            "call_id": event.data.get("call_id"),
+            "call_id": call_id,
             "timestamp": datetime.now().isoformat()
         }
 
@@ -638,7 +691,7 @@
             input=tool_input,
             metadata={
                 "tool_name": tool_name,
-                "call_id": event.data.get("call_id"),
+                "call_id": call_id,
                 "framework": "jaf",
                 "event_type": "tool_call"
             }
@@ -652,14 +705,15 @@
         if span_id in self.active_spans:
             tool_name = event.data.get('tool_name', 'unknown')
             tool_result = event.data.get("result")
+            call_id = event.data.get("call_id")
 
-            print(f"[LANGFUSE] Ending span for tool call: {tool_name}")
+            print(f"[LANGFUSE] Ending span for tool call: {tool_name} ({call_id})")
 
             # Track this tool result for the trace
             tool_result_data = {
                 "tool_name": tool_name,
                 "result": tool_result,
-                "call_id": event.data.get("call_id"),
+                "call_id": call_id,
                 "timestamp": datetime.now().isoformat(),
                 "status": event.data.get("status", "completed"),
                 "tool_result": event.data.get("tool_result")
@@ -674,7 +728,7 @@
             tool_output = {
                 "tool_name": tool_name,
                 "result": tool_result,
-                "call_id": event.data.get("call_id"),
+                "call_id": call_id,
                 "timestamp": datetime.now().isoformat(),
                 "status": event.data.get("status", "completed")
             }
@@ -685,7 +739,7 @@
                 output=tool_output,
                 metadata={
                     "tool_name": tool_name,
-                    "call_id": event.data.get("call_id"),
+                    "call_id": call_id,
                     "result_length": len(str(tool_result)) if tool_result else 0,
                     "framework": "jaf",
                     "event_type": "tool_call_end"
@@ -747,6 +801,9 @@
 
         # Use consistent identifiers that don't depend on timestamp
         if event.type.startswith('tool_call'):
+            call_id = event.data.get('call_id') or event.data.get('tool_call_id')
+            if call_id:
+                return f"tool-{trace_id}-{call_id}"
             tool_name = event.data.get('tool_name') or event.data.get('toolName', 'unknown')
             return f"tool-{tool_name}-{trace_id}"
         elif event.type.startswith('llm_call'):
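
Keying spans by call_id (with the tool name as a fallback) keeps start and end events paired even when the same tool runs several times concurrently. A standalone sketch of the keying scheme (the event dicts are illustrative):

    def span_key(trace_id: str, data: dict) -> str:
        # Prefer the per-invocation call_id; fall back to the tool name.
        call_id = data.get("call_id") or data.get("tool_call_id")
        if call_id:
            return f"tool-{trace_id}-{call_id}"
        return f"tool-{data.get('tool_name', 'unknown')}-{trace_id}"

    # Two concurrent calls to the same tool now get distinct span keys.
    a = span_key("t1", {"tool_name": "search", "call_id": "call_a1"})
    b = span_key("t1", {"tool_name": "search", "call_id": "call_b2"})
    assert a != b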
jaf/core/types.py CHANGED
@@ -490,6 +490,7 @@ class RunStartEventData:
     session_id: Optional[str] = None
     context: Optional[Any] = None
     messages: Optional[List[Message]] = None
+    agent_name: Optional[str] = None
 
 @dataclass(frozen=True)
 class RunStartEvent:
@@ -540,11 +541,12 @@ class ToolCallStartEventData:
     args: Any
     trace_id: TraceId
     run_id: RunId
+    call_id: Optional[str] = None
 
 @dataclass(frozen=True)
 class ToolCallStartEvent:
     type: Literal['tool_call_start'] = 'tool_call_start'
-    data: ToolCallStartEventData = field(default_factory=lambda: ToolCallStartEventData("", None, TraceId(""), RunId("")))
+    data: ToolCallStartEventData = field(default_factory=lambda: ToolCallStartEventData("", None, TraceId(""), RunId(""), None))
 
 @dataclass(frozen=True)
 class ToolCallEndEventData:
@@ -555,11 +557,12 @@ class ToolCallEndEventData:
     run_id: RunId
     tool_result: Optional[Any] = None
     status: Optional[str] = None
+    call_id: Optional[str] = None
 
 @dataclass(frozen=True)
 class ToolCallEndEvent:
     type: Literal['tool_call_end'] = 'tool_call_end'
-    data: ToolCallEndEventData = field(default_factory=lambda: ToolCallEndEventData("", "", TraceId(""), RunId("")))
+    data: ToolCallEndEventData = field(default_factory=lambda: ToolCallEndEventData("", "", TraceId(""), RunId(""), None, None))
 
 @dataclass(frozen=True)
 class HandoffEventData:
@@ -709,5 +712,5 @@ class RunConfig(Generic[Ctx]):
     on_event: Optional[Callable[[TraceEvent], None]] = None
     memory: Optional['MemoryConfig'] = None
     conversation_id: Optional[str] = None
-    default_tool_timeout: Optional[float] = 30.0  # Default timeout for tool execution in seconds
-    approval_storage: Optional['ApprovalStorage'] = None  # Storage for approval decisions
+    default_tool_timeout: Optional[float] = 300.0  # Default timeout for tool execution in seconds
+    approval_storage: Optional['ApprovalStorage'] = None  # Storage for approval decisions
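
With call_id carried on both event payloads, an on_event handler can correlate a tool's start and end without going through the tool name. A sketch, assuming the dataclass payloads defined above:

    import time
    from typing import Dict

    pending: Dict[str, float] = {}  # call_id -> start time

    def on_event(event) -> None:
        if event.type == 'tool_call_start' and event.data.call_id:
            pending[event.data.call_id] = time.monotonic()
        elif event.type == 'tool_call_end' and event.data.call_id in pending:
            elapsed = time.monotonic() - pending.pop(event.data.call_id)
            print(f"{event.data.tool_name} ({event.data.call_id}) took {elapsed:.2f}s")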
jaf/server/server.py CHANGED
@@ -533,12 +533,12 @@ def create_jaf_server(config: ServerConfig[Ctx]) -> FastAPI:
     async def event_stream():
         try:
             # Send initial metadata
-            yield f"event: stream_start\ndata: {json.dumps({
+            yield f"""event: stream_start data: {json.dumps({
                 'runId': str(initial_state.run_id),
                 'traceId': str(initial_state.trace_id),
                 'conversationId': conversation_id,
                 'agent': request.agent_name
-            })}\n\n"
+            })}"""
 
             # Stream events from the engine
             async for event in run_streaming(initial_state, run_config_with_memory):
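
Any streaming HTTP client can consume the SSE endpoint; a sketch using httpx, where the route and request body are assumptions for illustration:

    import asyncio
    import httpx

    async def consume_stream() -> None:
        # Hypothetical endpoint and payload; adjust to the actual server routes.
        async with httpx.AsyncClient(timeout=None) as client:
            async with client.stream(
                "POST", "http://localhost:3000/chat",
                json={"agent_name": "assistant", "messages": [], "stream": True},
            ) as response:
                async for line in response.aiter_lines():
                    if line.startswith("data: "):
                        print(line[len("data: "):])  # raw JSON payload per event

    asyncio.run(consume_stream())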
jaf_py-2.4.5.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: jaf-py
-Version: 2.4.3
+Version: 2.4.5
 Summary: A purely functional agent framework with immutable state and composable tools - Python implementation
 Author: JAF Contributors
 Maintainer: JAF Contributors
jaf_py-2.4.5.dist-info/RECORD CHANGED
@@ -38,21 +38,22 @@ jaf/a2a/tests/test_client.py,sha256=L5h7DtQRVlULiRhRLtrmaCoYdvmbXsgLTy3QQ6KgmNM,
 jaf/a2a/tests/test_integration.py,sha256=I7LdgwN99mAOljM9kYtK7dGMMntTSWKMw_oLOcJjinU,18454
 jaf/a2a/tests/test_protocol.py,sha256=He3vGlBfIazpppAnuSybutrvjIN3VGxEleAohrVd9hc,23287
 jaf/a2a/tests/test_types.py,sha256=PgRjDVJrHSXuu05z0B5lsSUUY5qEdQLFJbLBIExyVgI,18384
-jaf/core/__init__.py,sha256=rBvP_7TGbJICDJnA7a3qyX8yQErCDWaGAn5WzpyH4gU,1339
+jaf/core/__init__.py,sha256=PIGKm8n6OQ8jcXRS0Hn3_Zsl8m2qX91N80YJoLCJ4eU,1762
 jaf/core/agent_tool.py,sha256=tfLNaTIcOZ0dR9GBP1AHLPkLExm_dLbURnVIN4R84FQ,11806
 jaf/core/analytics.py,sha256=zFHIWqWal0bbEFCmJDc4DKeM0Ja7b_D19PqVaBI12pA,23338
 jaf/core/composition.py,sha256=IVxRO1Q9nK7JRH32qQ4p8WMIUu66BhqPNrlTNMGFVwE,26317
-jaf/core/engine.py,sha256=uijM1N-W-OG1oelMtV4m-o2cdrBziLU75jn8JOZBaWA,50631
+jaf/core/engine.py,sha256=bjHNn8MoE3o0BAuBCMBY3EjtpMckjWlBotJ-oinfSZ0,52111
 jaf/core/errors.py,sha256=5fwTNhkojKRQ4wZj3lZlgDnAsrYyjYOwXJkIr5EGNUc,5539
+jaf/core/parallel_agents.py,sha256=ahwYoTnkrF4xQgV-hjc5sUaWhQWQFENMZG5riNa_Ieg,12165
 jaf/core/performance.py,sha256=jedQmTEkrKMD6_Aw1h8PdG-5TsdYSFFT7Or6k5dmN2g,9974
 jaf/core/proxy.py,sha256=_WM3cpRlSQLYpgSBrnY30UPMe2iZtlqDQ65kppE-WY0,4609
 jaf/core/proxy_helpers.py,sha256=i7a5fAX9rLmO4FMBX51-yRkTFwfWedzQNgnLmeLUd_A,4370
 jaf/core/state.py,sha256=NMtYTpUYa64m1Kte6lD8LGnF2bl69HAcdgXH6f-M97c,5650
-jaf/core/streaming.py,sha256=c5o9iqpjoYV2LrUpG6qLWCYrWcP-DCcZsvMbyqKunp8,16089
+jaf/core/streaming.py,sha256=h_lYHQA9ee_D5QsDO9-Vhevgi7rFXPslPzd9605AJGo,17034
 jaf/core/tool_results.py,sha256=-bTOqOX02lMyslp5Z4Dmuhx0cLd5o7kgR88qK2HO_sw,11323
 jaf/core/tools.py,sha256=84N9A7QQ3xxcOs2eUUot3nmCnt5i7iZT9VwkuzuFBxQ,16274
-jaf/core/tracing.py,sha256=pOswPTvhIYXj88kxMSJDySfbcnROU3eB4dooVl1e_18,36604
-jaf/core/types.py,sha256=_e7F368BwzdZ2BZuakSp5Bw7gzi8xYD0LlOIy2S25Ds,23061
+jaf/core/tracing.py,sha256=iuVgykFUSkoBjem1k6jdVLrhRZzJn-avyxc_6W9BXPI,40159
+jaf/core/types.py,sha256=8TA5cyNAc_rp5Tn-zmvt6rMi_0atRXbewpaiB5Ss7-g,23186
 jaf/core/workflows.py,sha256=Ul-82gzjIXtkhnSMSPv-8igikjkMtW1EBo9yrfodtvI,26294
 jaf/memory/__init__.py,sha256=-L98xlvihurGAzF0DnXtkueDVvO_wV2XxxEwAWdAj50,1400
 jaf/memory/approval_storage.py,sha256=HHZ_b57kIthdR53QE5XNSII9xy1Cg-1cFUCSAZ8A4Rk,11083
@@ -73,7 +74,7 @@ jaf/providers/mcp.py,sha256=WxcC8gUFpDBBYyhorMcc1jHq3xMDMBtnwyRPthfL0S0,13074
 jaf/providers/model.py,sha256=NJTa-1k0EruDdLf2HS1ZdDpFJhHXzzfQyXAbJx9kZVc,25468
 jaf/server/__init__.py,sha256=fMPnLZBRm6t3yQrr7-PnoHAQ8qj9o6Z1AJLM1M6bIS0,392
 jaf/server/main.py,sha256=CTb0ywbPIq9ELfay5MKChVR7BpIQOoEbPjPfpzo2aBQ,2152
-jaf/server/server.py,sha256=LooAxxdeitwZHJff-UTE3L7zM6pSNqixMipOgspL5SA,38787
+jaf/server/server.py,sha256=K8XKNyadP_YqkCRSK9jCVZh52d2_IbHp_jHkKzBeB9Q,38786
 jaf/server/types.py,sha256=Gg8z1bkA7IYg94lp31iB92-4VkJr9syKA41uVCfNZBc,10544
 jaf/utils/__init__.py,sha256=4Lte7HPIpmEuGvWd9lSH9gljV11wy-yNFjECPhcejAY,1236
 jaf/utils/attachments.py,sha256=9xNzzQanCwJnBR1L6P79YQtbuRupiDluDn46SYUlHok,13542
@@ -84,9 +85,9 @@ jaf/visualization/functional_core.py,sha256=zedMDZbvjuOugWwnh6SJ2stvRNQX1Hlkb9Ab
 jaf/visualization/graphviz.py,sha256=WTOM6UP72-lVKwI4_SAr5-GCC3ouckxHv88ypCDQWJ0,12056
 jaf/visualization/imperative_shell.py,sha256=GpMrAlMnLo2IQgyB2nardCz09vMvAzaYI46MyrvJ0i4,2593
 jaf/visualization/types.py,sha256=QQcbVeQJLuAOXk8ynd08DXIS-PVCnv3R-XVE9iAcglw,1389
-jaf_py-2.4.3.dist-info/licenses/LICENSE,sha256=LXUQBJxdyr-7C4bk9cQBwvsF_xwA-UVstDTKabpcjlI,1063
-jaf_py-2.4.3.dist-info/METADATA,sha256=b3kDMPBoPR_Z44jyVnZHnpHM-bSDTkuXeSegWXsUSHw,27712
-jaf_py-2.4.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-jaf_py-2.4.3.dist-info/entry_points.txt,sha256=OtIJeNJpb24kgGrqRx9szGgDx1vL9ayq8uHErmu7U5w,41
-jaf_py-2.4.3.dist-info/top_level.txt,sha256=Xu1RZbGaM4_yQX7bpalo881hg7N_dybaOW282F15ruE,4
-jaf_py-2.4.3.dist-info/RECORD,,
+jaf_py-2.4.5.dist-info/licenses/LICENSE,sha256=LXUQBJxdyr-7C4bk9cQBwvsF_xwA-UVstDTKabpcjlI,1063
+jaf_py-2.4.5.dist-info/METADATA,sha256=9oUGQOOBTFoMdCVPfnyC9ucxrBXWzvporO5hDuDXkjA,27712
+jaf_py-2.4.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+jaf_py-2.4.5.dist-info/entry_points.txt,sha256=OtIJeNJpb24kgGrqRx9szGgDx1vL9ayq8uHErmu7U5w,41
+jaf_py-2.4.5.dist-info/top_level.txt,sha256=Xu1RZbGaM4_yQX7bpalo881hg7N_dybaOW282F15ruE,4
+jaf_py-2.4.5.dist-info/RECORD,,