jaf-py 2.5.10-py3-none-any.whl → 2.5.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. jaf/__init__.py +154 -57
  2. jaf/a2a/__init__.py +42 -21
  3. jaf/a2a/agent.py +79 -126
  4. jaf/a2a/agent_card.py +87 -78
  5. jaf/a2a/client.py +30 -66
  6. jaf/a2a/examples/client_example.py +12 -12
  7. jaf/a2a/examples/integration_example.py +38 -47
  8. jaf/a2a/examples/server_example.py +56 -53
  9. jaf/a2a/memory/__init__.py +0 -4
  10. jaf/a2a/memory/cleanup.py +28 -21
  11. jaf/a2a/memory/factory.py +155 -133
  12. jaf/a2a/memory/providers/composite.py +21 -26
  13. jaf/a2a/memory/providers/in_memory.py +89 -83
  14. jaf/a2a/memory/providers/postgres.py +117 -115
  15. jaf/a2a/memory/providers/redis.py +128 -121
  16. jaf/a2a/memory/serialization.py +77 -87
  17. jaf/a2a/memory/tests/run_comprehensive_tests.py +112 -83
  18. jaf/a2a/memory/tests/test_cleanup.py +211 -94
  19. jaf/a2a/memory/tests/test_serialization.py +73 -68
  20. jaf/a2a/memory/tests/test_stress_concurrency.py +186 -133
  21. jaf/a2a/memory/tests/test_task_lifecycle.py +138 -120
  22. jaf/a2a/memory/types.py +91 -53
  23. jaf/a2a/protocol.py +95 -125
  24. jaf/a2a/server.py +90 -118
  25. jaf/a2a/standalone_client.py +30 -43
  26. jaf/a2a/tests/__init__.py +16 -33
  27. jaf/a2a/tests/run_tests.py +17 -53
  28. jaf/a2a/tests/test_agent.py +40 -140
  29. jaf/a2a/tests/test_client.py +54 -117
  30. jaf/a2a/tests/test_integration.py +28 -82
  31. jaf/a2a/tests/test_protocol.py +54 -139
  32. jaf/a2a/tests/test_types.py +50 -136
  33. jaf/a2a/types.py +58 -34
  34. jaf/cli.py +21 -41
  35. jaf/core/__init__.py +7 -1
  36. jaf/core/agent_tool.py +93 -72
  37. jaf/core/analytics.py +257 -207
  38. jaf/core/checkpoint.py +223 -0
  39. jaf/core/composition.py +249 -235
  40. jaf/core/engine.py +817 -519
  41. jaf/core/errors.py +55 -42
  42. jaf/core/guardrails.py +276 -202
  43. jaf/core/handoff.py +47 -31
  44. jaf/core/parallel_agents.py +69 -75
  45. jaf/core/performance.py +75 -73
  46. jaf/core/proxy.py +43 -44
  47. jaf/core/proxy_helpers.py +24 -27
  48. jaf/core/regeneration.py +220 -129
  49. jaf/core/state.py +68 -66
  50. jaf/core/streaming.py +115 -108
  51. jaf/core/tool_results.py +111 -101
  52. jaf/core/tools.py +114 -116
  53. jaf/core/tracing.py +269 -210
  54. jaf/core/types.py +371 -151
  55. jaf/core/workflows.py +209 -168
  56. jaf/exceptions.py +46 -38
  57. jaf/memory/__init__.py +1 -6
  58. jaf/memory/approval_storage.py +54 -77
  59. jaf/memory/factory.py +4 -4
  60. jaf/memory/providers/in_memory.py +216 -180
  61. jaf/memory/providers/postgres.py +216 -146
  62. jaf/memory/providers/redis.py +173 -116
  63. jaf/memory/types.py +70 -51
  64. jaf/memory/utils.py +36 -34
  65. jaf/plugins/__init__.py +12 -12
  66. jaf/plugins/base.py +105 -96
  67. jaf/policies/__init__.py +0 -1
  68. jaf/policies/handoff.py +37 -46
  69. jaf/policies/validation.py +76 -52
  70. jaf/providers/__init__.py +6 -3
  71. jaf/providers/mcp.py +97 -51
  72. jaf/providers/model.py +360 -279
  73. jaf/server/__init__.py +1 -1
  74. jaf/server/main.py +7 -11
  75. jaf/server/server.py +514 -359
  76. jaf/server/types.py +208 -52
  77. jaf/utils/__init__.py +17 -18
  78. jaf/utils/attachments.py +111 -116
  79. jaf/utils/document_processor.py +175 -174
  80. jaf/visualization/__init__.py +1 -1
  81. jaf/visualization/example.py +111 -110
  82. jaf/visualization/functional_core.py +46 -71
  83. jaf/visualization/graphviz.py +154 -189
  84. jaf/visualization/imperative_shell.py +7 -16
  85. jaf/visualization/types.py +8 -4
  86. {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/METADATA +2 -2
  87. jaf_py-2.5.11.dist-info/RECORD +97 -0
  88. jaf_py-2.5.10.dist-info/RECORD +0 -96
  89. {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/WHEEL +0 -0
  90. {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/entry_points.txt +0 -0
  91. {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/licenses/LICENSE +0 -0
  92. {jaf_py-2.5.10.dist-info → jaf_py-2.5.11.dist-info}/top_level.txt +0 -0
jaf/core/handoff.py CHANGED
@@ -17,30 +17,36 @@ except ImportError:
     BaseModel = None
     Field = None
 
-Ctx = TypeVar('Ctx')
+Ctx = TypeVar("Ctx")
 
 
 def _create_handoff_json(agent_name: str, message: str = "") -> str:
     """Create the JSON structure for handoff requests."""
-    return json.dumps({
-        "handoff_to": agent_name,
-        "message": message or f"Handing off to {agent_name}",
-        "type": "handoff"
-    })
+    return json.dumps(
+        {
+            "handoff_to": agent_name,
+            "message": message or f"Handing off to {agent_name}",
+            "type": "handoff",
+        }
+    )
 
 
 if BaseModel is not None and Field is not None:
+
     class _HandoffInput(BaseModel):
         """Input parameters for handoff tool (Pydantic model)."""
+
         agent_name: str = Field(description="Name of the agent to hand off to")
         message: str = Field(description="Message or context to pass to the target agent")
 else:
+
     class _HandoffInput(object):
         """Plain-Python fallback for handoff input when Pydantic is unavailable.
 
         This class intentionally does not call Field() so it is safe to import
         when Pydantic is not installed.
         """
+
         agent_name: str
         message: str
 
@@ -48,12 +54,14 @@ else:
             self.agent_name = agent_name
             self.message = message
 
+
 HandoffInput = _HandoffInput
 
 
 @dataclass
 class HandoffResult:
     """Result of a handoff operation."""
+
     target_agent: str
     message: str
     success: bool = True
@@ -74,20 +82,20 @@ class HandoffTool:
             "properties": {
                 "agent_name": {
                     "type": "string",
-                    "description": "Name of the agent to hand off to"
+                    "description": "Name of the agent to hand off to",
                 },
                 "message": {
                     "type": "string",
-                    "description": "Message or context to pass to the target agent"
-                }
+                    "description": "Message or context to pass to the target agent",
+                },
             },
-            "required": ["agent_name", "message"]
+            "required": ["agent_name", "message"],
         }
 
         self.schema = ToolSchema(
             name="handoff",
             description="Hand off the conversation to another agent",
-            parameters=parameters_model
+            parameters=parameters_model,
         )
         self.source = ToolSource.NATIVE
         self.metadata = {"type": "handoff", "system": True}
@@ -101,35 +109,41 @@ class HandoffTool:
             context (Any): Context containing current agent and run state information.
         """
         # Extract arguments
-        if hasattr(args, 'agent_name'):
+        if hasattr(args, "agent_name"):
            agent_name = args.agent_name
            message = args.message
         elif isinstance(args, dict):
-            agent_name = args.get('agent_name', '')
-            message = args.get('message', '')
+            agent_name = args.get("agent_name", "")
+            message = args.get("message", "")
         else:
-            return json.dumps({
-                "error": "invalid_handoff_args",
-                "message": "Invalid handoff arguments provided",
-                "usage": "handoff(agent_name='target_agent', message='optional context')"
-            })
+            return json.dumps(
+                {
+                    "error": "invalid_handoff_args",
+                    "message": "Invalid handoff arguments provided",
+                    "usage": "handoff(agent_name='target_agent', message='optional context')",
+                }
+            )
 
         if not agent_name:
-            return json.dumps({
-                "error": "missing_agent_name",
-                "message": "Agent name is required for handoff",
-                "usage": "handoff(agent_name='target_agent', message='optional context')"
-            })
+            return json.dumps(
+                {
+                    "error": "missing_agent_name",
+                    "message": "Agent name is required for handoff",
+                    "usage": "handoff(agent_name='target_agent', message='optional context')",
+                }
+            )
 
         # Add agent validation if we have access to current agent info
-        if context and hasattr(context, 'current_agent'):
+        if context and hasattr(context, "current_agent"):
             current_agent = context.current_agent
             if current_agent.handoffs and agent_name not in current_agent.handoffs:
-                return json.dumps({
-                    "error": "handoff_not_allowed",
-                    "message": f"Agent {current_agent.name} cannot handoff to {agent_name}",
-                    "allowed_handoffs": current_agent.handoffs
-                })
+                return json.dumps(
+                    {
+                        "error": "handoff_not_allowed",
+                        "message": f"Agent {current_agent.name} cannot handoff to {agent_name}",
+                        "allowed_handoffs": current_agent.handoffs,
+                    }
+                )
 
         # Return the special handoff JSON that the engine recognizes
         return _create_handoff_json(agent_name, message)
@@ -139,8 +153,10 @@ def create_handoff_tool() -> Tool:
     """Create a handoff tool that can be added to any agent."""
     return HandoffTool()
 
+
 handoff_tool = create_handoff_tool()
 
+
 def handoff(agent_name: str, message: str = "") -> str:
     """
     Simple function to perform a handoff (for use in agent tools).
@@ -188,4 +204,4 @@ def extract_handoff_target(result: str) -> Optional[str]:
             return parsed["handoff_to"]
     except (json.JSONDecodeError, TypeError):
         pass
-    return None
+    return None
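
For orientation, a minimal usage sketch of the handoff helpers touched above. It assumes handoff() returns the same JSON payload that _create_handoff_json() builds (the body of handoff() is outside these hunks), and the agent name "billing_agent" is purely illustrative.

    # Sketch only: build a handoff payload, then read the target agent back out.
    from jaf.core.handoff import extract_handoff_target, handoff

    payload = handoff("billing_agent", "Customer needs a refund")
    # payload is a JSON string containing "handoff_to", "message", and "type".
    assert extract_handoff_target(payload) == "billing_agent"
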
jaf/core/parallel_agents.py CHANGED
@@ -25,13 +25,14 @@ from .types import (
 )
 from .agent_tool import create_agent_tool, AgentToolInput
 
-Ctx = TypeVar('Ctx')
-Out = TypeVar('Out')
+Ctx = TypeVar("Ctx")
+Out = TypeVar("Out")
 
 
 @dataclass
 class ParallelAgentGroup:
     """Configuration for a group of agents to be executed in parallel."""
+
     name: str
     agents: List[Agent[Ctx, Out]]
     shared_input: bool = True  # Whether all agents receive the same input
@@ -44,6 +45,7 @@ class ParallelAgentGroup:
 @dataclass
 class ParallelExecutionConfig:
     """Configuration for parallel agent execution."""
+
     groups: List[ParallelAgentGroup]
     inter_group_execution: str = "sequential"  # "sequential" or "parallel"
     global_timeout: Optional[float] = None
@@ -52,66 +54,67 @@ class ParallelExecutionConfig:
 
 class ParallelAgentsTool:
     """Tool that executes multiple agent groups in parallel."""
-
+
     def __init__(
         self,
         config: ParallelExecutionConfig,
         tool_name: str = "execute_parallel_agents",
-        tool_description: str = "Execute multiple agents in parallel groups"
+        tool_description: str = "Execute multiple agents in parallel groups",
     ):
         self.config = config
         self.tool_name = tool_name
         self.tool_description = tool_description
-
+
         # Create tool schema
         self.schema = ToolSchema(
             name=tool_name,
             description=tool_description,
             parameters=AgentToolInput,
-            timeout=config.global_timeout
+            timeout=config.global_timeout,
         )
         self.source = ToolSource.NATIVE
         self.metadata = {"source": "parallel_agents", "groups": len(config.groups)}
-
+
     async def execute(self, args: AgentToolInput, context: Ctx) -> str:
         """Execute all configured agent groups."""
         try:
             if self.config.inter_group_execution == "parallel":
                 # Execute all groups in parallel
-                group_results = await asyncio.gather(*[
-                    self._execute_group(group, args.input, context)
-                    for group in self.config.groups
-                ])
+                group_results = await asyncio.gather(
+                    *[
+                        self._execute_group(group, args.input, context)
+                        for group in self.config.groups
+                    ]
+                )
             else:
                 # Execute groups sequentially
                 group_results = []
                 for group in self.config.groups:
                     result = await self._execute_group(group, args.input, context)
                     group_results.append(result)
-
+
             # Combine results from all groups
             final_result = {
                 "parallel_execution_results": {
                     group.name: result for group, result in zip(self.config.groups, group_results)
                 },
                 "execution_mode": self.config.inter_group_execution,
-                "total_groups": len(self.config.groups)
+                "total_groups": len(self.config.groups),
             }
-
+
             return json.dumps(final_result, indent=2)
-
+
         except Exception as e:
-            return json.dumps({
-                "error": "parallel_execution_failed",
-                "message": f"Failed to execute parallel agents: {str(e)}",
-                "groups_attempted": len(self.config.groups)
-            })
-
+            return json.dumps(
+                {
+                    "error": "parallel_execution_failed",
+                    "message": f"Failed to execute parallel agents: {str(e)}",
+                    "groups_attempted": len(self.config.groups),
+                }
+            )
+
     async def _execute_group(
-        self,
-        group: ParallelAgentGroup,
-        input_text: str,
-        context: Ctx
+        self, group: ParallelAgentGroup, input_text: str, context: Ctx
     ) -> Dict[str, Any]:
         """Execute a single group of agents in parallel."""
         try:
@@ -123,10 +126,10 @@ class ParallelAgentsTool:
                     tool_name=f"run_{agent.name.lower().replace(' ', '_')}",
                     tool_description=f"Execute the {agent.name} agent",
                     timeout=group.timeout,
-                    preserve_session=self.config.preserve_session
+                    preserve_session=self.config.preserve_session,
                 )
                 agent_tools.append((agent.name, tool))
-
+
             # Execute all agents in the group in parallel
             if group.shared_input:
                 # All agents get the same input
@@ -140,16 +143,15 @@
                     tool.execute(AgentToolInput(input=input_text), context)
                     for _, tool in agent_tools
                 ]
-
+
                 # Execute with timeout if specified
                 if group.timeout:
                     results = await asyncio.wait_for(
-                        asyncio.gather(*tasks, return_exceptions=True),
-                        timeout=group.timeout
+                        asyncio.gather(*tasks, return_exceptions=True), timeout=group.timeout
                     )
                 else:
                     results = await asyncio.gather(*tasks, return_exceptions=True)
-
+
                 # Process results
                 agent_results = {}
                 for (agent_name, _), result in zip(agent_tools, results):
@@ -157,61 +159,54 @@
                         agent_results[agent_name] = {
                             "error": True,
                             "message": str(result),
-                            "type": type(result).__name__
+                            "type": type(result).__name__,
                         }
                     else:
-                        agent_results[agent_name] = {
-                            "success": True,
-                            "result": result
-                        }
-
+                        agent_results[agent_name] = {"success": True, "result": result}
+
             # Apply result aggregation
             aggregated_result = self._aggregate_results(group, agent_results)
-
+
             return {
                 "group_name": group.name,
                 "agent_count": len(group.agents),
                 "individual_results": agent_results,
                 "aggregated_result": aggregated_result,
-                "execution_time_ms": None  # Could be added with timing
+                "execution_time_ms": None,  # Could be added with timing
             }
-
+
         except asyncio.TimeoutError:
             return {
                 "group_name": group.name,
                 "error": "timeout",
                 "message": f"Group {group.name} execution timed out after {group.timeout} seconds",
-                "agent_count": len(group.agents)
+                "agent_count": len(group.agents),
             }
         except Exception as e:
             return {
                 "group_name": group.name,
                 "error": "execution_failed",
                 "message": str(e),
-                "agent_count": len(group.agents)
+                "agent_count": len(group.agents),
             }
-
+
     def _aggregate_results(
-        self,
-        group: ParallelAgentGroup,
-        agent_results: Dict[str, Any]
+        self, group: ParallelAgentGroup, agent_results: Dict[str, Any]
     ) -> Union[str, Dict[str, Any]]:
         """Aggregate results from parallel agent execution."""
         successful_results = [
-            result["result"] for result in agent_results.values()
+            result["result"]
+            for result in agent_results.values()
             if result.get("success") and "result" in result
         ]
-
+
         if not successful_results:
             return {"error": "no_successful_results", "message": "All agents failed"}
-
+
         if group.result_aggregation == "first":
             return successful_results[0]
         elif group.result_aggregation == "combine":
-            return {
-                "combined_results": successful_results,
-                "result_count": len(successful_results)
-            }
+            return {"combined_results": successful_results, "result_count": len(successful_results)}
         elif group.result_aggregation == "majority":
             # Simple majority logic - could be enhanced
             if len(successful_results) >= len(group.agents) // 2 + 1:
@@ -233,11 +228,11 @@ def create_parallel_agents_tool(
     tool_description: str = "Execute multiple agents in parallel groups",
     inter_group_execution: str = "sequential",
     global_timeout: Optional[float] = None,
-    preserve_session: bool = False
+    preserve_session: bool = False,
 ) -> Tool:
     """
     Create a tool that executes multiple agent groups in parallel.
-
+
     Args:
         groups: List of parallel agent groups to execute
         tool_name: Name of the tool
@@ -245,7 +240,7 @@
         inter_group_execution: How to execute groups ("sequential" or "parallel")
         global_timeout: Global timeout for all executions
         preserve_session: Whether to preserve session across agent calls
-
+
     Returns:
         A Tool that can execute parallel agent groups
     """
@@ -253,9 +248,9 @@
         groups=groups,
         inter_group_execution=inter_group_execution,
         global_timeout=global_timeout,
-        preserve_session=preserve_session
+        preserve_session=preserve_session,
     )
-
+
     return ParallelAgentsTool(config, tool_name, tool_description)
 
 
@@ -265,11 +260,11 @@ def create_simple_parallel_tool(
     tool_name: str = "execute_parallel_agents",
     shared_input: bool = True,
     result_aggregation: str = "combine",
-    timeout: Optional[float] = None
+    timeout: Optional[float] = None,
 ) -> Tool:
     """
     Create a simple parallel agents tool from a list of agents.
-
+
     Args:
         agents: List of agents to execute in parallel
         group_name: Name for the parallel group
@@ -277,7 +272,7 @@
         shared_input: Whether all agents receive the same input
         result_aggregation: How to aggregate results ("combine", "first", "majority")
         timeout: Timeout for parallel execution
-
+
     Returns:
         A Tool that executes all agents in parallel
     """
@@ -286,18 +281,19 @@
         agents=agents,
         shared_input=shared_input,
         result_aggregation=result_aggregation,
-        timeout=timeout
+        timeout=timeout,
     )
-
+
     return create_parallel_agents_tool([group], tool_name=tool_name)
 
 
 # Convenience functions for common parallel execution patterns
 
+
 def create_language_specialists_tool(
     language_agents: Dict[str, Agent],
     tool_name: str = "consult_language_specialists",
-    timeout: Optional[float] = 300.0
+    timeout: Optional[float] = 300.0,
 ) -> Tool:
     """Create a tool that consults multiple language specialists in parallel."""
     group = ParallelAgentGroup(
@@ -306,13 +302,13 @@ def create_language_specialists_tool(
         shared_input=True,
         result_aggregation="combine",
         timeout=timeout,
-        metadata={"languages": list(language_agents.keys())}
+        metadata={"languages": list(language_agents.keys())},
     )
-
+
     return create_parallel_agents_tool(
-        [group],
+        [group],
         tool_name=tool_name,
-        tool_description="Consult multiple language specialists in parallel"
+        tool_description="Consult multiple language specialists in parallel",
     )
 
 
@@ -320,7 +316,7 @@ def create_domain_experts_tool(
     expert_agents: Dict[str, Agent],
     tool_name: str = "consult_domain_experts",
     result_aggregation: str = "combine",
-    timeout: Optional[float] = 60.0
+    timeout: Optional[float] = 60.0,
 ) -> Tool:
     """Create a tool that consults multiple domain experts in parallel."""
     group = ParallelAgentGroup(
@@ -329,11 +325,9 @@
         shared_input=True,
         result_aggregation=result_aggregation,
         timeout=timeout,
-        metadata={"domains": list(expert_agents.keys())}
+        metadata={"domains": list(expert_agents.keys())},
     )
-
+
     return create_parallel_agents_tool(
-        [group],
-        tool_name=tool_name,
-        tool_description="Consult multiple domain experts in parallel"
-    )
+        [group], tool_name=tool_name, tool_description="Consult multiple domain experts in parallel"
+    )
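
For context on the factory functions reformatted above, a minimal sketch of how they compose. research_agent and review_agent stand in for already-constructed JAF Agent instances (their construction is not shown in this diff), and the tool name "consult_reviewers" is invented for the example.

    # Sketch only: group two hypothetical agents and expose them as one parallel tool.
    from jaf.core.parallel_agents import ParallelAgentGroup, create_parallel_agents_tool

    group = ParallelAgentGroup(
        name="reviewers",
        agents=[research_agent, review_agent],  # hypothetical pre-built Agent instances
        shared_input=True,                      # both agents receive the same input text
        result_aggregation="combine",           # keep every successful result
        timeout=60.0,
    )
    parallel_tool = create_parallel_agents_tool(
        [group],
        tool_name="consult_reviewers",
        inter_group_execution="sequential",
    )
    # parallel_tool.execute(...) returns a JSON string with per-group results under
    # "parallel_execution_results".
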