jaf-py 2.4.4__py3-none-any.whl → 2.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jaf/core/__init__.py +14 -0
- jaf/core/engine.py +44 -11
- jaf/core/parallel_agents.py +339 -0
- jaf/core/streaming.py +42 -17
- jaf/core/tracing.py +22 -9
- jaf/core/types.py +6 -4
- jaf/server/server.py +2 -2
- {jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/METADATA +1 -1
- {jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/RECORD +13 -12
- {jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/WHEEL +0 -0
- {jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/entry_points.txt +0 -0
- {jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/licenses/LICENSE +0 -0
- {jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/top_level.txt +0 -0
jaf/core/__init__.py
CHANGED
@@ -13,6 +13,14 @@ from .agent_tool import (
     get_current_run_config,
     set_current_run_config,
 )
+from .parallel_agents import (
+    ParallelAgentGroup,
+    ParallelExecutionConfig,
+    create_parallel_agents_tool,
+    create_simple_parallel_tool,
+    create_language_specialists_tool,
+    create_domain_experts_tool,
+)
 from .proxy import ProxyConfig, ProxyAuth, create_proxy_config, get_default_proxy_config

 __all__ = [
@@ -23,6 +31,8 @@ __all__ = [
     "Message",
     "ModelConfig",
     "ModelProvider",
+    "ParallelAgentGroup",
+    "ParallelExecutionConfig",
     "ProxyAuth",
     "ProxyConfig",
     "RunConfig",
@@ -41,9 +51,13 @@ __all__ = [
     "create_agent_tool",
     "create_conditional_enabler",
     "create_default_output_extractor",
+    "create_domain_experts_tool",
     "create_json_output_extractor",
+    "create_language_specialists_tool",
+    "create_parallel_agents_tool",
     "create_proxy_config",
     "create_run_id",
+    "create_simple_parallel_tool",
     "create_trace_id",
     "get_current_run_config",
     "get_default_proxy_config",
jaf/core/engine.py
CHANGED
@@ -516,12 +516,15 @@ async def _run_internal(
         if len(partial_tool_calls) > 0:
             message_tool_calls = []
             for i, tc in enumerate(partial_tool_calls):
+                arguments = tc["function"]["arguments"]
+                if isinstance(arguments, str):
+                    arguments = _normalize_tool_call_arguments(arguments)
                 message_tool_calls.append({
                     "id": tc["id"] or f"call_{i}",
                     "type": "function",
                     "function": {
                         "name": tc["function"]["name"] or "",
-                        "arguments":
+                        "arguments": arguments
                     }
                 })

@@ -534,7 +537,7 @@ async def _run_internal(
                     type="function",
                     function=ToolCallFunction(
                         name=mc["function"]["name"],
-                        arguments=mc["function"]["arguments"]
+                        arguments=_normalize_tool_call_arguments(mc["function"]["arguments"])
                     ),
                 ) for mc in message_tool_calls
             ],
@@ -553,12 +556,15 @@ async def _run_internal(
         if len(partial_tool_calls) > 0:
             final_tool_calls = []
             for i, tc in enumerate(partial_tool_calls):
+                arguments = tc["function"]["arguments"]
+                if isinstance(arguments, str):
+                    arguments = _normalize_tool_call_arguments(arguments)
                 final_tool_calls.append({
                     "id": tc["id"] or f"call_{i}",
                     "type": "function",
                     "function": {
                         "name": tc["function"]["name"] or "",
-                        "arguments":
+                        "arguments": arguments
                     }
                 })

@@ -844,12 +850,33 @@ def _convert_tool_calls(tool_calls: Optional[List[Dict[str, Any]]]) -> Optional[
             type='function',
             function=ToolCallFunction(
                 name=tc['function']['name'],
-                arguments=tc['function']['arguments']
+                arguments=_normalize_tool_call_arguments(tc['function']['arguments'])
             )
         )
         for tc in tool_calls
     ]

+
+def _normalize_tool_call_arguments(arguments: Any) -> Any:
+    """Strip trailing streaming artifacts so arguments remain valid JSON strings."""
+    if not arguments or not isinstance(arguments, str):
+        return arguments
+
+    decoder = json.JSONDecoder()
+    try:
+        obj, end = decoder.raw_decode(arguments)
+    except json.JSONDecodeError:
+        return arguments
+
+    remainder = arguments[end:].strip()
+    if remainder:
+        try:
+            return json.dumps(obj)
+        except (TypeError, ValueError):
+            return arguments
+
+    return arguments
+
 async def _execute_tool_calls(
     tool_calls: List[ToolCall],
     agent: Agent[Ctx, Any],
@@ -865,7 +892,8 @@ async def _execute_tool_calls(
                 tool_name=tool_call.function.name,
                 args=_try_parse_json(tool_call.function.arguments),
                 trace_id=state.trace_id,
-                run_id=state.run_id
+                run_id=state.run_id,
+                call_id=tool_call.id
             ))))

             try:
@@ -891,7 +919,8 @@ async def _execute_tool_calls(
                 trace_id=state.trace_id,
                 run_id=state.run_id,
                 status='error',
-                tool_result={'error': 'tool_not_found'}
+                tool_result={'error': 'tool_not_found'},
+                call_id=tool_call.id
             ))))

             return {
@@ -925,7 +954,8 @@ async def _execute_tool_calls(
                 trace_id=state.trace_id,
                 run_id=state.run_id,
                 status='error',
-                tool_result={'error': 'validation_error', 'details': e.errors()}
+                tool_result={'error': 'validation_error', 'details': e.errors()},
+                call_id=tool_call.id
             ))))

             return {
@@ -1019,7 +1049,7 @@ async def _execute_tool_calls(
         else:
             timeout = None
         if timeout is None:
-            timeout = config.default_tool_timeout if config.default_tool_timeout is not None else
+            timeout = config.default_tool_timeout if config.default_tool_timeout is not None else 300.0

         # Merge additional context if provided through approval
         additional_context = approval_status.additional_context if approval_status else None
@@ -1063,7 +1093,8 @@ async def _execute_tool_calls(
                 trace_id=state.trace_id,
                 run_id=state.run_id,
                 status='timeout',
-                tool_result={'error': 'timeout_error'}
+                tool_result={'error': 'timeout_error'},
+                call_id=tool_call.id
             ))))

             return {
@@ -1115,7 +1146,8 @@ async def _execute_tool_calls(
                 trace_id=state.trace_id,
                 run_id=state.run_id,
                 tool_result=tool_result,
-                status='success'
+                status='success',
+                call_id=tool_call.id
             ))))

             # Check for handoff
@@ -1153,7 +1185,8 @@ async def _execute_tool_calls(
                 trace_id=state.trace_id,
                 run_id=state.run_id,
                 status='error',
-                tool_result={'error': 'execution_error', 'detail': str(error)}
+                tool_result={'error': 'execution_error', 'detail': str(error)},
+                call_id=tool_call.id
             ))))

             return {
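Note on the new _normalize_tool_call_arguments helper above: streamed tool-call argument deltas can concatenate into one valid JSON document followed by stray trailing text, and json.JSONDecoder.raw_decode returns both the decoded object and the index where it ended, which is what makes the cleanup cheap. A standalone sketch of the same idea (the weather_args sample is hypothetical):

import json

def normalize(arguments: str) -> str:
    # Decode the first complete JSON document; if trailing junk follows it,
    # re-serialize just the decoded object (mirrors the diff's helper).
    decoder = json.JSONDecoder()
    try:
        obj, end = decoder.raw_decode(arguments)
    except json.JSONDecodeError:
        return arguments  # not JSON at all: pass through unchanged
    if arguments[end:].strip():
        return json.dumps(obj)
    return arguments

# Hypothetical streamed payload with a duplicated trailing fragment:
weather_args = '{"city": "Paris", "unit": "celsius"}{"city": "Par'
print(normalize(weather_args))  # -> {"city": "Paris", "unit": "celsius"}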
jaf/core/parallel_agents.py
ADDED
@@ -0,0 +1,339 @@
+"""
+Parallel Agent Execution for JAF Framework.
+
+This module provides functionality to execute multiple sub-agents in parallel groups,
+allowing for coordinated parallel execution with configurable grouping and result aggregation.
+"""
+
+import asyncio
+import json
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Union, Callable, TypeVar
+
+from .types import (
+    Agent,
+    Tool,
+    ToolSchema,
+    ToolSource,
+    RunConfig,
+    RunState,
+    RunResult,
+    Message,
+    ContentRole,
+    generate_run_id,
+    generate_trace_id,
+)
+from .agent_tool import create_agent_tool, AgentToolInput
+
+Ctx = TypeVar('Ctx')
+Out = TypeVar('Out')
+
+
+@dataclass
+class ParallelAgentGroup:
+    """Configuration for a group of agents to be executed in parallel."""
+    name: str
+    agents: List[Agent[Ctx, Out]]
+    shared_input: bool = True  # Whether all agents receive the same input
+    result_aggregation: str = "combine"  # "combine", "first", "majority", "custom"
+    custom_aggregator: Optional[Callable[[List[str]], str]] = None
+    timeout: Optional[float] = None
+    metadata: Optional[Dict[str, Any]] = None
+
+
+@dataclass
+class ParallelExecutionConfig:
+    """Configuration for parallel agent execution."""
+    groups: List[ParallelAgentGroup]
+    inter_group_execution: str = "sequential"  # "sequential" or "parallel"
+    global_timeout: Optional[float] = None
+    preserve_session: bool = False
+
+
+class ParallelAgentsTool:
+    """Tool that executes multiple agent groups in parallel."""
+
+    def __init__(
+        self,
+        config: ParallelExecutionConfig,
+        tool_name: str = "execute_parallel_agents",
+        tool_description: str = "Execute multiple agents in parallel groups"
+    ):
+        self.config = config
+        self.tool_name = tool_name
+        self.tool_description = tool_description
+
+        # Create tool schema
+        self.schema = ToolSchema(
+            name=tool_name,
+            description=tool_description,
+            parameters=AgentToolInput,
+            timeout=config.global_timeout
+        )
+        self.source = ToolSource.NATIVE
+        self.metadata = {"source": "parallel_agents", "groups": len(config.groups)}
+
+    async def execute(self, args: AgentToolInput, context: Ctx) -> str:
+        """Execute all configured agent groups."""
+        try:
+            if self.config.inter_group_execution == "parallel":
+                # Execute all groups in parallel
+                group_results = await asyncio.gather(*[
+                    self._execute_group(group, args.input, context)
+                    for group in self.config.groups
+                ])
+            else:
+                # Execute groups sequentially
+                group_results = []
+                for group in self.config.groups:
+                    result = await self._execute_group(group, args.input, context)
+                    group_results.append(result)
+
+            # Combine results from all groups
+            final_result = {
+                "parallel_execution_results": {
+                    group.name: result for group, result in zip(self.config.groups, group_results)
+                },
+                "execution_mode": self.config.inter_group_execution,
+                "total_groups": len(self.config.groups)
+            }
+
+            return json.dumps(final_result, indent=2)
+
+        except Exception as e:
+            return json.dumps({
+                "error": "parallel_execution_failed",
+                "message": f"Failed to execute parallel agents: {str(e)}",
+                "groups_attempted": len(self.config.groups)
+            })
+
+    async def _execute_group(
+        self,
+        group: ParallelAgentGroup,
+        input_text: str,
+        context: Ctx
+    ) -> Dict[str, Any]:
+        """Execute a single group of agents in parallel."""
+        try:
+            # Create agent tools for all agents in the group
+            agent_tools = []
+            for agent in group.agents:
+                tool = create_agent_tool(
+                    agent=agent,
+                    tool_name=f"run_{agent.name.lower().replace(' ', '_')}",
+                    tool_description=f"Execute the {agent.name} agent",
+                    timeout=group.timeout,
+                    preserve_session=self.config.preserve_session
+                )
+                agent_tools.append((agent.name, tool))
+
+            # Execute all agents in the group in parallel
+            if group.shared_input:
+                # All agents get the same input
+                tasks = [
+                    tool.execute(AgentToolInput(input=input_text), context)
+                    for _, tool in agent_tools
+                ]
+            else:
+                # This could be extended to support different inputs per agent
+                tasks = [
+                    tool.execute(AgentToolInput(input=input_text), context)
+                    for _, tool in agent_tools
+                ]
+
+            # Execute with timeout if specified
+            if group.timeout:
+                results = await asyncio.wait_for(
+                    asyncio.gather(*tasks, return_exceptions=True),
+                    timeout=group.timeout
+                )
+            else:
+                results = await asyncio.gather(*tasks, return_exceptions=True)
+
+            # Process results
+            agent_results = {}
+            for (agent_name, _), result in zip(agent_tools, results):
+                if isinstance(result, Exception):
+                    agent_results[agent_name] = {
+                        "error": True,
+                        "message": str(result),
+                        "type": type(result).__name__
+                    }
+                else:
+                    agent_results[agent_name] = {
+                        "success": True,
+                        "result": result
+                    }
+
+            # Apply result aggregation
+            aggregated_result = self._aggregate_results(group, agent_results)
+
+            return {
+                "group_name": group.name,
+                "agent_count": len(group.agents),
+                "individual_results": agent_results,
+                "aggregated_result": aggregated_result,
+                "execution_time_ms": None  # Could be added with timing
+            }
+
+        except asyncio.TimeoutError:
+            return {
+                "group_name": group.name,
+                "error": "timeout",
+                "message": f"Group {group.name} execution timed out after {group.timeout} seconds",
+                "agent_count": len(group.agents)
+            }
+        except Exception as e:
+            return {
+                "group_name": group.name,
+                "error": "execution_failed",
+                "message": str(e),
+                "agent_count": len(group.agents)
+            }
+
+    def _aggregate_results(
+        self,
+        group: ParallelAgentGroup,
+        agent_results: Dict[str, Any]
+    ) -> Union[str, Dict[str, Any]]:
+        """Aggregate results from parallel agent execution."""
+        successful_results = [
+            result["result"] for result in agent_results.values()
+            if result.get("success") and "result" in result
+        ]
+
+        if not successful_results:
+            return {"error": "no_successful_results", "message": "All agents failed"}
+
+        if group.result_aggregation == "first":
+            return successful_results[0]
+        elif group.result_aggregation == "combine":
+            return {
+                "combined_results": successful_results,
+                "result_count": len(successful_results)
+            }
+        elif group.result_aggregation == "majority":
+            # Simple majority logic - could be enhanced
+            if len(successful_results) >= len(group.agents) // 2 + 1:
+                return successful_results[0]  # Return first as majority representative
+            else:
+                return {"error": "no_majority", "results": successful_results}
+        elif group.result_aggregation == "custom" and group.custom_aggregator:
+            try:
+                return group.custom_aggregator(successful_results)
+            except Exception as e:
+                return {"error": "custom_aggregation_failed", "message": str(e)}
+        else:
+            return {"combined_results": successful_results}
+
+
+def create_parallel_agents_tool(
+    groups: List[ParallelAgentGroup],
+    tool_name: str = "execute_parallel_agents",
+    tool_description: str = "Execute multiple agents in parallel groups",
+    inter_group_execution: str = "sequential",
+    global_timeout: Optional[float] = None,
+    preserve_session: bool = False
+) -> Tool:
+    """
+    Create a tool that executes multiple agent groups in parallel.
+
+    Args:
+        groups: List of parallel agent groups to execute
+        tool_name: Name of the tool
+        tool_description: Description of the tool
+        inter_group_execution: How to execute groups ("sequential" or "parallel")
+        global_timeout: Global timeout for all executions
+        preserve_session: Whether to preserve session across agent calls
+
+    Returns:
+        A Tool that can execute parallel agent groups
+    """
+    config = ParallelExecutionConfig(
+        groups=groups,
+        inter_group_execution=inter_group_execution,
+        global_timeout=global_timeout,
+        preserve_session=preserve_session
+    )
+
+    return ParallelAgentsTool(config, tool_name, tool_description)
+
+
+def create_simple_parallel_tool(
+    agents: List[Agent],
+    group_name: str = "parallel_group",
+    tool_name: str = "execute_parallel_agents",
+    shared_input: bool = True,
+    result_aggregation: str = "combine",
+    timeout: Optional[float] = None
+) -> Tool:
+    """
+    Create a simple parallel agents tool from a list of agents.
+
+    Args:
+        agents: List of agents to execute in parallel
+        group_name: Name for the parallel group
+        tool_name: Name of the tool
+        shared_input: Whether all agents receive the same input
+        result_aggregation: How to aggregate results ("combine", "first", "majority")
+        timeout: Timeout for parallel execution
+
+    Returns:
+        A Tool that executes all agents in parallel
+    """
+    group = ParallelAgentGroup(
+        name=group_name,
+        agents=agents,
+        shared_input=shared_input,
+        result_aggregation=result_aggregation,
+        timeout=timeout
+    )
+
+    return create_parallel_agents_tool([group], tool_name=tool_name)
+
+
+# Convenience functions for common parallel execution patterns
+
+def create_language_specialists_tool(
+    language_agents: Dict[str, Agent],
+    tool_name: str = "consult_language_specialists",
+    timeout: Optional[float] = 300.0
+) -> Tool:
+    """Create a tool that consults multiple language specialists in parallel."""
+    group = ParallelAgentGroup(
+        name="language_specialists",
+        agents=list(language_agents.values()),
+        shared_input=True,
+        result_aggregation="combine",
+        timeout=timeout,
+        metadata={"languages": list(language_agents.keys())}
+    )
+
+    return create_parallel_agents_tool(
+        [group],
+        tool_name=tool_name,
+        tool_description="Consult multiple language specialists in parallel"
+    )
+
+
+def create_domain_experts_tool(
+    expert_agents: Dict[str, Agent],
+    tool_name: str = "consult_domain_experts",
+    result_aggregation: str = "combine",
+    timeout: Optional[float] = 60.0
+) -> Tool:
+    """Create a tool that consults multiple domain experts in parallel."""
+    group = ParallelAgentGroup(
+        name="domain_experts",
+        agents=list(expert_agents.values()),
+        shared_input=True,
+        result_aggregation=result_aggregation,
+        timeout=timeout,
+        metadata={"domains": list(expert_agents.keys())}
+    )
+
+    return create_parallel_agents_tool(
+        [group],
+        tool_name=tool_name,
+        tool_description="Consult multiple domain experts in parallel"
+    )
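Note: a hedged usage sketch of the new module, assuming research_agent and review_agent are already-configured Agent instances (both names are placeholders):

from jaf.core import (
    ParallelAgentGroup,
    create_parallel_agents_tool,
    create_simple_parallel_tool,
)

# One group, both agents receive the same input, results are combined.
fanout_tool = create_simple_parallel_tool(
    agents=[research_agent, review_agent],
    group_name="reviewers",
    result_aggregation="combine",  # also "first", "majority", "custom"
    timeout=120.0,
)

# Finer-grained control: two named groups, run one after the other.
staged_tool = create_parallel_agents_tool(
    groups=[
        ParallelAgentGroup(name="fast_pass", agents=[research_agent], timeout=30.0),
        ParallelAgentGroup(name="deep_pass", agents=[review_agent], timeout=120.0),
    ],
    inter_group_execution="sequential",  # or "parallel"
)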
jaf/core/streaming.py
CHANGED
@@ -209,20 +209,37 @@ async def run_streaming(
         trace_id=initial_state.trace_id
     )

-    tool_call_ids = {}
+    tool_call_ids: Dict[str, str] = {}  # Map call_id -> tool_name for in-flight tool calls

     def event_handler(event: TraceEvent) -> None:
         """Handle trace events and put them into the queue."""
         nonlocal tool_call_ids
         streaming_event = None
+        payload = event.data
+
+        def _get_event_value(keys: List[str]) -> Any:
+            for key in keys:
+                if isinstance(payload, dict) and key in payload:
+                    return payload[key]
+                if hasattr(payload, key):
+                    return getattr(payload, key)
+            return None
+
         if event.type == 'tool_call_start':
-
-
-
-
+            tool_name = _get_event_value(['tool_name', 'toolName']) or 'unknown'
+            args = _get_event_value(['args', 'arguments'])
+            call_id = _get_event_value(['call_id', 'tool_call_id', 'toolCallId'])
+
+            if not call_id:
+                call_id = f"call_{uuid.uuid4().hex[:8]}"
+                if isinstance(payload, dict):
+                    payload['call_id'] = call_id
+
+            tool_call_ids[call_id] = tool_name
+
             tool_call = StreamingToolCall(
-                tool_name=
-                arguments=
+                tool_name=tool_name,
+                arguments=args,
                 call_id=call_id,
                 status='started'
             )
@@ -233,18 +250,26 @@ async def run_streaming(
                 trace_id=initial_state.trace_id
             )
         elif event.type == 'tool_call_end':
-
-
-
-
-
-            )
-
+            tool_name = _get_event_value(['tool_name', 'toolName']) or 'unknown'
+            call_id = _get_event_value(['call_id', 'tool_call_id', 'toolCallId'])
+
+            if not call_id:
+                # Fallback to locate a pending tool call with the same tool name
+                matching_call_id = next((cid for cid, name in tool_call_ids.items() if name == tool_name), None)
+                if matching_call_id:
+                    call_id = matching_call_id
+                else:
+                    raise RuntimeError(
+                        f"Tool call end event received for unknown tool '{tool_name}'. "
+                        f"Pending call IDs: {list(tool_call_ids.keys())}."
+                    )
+
+            tool_call_ids.pop(call_id, None)
             tool_result = StreamingToolResult(
-                tool_name=
+                tool_name=tool_name,
                 call_id=call_id,
-                result=
-                status=
+                result=_get_event_value(['result']),
+                status=_get_event_value(['status']) or 'completed'
             )
             streaming_event = StreamingEvent(
                 type=StreamingEventType.TOOL_RESULT,
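Note on _get_event_value above: trace event payloads may arrive either as plain dicts (with snake_case or camelCase keys) or as dataclass instances, so the handler falls back from key lookup to attribute lookup. A self-contained illustration (StartPayload is a stand-in for the real event data type):

from dataclasses import dataclass
from typing import Any, List, Optional

@dataclass(frozen=True)
class StartPayload:  # stand-in for ToolCallStartEventData
    tool_name: str
    call_id: Optional[str] = None

def get_event_value(payload: Any, keys: List[str]) -> Any:
    # Same dict-or-attribute fallback the new event_handler uses.
    for key in keys:
        if isinstance(payload, dict) and key in payload:
            return payload[key]
        if hasattr(payload, key):
            return getattr(payload, key)
    return None

print(get_event_value({"toolName": "search"}, ["tool_name", "toolName"]))  # search
print(get_event_value(StartPayload("search", "call_1"), ["tool_name"]))    # search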
jaf/core/tracing.py
CHANGED
@@ -10,6 +10,7 @@ import json
 import time
 from datetime import datetime
 from typing import Any, Dict, List, Optional, Protocol
+import uuid

 from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
@@ -652,28 +653,36 @@ class LangfuseTraceCollector:
         # Start a span for tool calls with detailed input information
         tool_name = event.data.get('tool_name', 'unknown')
         tool_args = event.data.get("args", {})
+        call_id = event.data.get("call_id")
+        if not call_id:
+            call_id = f"{tool_name}-{uuid.uuid4().hex[:8]}"
+            try:
+                event.data["call_id"] = call_id
+            except TypeError:
+                # event.data may be immutable; log and rely on synthetic ID tracking downstream
+                print(f"[LANGFUSE] Generated synthetic call_id for tool start: {call_id}")

-        print(f"[LANGFUSE] Starting span for tool call: {tool_name}")
+        print(f"[LANGFUSE] Starting span for tool call: {tool_name} ({call_id})")

         # Track this tool call for the trace
         tool_call_data = {
             "tool_name": tool_name,
             "arguments": tool_args,
-            "call_id":
+            "call_id": call_id,
             "timestamp": datetime.now().isoformat()
         }

         # Ensure trace_id exists in tracking
         if trace_id not in self.trace_tool_calls:
             self.trace_tool_calls[trace_id] = []
-
+
         self.trace_tool_calls[trace_id].append(tool_call_data)

         # Create comprehensive input data for the tool call
         tool_input = {
             "tool_name": tool_name,
             "arguments": tool_args,
-            "call_id":
+            "call_id": call_id,
             "timestamp": datetime.now().isoformat()
         }

@@ -682,7 +691,7 @@ class LangfuseTraceCollector:
             input=tool_input,
             metadata={
                 "tool_name": tool_name,
-                "call_id":
+                "call_id": call_id,
                 "framework": "jaf",
                 "event_type": "tool_call"
             }
@@ -696,14 +705,15 @@ class LangfuseTraceCollector:
         if span_id in self.active_spans:
             tool_name = event.data.get('tool_name', 'unknown')
             tool_result = event.data.get("result")
+            call_id = event.data.get("call_id")

-            print(f"[LANGFUSE] Ending span for tool call: {tool_name}")
+            print(f"[LANGFUSE] Ending span for tool call: {tool_name} ({call_id})")

             # Track this tool result for the trace
             tool_result_data = {
                 "tool_name": tool_name,
                 "result": tool_result,
-                "call_id":
+                "call_id": call_id,
                 "timestamp": datetime.now().isoformat(),
                 "status": event.data.get("status", "completed"),
                 "tool_result": event.data.get("tool_result")
@@ -718,7 +728,7 @@ class LangfuseTraceCollector:
             tool_output = {
                 "tool_name": tool_name,
                 "result": tool_result,
-                "call_id":
+                "call_id": call_id,
                 "timestamp": datetime.now().isoformat(),
                 "status": event.data.get("status", "completed")
             }
@@ -729,7 +739,7 @@ class LangfuseTraceCollector:
                 output=tool_output,
                 metadata={
                     "tool_name": tool_name,
-                    "call_id":
+                    "call_id": call_id,
                     "result_length": len(str(tool_result)) if tool_result else 0,
                     "framework": "jaf",
                     "event_type": "tool_call_end"
@@ -791,6 +801,9 @@ class LangfuseTraceCollector:

         # Use consistent identifiers that don't depend on timestamp
         if event.type.startswith('tool_call'):
+            call_id = event.data.get('call_id') or event.data.get('tool_call_id')
+            if call_id:
+                return f"tool-{trace_id}-{call_id}"
             tool_name = event.data.get('tool_name') or event.data.get('toolName', 'unknown')
             return f"tool-{tool_name}-{trace_id}"
         elif event.type.startswith('llm_call'):
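Note on the try/except TypeError above: event.data is a plain dict for some events but a frozen dataclass for others (see the types.py changes below), and item assignment on an object without __setitem__ raises TypeError, which is what triggers the synthetic-ID log line. A minimal illustration (Data is a stand-in name):

from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class Data:  # stand-in for ToolCallStartEventData
    tool_name: str
    call_id: Optional[str] = None

payload = Data("search")
try:
    payload["call_id"] = "search-1a2b3c4d"  # no __setitem__ on a dataclass
except TypeError as exc:
    print(f"falling back to synthetic call_id tracking: {exc}")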
jaf/core/types.py
CHANGED
@@ -541,11 +541,12 @@ class ToolCallStartEventData:
     args: Any
     trace_id: TraceId
     run_id: RunId
+    call_id: Optional[str] = None

 @dataclass(frozen=True)
 class ToolCallStartEvent:
     type: Literal['tool_call_start'] = 'tool_call_start'
-    data: ToolCallStartEventData = field(default_factory=lambda: ToolCallStartEventData("", None, TraceId(""), RunId("")))
+    data: ToolCallStartEventData = field(default_factory=lambda: ToolCallStartEventData("", None, TraceId(""), RunId(""), None))

 @dataclass(frozen=True)
 class ToolCallEndEventData:
@@ -556,11 +557,12 @@ class ToolCallEndEventData:
     run_id: RunId
     tool_result: Optional[Any] = None
     status: Optional[str] = None
+    call_id: Optional[str] = None

 @dataclass(frozen=True)
 class ToolCallEndEvent:
     type: Literal['tool_call_end'] = 'tool_call_end'
-    data: ToolCallEndEventData = field(default_factory=lambda: ToolCallEndEventData("", "", TraceId(""), RunId("")))
+    data: ToolCallEndEventData = field(default_factory=lambda: ToolCallEndEventData("", "", TraceId(""), RunId(""), None, None))

 @dataclass(frozen=True)
 class HandoffEventData:
@@ -710,5 +712,5 @@ class RunConfig(Generic[Ctx]):
     on_event: Optional[Callable[[TraceEvent], None]] = None
     memory: Optional['MemoryConfig'] = None
     conversation_id: Optional[str] = None
-    default_tool_timeout: Optional[float] =
-    approval_storage: Optional['ApprovalStorage'] = None  # Storage for approval decisions
+    default_tool_timeout: Optional[float] = 300.0  # Default timeout for tool execution in seconds
+    approval_storage: Optional['ApprovalStorage'] = None  # Storage for approval decisions
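Note: with call_id threaded through both event dataclasses, emitters can correlate the start and end events of a single tool call. A sketch of constructing the widened events, assuming TraceId and RunId are importable from jaf.core.types as the defaults above suggest:

from jaf.core.types import (
    ToolCallStartEvent, ToolCallStartEventData, TraceId, RunId,
)

data = ToolCallStartEventData(
    tool_name="search",
    args={"query": "jaf parallel agents"},
    trace_id=TraceId("trace-123"),
    run_id=RunId("run-456"),
    call_id="call_abc123",  # new in 2.4.5, defaults to None
)
event = ToolCallStartEvent(data=data)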
jaf/server/server.py
CHANGED
@@ -533,12 +533,12 @@ def create_jaf_server(config: ServerConfig[Ctx]) -> FastAPI:
         async def event_stream():
             try:
                 # Send initial metadata
-                yield f"event: stream_start
+                yield f"""event: stream_start data: {json.dumps({
                     'runId': str(initial_state.run_id),
                     'traceId': str(initial_state.trace_id),
                     'conversationId': conversation_id,
                     'agent': request.agent_name
-                })}
+                })}"""

                 # Stream events from the engine
                 async for event in run_streaming(initial_state, run_config_with_memory):
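Note on the stream_start fix above: a well-formed SSE frame puts the event: and data: fields on separate lines and terminates with a blank line, which appears to be what the switch to a triple-quoted f-string is for. A hedged sketch of an equivalent explicit construction (values are illustrative):

import json

def sse_frame(event: str, payload: dict) -> str:
    # "event:" and "data:" must be separate lines; a blank line ends the frame.
    return f"event: {event}\ndata: {json.dumps(payload)}\n\n"

print(sse_frame("stream_start", {
    "runId": "run-456",
    "traceId": "trace-123",
    "conversationId": "conv-1",
    "agent": "assistant",
}))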
{jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/RECORD
RENAMED
@@ -38,21 +38,22 @@ jaf/a2a/tests/test_client.py,sha256=L5h7DtQRVlULiRhRLtrmaCoYdvmbXsgLTy3QQ6KgmNM,
 jaf/a2a/tests/test_integration.py,sha256=I7LdgwN99mAOljM9kYtK7dGMMntTSWKMw_oLOcJjinU,18454
 jaf/a2a/tests/test_protocol.py,sha256=He3vGlBfIazpppAnuSybutrvjIN3VGxEleAohrVd9hc,23287
 jaf/a2a/tests/test_types.py,sha256=PgRjDVJrHSXuu05z0B5lsSUUY5qEdQLFJbLBIExyVgI,18384
-jaf/core/__init__.py,sha256=
+jaf/core/__init__.py,sha256=PIGKm8n6OQ8jcXRS0Hn3_Zsl8m2qX91N80YJoLCJ4eU,1762
 jaf/core/agent_tool.py,sha256=tfLNaTIcOZ0dR9GBP1AHLPkLExm_dLbURnVIN4R84FQ,11806
 jaf/core/analytics.py,sha256=zFHIWqWal0bbEFCmJDc4DKeM0Ja7b_D19PqVaBI12pA,23338
 jaf/core/composition.py,sha256=IVxRO1Q9nK7JRH32qQ4p8WMIUu66BhqPNrlTNMGFVwE,26317
-jaf/core/engine.py,sha256=
+jaf/core/engine.py,sha256=bjHNn8MoE3o0BAuBCMBY3EjtpMckjWlBotJ-oinfSZ0,52111
 jaf/core/errors.py,sha256=5fwTNhkojKRQ4wZj3lZlgDnAsrYyjYOwXJkIr5EGNUc,5539
+jaf/core/parallel_agents.py,sha256=ahwYoTnkrF4xQgV-hjc5sUaWhQWQFENMZG5riNa_Ieg,12165
 jaf/core/performance.py,sha256=jedQmTEkrKMD6_Aw1h8PdG-5TsdYSFFT7Or6k5dmN2g,9974
 jaf/core/proxy.py,sha256=_WM3cpRlSQLYpgSBrnY30UPMe2iZtlqDQ65kppE-WY0,4609
 jaf/core/proxy_helpers.py,sha256=i7a5fAX9rLmO4FMBX51-yRkTFwfWedzQNgnLmeLUd_A,4370
 jaf/core/state.py,sha256=NMtYTpUYa64m1Kte6lD8LGnF2bl69HAcdgXH6f-M97c,5650
-jaf/core/streaming.py,sha256=
+jaf/core/streaming.py,sha256=h_lYHQA9ee_D5QsDO9-Vhevgi7rFXPslPzd9605AJGo,17034
 jaf/core/tool_results.py,sha256=-bTOqOX02lMyslp5Z4Dmuhx0cLd5o7kgR88qK2HO_sw,11323
 jaf/core/tools.py,sha256=84N9A7QQ3xxcOs2eUUot3nmCnt5i7iZT9VwkuzuFBxQ,16274
-jaf/core/tracing.py,sha256=
-jaf/core/types.py,sha256=
+jaf/core/tracing.py,sha256=iuVgykFUSkoBjem1k6jdVLrhRZzJn-avyxc_6W9BXPI,40159
+jaf/core/types.py,sha256=8TA5cyNAc_rp5Tn-zmvt6rMi_0atRXbewpaiB5Ss7-g,23186
 jaf/core/workflows.py,sha256=Ul-82gzjIXtkhnSMSPv-8igikjkMtW1EBo9yrfodtvI,26294
 jaf/memory/__init__.py,sha256=-L98xlvihurGAzF0DnXtkueDVvO_wV2XxxEwAWdAj50,1400
 jaf/memory/approval_storage.py,sha256=HHZ_b57kIthdR53QE5XNSII9xy1Cg-1cFUCSAZ8A4Rk,11083
@@ -73,7 +74,7 @@ jaf/providers/mcp.py,sha256=WxcC8gUFpDBBYyhorMcc1jHq3xMDMBtnwyRPthfL0S0,13074
 jaf/providers/model.py,sha256=NJTa-1k0EruDdLf2HS1ZdDpFJhHXzzfQyXAbJx9kZVc,25468
 jaf/server/__init__.py,sha256=fMPnLZBRm6t3yQrr7-PnoHAQ8qj9o6Z1AJLM1M6bIS0,392
 jaf/server/main.py,sha256=CTb0ywbPIq9ELfay5MKChVR7BpIQOoEbPjPfpzo2aBQ,2152
-jaf/server/server.py,sha256=
+jaf/server/server.py,sha256=K8XKNyadP_YqkCRSK9jCVZh52d2_IbHp_jHkKzBeB9Q,38786
 jaf/server/types.py,sha256=Gg8z1bkA7IYg94lp31iB92-4VkJr9syKA41uVCfNZBc,10544
 jaf/utils/__init__.py,sha256=4Lte7HPIpmEuGvWd9lSH9gljV11wy-yNFjECPhcejAY,1236
 jaf/utils/attachments.py,sha256=9xNzzQanCwJnBR1L6P79YQtbuRupiDluDn46SYUlHok,13542
@@ -84,9 +85,9 @@ jaf/visualization/functional_core.py,sha256=zedMDZbvjuOugWwnh6SJ2stvRNQX1Hlkb9Ab
 jaf/visualization/graphviz.py,sha256=WTOM6UP72-lVKwI4_SAr5-GCC3ouckxHv88ypCDQWJ0,12056
 jaf/visualization/imperative_shell.py,sha256=GpMrAlMnLo2IQgyB2nardCz09vMvAzaYI46MyrvJ0i4,2593
 jaf/visualization/types.py,sha256=QQcbVeQJLuAOXk8ynd08DXIS-PVCnv3R-XVE9iAcglw,1389
-jaf_py-2.4.
-jaf_py-2.4.
-jaf_py-2.4.
-jaf_py-2.4.
-jaf_py-2.4.
-jaf_py-2.4.
+jaf_py-2.4.5.dist-info/licenses/LICENSE,sha256=LXUQBJxdyr-7C4bk9cQBwvsF_xwA-UVstDTKabpcjlI,1063
+jaf_py-2.4.5.dist-info/METADATA,sha256=9oUGQOOBTFoMdCVPfnyC9ucxrBXWzvporO5hDuDXkjA,27712
+jaf_py-2.4.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+jaf_py-2.4.5.dist-info/entry_points.txt,sha256=OtIJeNJpb24kgGrqRx9szGgDx1vL9ayq8uHErmu7U5w,41
+jaf_py-2.4.5.dist-info/top_level.txt,sha256=Xu1RZbGaM4_yQX7bpalo881hg7N_dybaOW282F15ruE,4
+jaf_py-2.4.5.dist-info/RECORD,,
{jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/WHEEL
RENAMED
File without changes
{jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/entry_points.txt
RENAMED
File without changes
{jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/licenses/LICENSE
RENAMED
File without changes
{jaf_py-2.4.4.dist-info → jaf_py-2.4.5.dist-info}/top_level.txt
RENAMED
File without changes