xgae 0.2.3__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of xgae might be problematic.
- xgae/engine/engine_base.py +5 -1
- xgae/engine/mcp_tool_box.py +14 -8
- xgae/engine/responser/non_stream_responser.py +2 -1
- xgae/engine/responser/stream_responser.py +3 -2
- xgae/engine/task_engine.py +42 -5
- xgae/gaia2/are_engine.py +123 -0
- {xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/METADATA +4 -4
- {xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/RECORD +10 -9
- {xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/WHEEL +0 -0
- {xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/entry_points.txt +0 -0
xgae/engine/engine_base.py
CHANGED
@@ -6,7 +6,7 @@ class XGAError(Exception):
     """Custom exception for errors in the XGA system."""
     pass
 
-XGAMsgStatusType = Literal["error", "finish", "tool_started", "tool_completed", "tool_error", "tool_failed"]
+XGAMsgStatusType = Literal["error", "stop", "finish", "tool_started", "tool_completed", "tool_error", "tool_failed"]
 XGAResponseMsgType = Literal["user", "status", "tool", "assistant", "assistant_chunk"]
 
 class XGAResponseMessage(TypedDict, total=False):
@@ -40,6 +40,10 @@ class XGAToolResult:
 
 
 class XGAToolBox(ABC):
+    @abstractmethod
+    async def init_tool_schemas(self):
+        pass
+
     @abstractmethod
     async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
         pass
xgae/engine/mcp_tool_box.py
CHANGED
@@ -2,7 +2,8 @@ import json
 import logging
 import os
 
-from typing import List, Any, Dict, Optional, Literal
+from typing import List, Any, Dict, Optional, Literal
+from typing_extensions import override
 
 from langchain_mcp_adapters.client import MultiServerMCPClient
 from langchain_mcp_adapters.tools import load_mcp_tools
@@ -33,7 +34,11 @@ class XGAMcpToolBox(XGAToolBox):
         self.mcp_tool_schemas: Dict[str, List[XGAToolSchema]] = {}
         self.task_tool_schemas: Dict[str, Dict[str,XGAToolSchema]] = {}
 
-        self.
+        self._is_loaded_mcp_tool_schemas = False
+
+    @override
+    async def init_tool_schemas(self):
+        await self._load_mcp_tools_schema()
 
     @override
     async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
@@ -139,8 +144,8 @@ class XGAMcpToolBox(XGAToolBox):
         return result
 
 
-    async def
-    if not self.
+    async def _load_mcp_tools_schema(self)-> None:
+        if not self._is_loaded_mcp_tool_schemas:
             for server_name in self.mcp_server_names:
                 self.mcp_tool_schemas[server_name] = []
                 try:
@@ -169,11 +174,12 @@ class XGAMcpToolBox(XGAToolBox):
                             input_schema=input_schema,
                             metadata=metadata)
                         self.mcp_tool_schemas[server_name].append(tool_schema)
-
+
+            self._is_loaded_mcp_tool_schemas = True
 
     async def reload_mcp_tools_schema(self) -> None:
-        self.
-        await self.
+        self._is_loaded_mcp_tool_schemas = False
+        await self.init_tool_schemas()
 
 
     def _load_mcp_servers_config(self, mcp_config_path: str) -> Dict[str, Any]:
@@ -219,7 +225,7 @@ if __name__ == "__main__":
        #mcp_tool_box = XGAMcpToolBox()
 
        task_id = "task1"
-       await mcp_tool_box.
+       await mcp_tool_box.init_tool_schemas()
        await mcp_tool_box.creat_task_tool_box(task_id=task_id, general_tools=["*"], custom_tools=["*"])
        tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "general")
        print("general_tools_schemas" + "*"*50)
xgae/engine/task_engine.py
CHANGED
@@ -59,6 +59,8 @@ class XGATaskEngine:
 
         self.task_response_msgs: List[XGAResponseMessage] = []
 
+        self.terminate_task = False
+
     async def run_task_with_final_answer(self,
                                          task_input: Dict[str, Any],
                                          trace_id: Optional[str] = None) -> XGATaskResult:
@@ -114,8 +116,7 @@
             general_tools.append("ask")
 
         custom_tools = self.custom_tools or []
-
-        await self.tool_box.load_mcp_tools_schema()
+        await self.tool_box.init_tool_schemas()
 
         await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
         general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general")
@@ -148,6 +149,7 @@
         auto_continue = True
         while auto_continue and auto_continue_count < self.max_auto_run:
             auto_continue = False
+            iterations = auto_continue_count
 
             try:
                 async for chunk in self._run_task_once(continuous_state):
@@ -193,11 +195,15 @@
                 self.task_langfuse.root_span.event(name="engine_task_run_once_error", level="ERROR",
                                                    status_message=f"Call task_run_once error: {run_error}",
                                                    metadata={"trace": trace})
-
                 status_content = {'status_type': "error", 'role': "system", 'message': "Call run_task_once error"}
                 error_msg = self.add_response_message(type="status", content=status_content, is_llm_message=False)
                 yield error_msg
-
+            finally:
+                if not self.running_task_checkpoint("termination_check", iterations):
+                    status_content = {'status_type': "stop", 'role': "system", 'message': "Task is termiated by Stop Command"}
+                    error_msg = self.add_response_message(type="status", content=status_content, is_llm_message=False)
+                    yield error_msg
+                    break
 
     async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
         llm_messages = [{"role": "system", "content": self.task_prompt}]
@@ -213,6 +219,9 @@
             llm_messages.append(temp_assistant_message)
 
         auto_count = continuous_state.get("auto_continue_count")
+
+        self.running_task_checkpoint("before_completion", auto_count, llm_messages)
+
         langfuse_metadata = self.task_langfuse.create_llm_langfuse_meta(auto_count)
 
         llm_response = await self.llm_client.acompletion(llm_messages, langfuse_metadata)
@@ -220,12 +229,21 @@
 
         async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
             self._logging_reponse_chunk(chunk, auto_count)
+
+            if chunk['type'] == "assistant":
+                assis_content = chunk['content']
+                self.running_task_checkpoint("after_completion", auto_count, llm_messages, assis_content)
+
             yield chunk
 
     def parse_final_result(self, chunks: List[Dict[str, Any]]) -> XGATaskResult:
         final_result: XGATaskResult = None
         reverse_chunks = reversed(chunks)
         chunk = None
+
+        # if self.terminate_task:
+        #     return XGATaskResult(type="error", content="LLM Task is terminated !")
+
         try:
             finish_reason = ''
             for chunk in reverse_chunks:
@@ -233,7 +251,7 @@
                 if chunk_type == "status":
                     status_content = chunk['content']
                     status_type = status_content['status_type']
-                    if status_type == "error":
+                    if status_type == "error" or status_type == "stop":
                         error = status_content['message']
                         final_result = XGATaskResult(type="error", content=error)
                     elif status_type == "finish":
@@ -284,6 +302,25 @@
 
         return final_result
 
+
+    def stop_task(self):
+        logging.warning(f"⚠️ Begin Terminate Task: {self.task_id}")
+        self.task_langfuse.root_span.event(name="stop_task", level="DEFAULT",
+                                           status_message="Begin Terminate Task")
+        self.terminate_task = True
+
+
+    def running_task_checkpoint(self,
+                                task_state: Literal["before_completion", "after_completion", "termination_check"],
+                                iterations: int,
+                                llm_messages: List[Dict[str, Any]] = None,
+                                llm_response: Dict[str, Any] = None
+                                )-> bool:
+        if self.terminate_task and task_state == "termination_check":
+            logging.warning(f"⚠️ TASK: {self.task_id} STOP RUNNING for STOP Command !")
+        return not self.terminate_task
+
+
     def create_response_message(self, type: XGAResponseMsgType,
                                 content: Union[Dict[str, Any], List[Any], str],
                                 is_llm_message: bool,
xgae/gaia2/are_engine.py
ADDED
@@ -0,0 +1,123 @@
+import logging
+from typing import Any, Optional, Callable, Literal, Dict, List
+from typing_extensions import override
+
+from xgae.engine.engine_base import XGAToolBox
+from xgae.engine.task_engine import XGATaskEngine
+from xgae.utils.llm_client import LLMConfig
+
+
+class ARETaskEngine(XGATaskEngine):
+    def __init__(self,
+                 agent: Any,
+                 agent_id: str,
+                 system_prompt: str,
+                 max_auto_run: int,
+                 llm_config: Optional[LLMConfig] = None,
+                 tool_box: Optional[XGAToolBox] = None,
+                 pre_run_task_fn : Callable[[Any, int, List[Dict[str, Any]]], Any] = None,
+                 post_run_task_fn : Callable[[Any, int, Dict[str, Any]], Any] = None,
+                 terminate_task_fn : Callable[[Any, int], bool] = None,
+                 ):
+        super().__init__(agent_id = agent_id,
+                         general_tools = [],
+                         custom_tools = ["*"],
+                         system_prompt = system_prompt,
+                         max_auto_run = max_auto_run,
+                         llm_config = llm_config,
+                         tool_box = tool_box,
+                         )
+        self.agent = agent
+        self.pre_run_task_fn = pre_run_task_fn
+        self.post_run_task_fn = post_run_task_fn
+        self.terminate_task_fn = terminate_task_fn
+
+
+    @override
+    def running_task_checkpoint(self,
+                                task_state: Literal["before_completion", "after_completion", "termination_check"],
+                                iterations: int,
+                                llm_messages: List[Dict[str, Any]] = None,
+                                llm_response: Dict[str, Any] = None
+                                )-> bool:
+        is_continue_task = True
+
+        if task_state == "before_completion" and self.pre_run_task_fn:
+            self.pre_run_task_fn(self.agent, iterations, llm_messages)
+        elif task_state == "after_completion" and self.post_run_task_fn:
+            self.post_run_task_fn(self.agent, iterations, llm_response)
+        elif task_state == "termination_check":
+            if self.terminate_task:
+                logging.warning(f"running_task_checkpoint: ⚠️ TASK: {self.task_id} STOP RUNNING for STOP Command !")
+
+            if self.terminate_task_fn:
+                is_terminate = self.terminate_task_fn(self.agent, iterations) if self.terminate_task_fn else False
+                if is_terminate:
+                    logging.warning(f"running_task_checkpoint: ⚠️ TASK: {self.task_id} STOP RUNNING for Termination Function !")
+                    self.stop_task()
+
+            is_continue_task = not self.terminate_task
+
+        return is_continue_task
+
+
+
+if __name__ == "__main__":
+    import asyncio
+    import os
+    from xgae.utils.misc import read_file
+    from xgae.utils.setup_env import setup_logging
+    from xgae.engine.mcp_tool_box import XGAMcpToolBox
+
+    setup_logging()
+
+    def pre_run_task(agent, iterations:int, llm_messages: List[Dict[str, Any]]):
+        prompt = "\n\n".join([f"{key}: {value}" for d in llm_messages for key, value in d.items()]) if llm_messages else ""
+        logging.info(f"pre_run_task: iterations={iterations}, prompt: \n{prompt}\n")
+
+
+    def post_run_task(agent, iterations: int, llm_response: Dict[str, Any]):
+        logging.info(f"post_run_task: iterations={iterations}, prompt: \n{llm_response}\n")
+
+
+    def terminate_task(agent, iterations: int) -> bool:
+        logging.info(f"terminate_task: iterations={iterations}")
+        return iterations > 3 # can test terminate by > 3
+
+
+    async def main():
+        # Before Run Exec: uv run example-fault-tools
+        # LLAMA_API_KEY , LLAMA_API_BASE
+        tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+        system_prompt = read_file("templates/example/fault_user_prompt.txt")
+        llm_config = LLMConfig(
+            model = "openai/qwen-plus",
+            api_key = os.getenv('LLAMA_API_KEY') ,
+            api_base = os.getenv('LLAMA_API_BASE'),
+            stream =True,
+            enable_thinking = False,
+        )
+
+        engine = ARETaskEngine(
+            agent = "AREAgent", # Just for test, ARE use real Agent Object
+            agent_id = "agent_1",
+            system_prompt = system_prompt,
+            max_auto_run = 15,
+            llm_config = llm_config,
+            tool_box = tool_box,
+            pre_run_task_fn = pre_run_task,
+            post_run_task_fn = post_run_task,
+            terminate_task_fn = terminate_task
+        )
+
+        user_input = "locate 10.0.0.1 fault and solution"
+        chunks = []
+        async for chunk in engine.run_task(task_input={"role": "user", "content": user_input}):
+            chunks.append(chunk)
+            print(chunk)
+
+        final_result = engine.parse_final_result(chunks)
+        print(f"\n\nFINAL_RESULT: {final_result}")
+
+
+    asyncio.run(main())
{xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/METADATA
CHANGED
@@ -1,13 +1,13 @@
 Metadata-Version: 2.4
 Name: xgae
-Version: 0.2.3
+Version: 0.3.1
 Summary: Extreme General Agent Engine
-Requires-Python: >=3.
+Requires-Python: >=3.11
 Requires-Dist: colorlog==6.9.0
 Requires-Dist: langchain-mcp-adapters==0.1.9
 Requires-Dist: langfuse==2.60.9
-Requires-Dist: litellm==1.
-Requires-Dist: mcp==1.
+Requires-Dist: litellm==1.71.1
+Requires-Dist: mcp==1.11.0
 Provides-Extra: examples
 Requires-Dist: chromadb==1.1.0; extra == 'examples'
 Requires-Dist: langchain-community==0.3.29; extra == 'examples'
{xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/RECORD
CHANGED
@@ -1,13 +1,14 @@
 xgae/__init__.py,sha256=oBX_YzTliM-343BAlR-sD7BUZmsCJ7PY2oYrGBhsdLM,79
 xgae/engine_cli_app.py,sha256=FdmIpq8KDsgyZNfwCDgNX7FEZFeRFyGOt_H1oZF8aKs,2890
-xgae/engine/engine_base.py,sha256=
-xgae/engine/mcp_tool_box.py,sha256=
+xgae/engine/engine_base.py,sha256=bXwLxli7CK7JndAcOu-Rx4uTuy0pJDh_ZOGyhym76WY,1796
+xgae/engine/mcp_tool_box.py,sha256=7dOiP9cfBU8TfAxzbdw0V31ISF-o2TgdTVUqEemz004,11044
 xgae/engine/prompt_builder.py,sha256=6I5rjgvNJ27QJ8DDuBTplutoPZdGs9LYFv3TSgT7zmc,5045
-xgae/engine/task_engine.py,sha256=
+xgae/engine/task_engine.py,sha256=QhjTPQ_aREsT19QfT1nQwNLKUMwWdSgqlfjfCNq_dho,23470
 xgae/engine/task_langfuse.py,sha256=ifkGrPBv2daLTKE-fCfEtOoI0n4Pd-lCwhyRRL0h308,2850
-xgae/engine/responser/non_stream_responser.py,sha256=
+xgae/engine/responser/non_stream_responser.py,sha256=WFzrT0tHGVLi_AR1IrIPPvpQ94ne7slwbFyXaoTpCQc,5318
 xgae/engine/responser/responser_base.py,sha256=jhl1Bdz1Fs3KofGEymThNXlQuCORFTTkTAR_U47krds,24403
-xgae/engine/responser/stream_responser.py,sha256=
+xgae/engine/responser/stream_responser.py,sha256=5n7i2QnE_u7RP1bcJFky8sFPjYMjfYY9NcR5TeFxl9Y,7642
+xgae/gaia2/are_engine.py,sha256=QPvyWEydeiOECQ7WfxjMIurQHD_ET7uWQCnXVCh_ZQk,5094
 xgae/tools/without_general_tools_app.py,sha256=KqsdhxD3hvTpiygaGUVHysRFjvv_1A8zOwMKN1J0J0U,3821
 xgae/utils/__init__.py,sha256=ElaGS-zdeZeu6is41u3Ny7lkvhg7BDSK-jMNg9j6K5A,499
 xgae/utils/json_helpers.py,sha256=WD4G5U9Dh8N6J9O0L5wGyqj-NHi09kcXHGdLD_26nlc,3607
@@ -15,7 +16,7 @@ xgae/utils/llm_client.py,sha256=rqnu_NYXBC0hl4aozP5UOSyf0q-ONB5ywtnrXzA88OE,1505
 xgae/utils/misc.py,sha256=aMWOvJ9VW52q-L9Lkjl1hvXqLwpJAmyxA-Z8jzqFG0U,907
 xgae/utils/setup_env.py,sha256=MqNG0c2QQBDFU1kI8frxr9kB5d08Mmi3QZ1OoorgIa0,2662
 xgae/utils/xml_tool_parser.py,sha256=Mb0d8kBrfyAEvUwW1Nqir-3BgxZRr0ZX3WymQouuFSo,4859
-xgae-0.
-xgae-0.
-xgae-0.
-xgae-0.
+xgae-0.3.1.dist-info/METADATA,sha256=vOaoxeqkHDKwSuRUscc9--4KpuOTqM5BwEXqsCPUQ1I,470
+xgae-0.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+xgae-0.3.1.dist-info/entry_points.txt,sha256=wmvgtMQbtzTbDPETS-tbQJD7jVlcs4hp0w6wOB0ooCc,229
+xgae-0.3.1.dist-info/RECORD,,

{xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/WHEEL
File without changes

{xgae-0.2.3.dist-info → xgae-0.3.1.dist-info}/entry_points.txt
File without changes