xgae 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of xgae has been flagged as potentially problematic.

xgae/engine/responser/xga_non_stream_responser.py DELETED
@@ -1,213 +0,0 @@
- import uuid
- import logging
-
- from typing import List, Dict, Any, AsyncGenerator, override, Optional
-
- from xgae.engine.responser.xga_responser_base import TaskResponseProcessor, TaskResponseContext, TaskRunContinuousState
-
- from xgae.utils.json_helpers import (
-     safe_json_parse,
-     to_json_string, format_for_yield
- )
-
- class NonStreamTaskResponser(TaskResponseProcessor):
-     def __init__(self, response_context: TaskResponseContext):
-         super().__init__(response_context)
-
-     @override
-     async def process_response(self,
-                                llm_response: Any,
-                                prompt_messages: List[Dict[str, Any]],
-                                continuous_state: Optional[TaskRunContinuousState] = None) -> AsyncGenerator[Dict[str, Any], None]:
-         content = ""
-         all_tool_data = []  # Stores {'tool_call': ..., 'parsing_details': ...}
-         tool_index = 0
-         assistant_message_object = None
-         tool_result_message_objects = {}
-         finish_reason = None
-
-         native_tool_calls_for_message = []
-         llm_model = self.response_context.get("model_name")
-         thread_id = self.response_context.get("task_id")
-         thread_run_id = self.response_context.get("task_run_id")
-         tool_execution_strategy = self.response_context.get("tool_execution_strategy", "parallel")
-         xml_adding_strategy = self.response_context.get("xml_adding_strategy", "user_message")
-         max_xml_tool_calls = self.response_context.get("max_xml_tool_calls", 0)
-         try:
-             # Save and Yield thread_run_start status message
-             start_content = {"status_type": "thread_run_start", "thread_run_id": thread_run_id}
-             start_msg_obj = self.add_message(
-                 type="status", content=start_content,
-                 is_llm_message=False, metadata={"thread_run_id": thread_run_id}
-             )
-             if start_msg_obj: yield format_for_yield(start_msg_obj)
-
-             # Extract finish_reason, content, tool calls
-             if hasattr(llm_response, 'choices') and llm_response.choices:
-                 if hasattr(llm_response.choices[0], 'finish_reason'):
-                     finish_reason = llm_response.choices[0].finish_reason
-                     logging.info(f"Non-streaming finish_reason: {finish_reason}")
-                     self.trace.event(name="non_streaming_finish_reason", level="DEFAULT",
-                                      status_message=(f"Non-streaming finish_reason: {finish_reason}"))
-                 response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0],
-                                                                               'message') else None
-                 if response_message:
-                     if hasattr(response_message, 'content') and response_message.content:
-                         content = response_message.content
-
-                         parsed_xml_data = self._parse_xml_tool_calls(content)
-                         if max_xml_tool_calls > 0 and len(parsed_xml_data) > max_xml_tool_calls:
-                             # Truncate content and tool data if limit exceeded
-                             # ... (Truncation logic similar to streaming) ...
-                             if parsed_xml_data:
-                                 xml_chunks = self._extract_xml_chunks(content)[:max_xml_tool_calls]
-                                 if xml_chunks:
-                                     last_chunk = xml_chunks[-1]
-                                     last_chunk_pos = content.find(last_chunk)
-                                     if last_chunk_pos >= 0: content = content[:last_chunk_pos + len(last_chunk)]
-                                 parsed_xml_data = parsed_xml_data[:max_xml_tool_calls]
-                             finish_reason = "xml_tool_limit_reached"
-                         all_tool_data.extend(parsed_xml_data)
-
-                     # if config.native_tool_calling and hasattr(response_message,
-                     #         'tool_calls') and response_message.tool_calls:
-                     #     for tool_call in response_message.tool_calls:
-                     #         if hasattr(tool_call, 'function'):
-                     #             exec_tool_call = {
-                     #                 "function_name": tool_call.function.name,
-                     #                 "arguments": safe_json_parse(tool_call.function.arguments) if isinstance(
-                     #                     tool_call.function.arguments, str) else tool_call.function.arguments,
-                     #                 "id": tool_call.id if hasattr(tool_call, 'id') else str(uuid.uuid4())
-                     #             }
-                     #             all_tool_data.append({"tool_call": exec_tool_call, "parsing_details": None})
-                     #             native_tool_calls_for_message.append({
-                     #                 "id": exec_tool_call["id"], "type": "function",
-                     #                 "function": {
-                     #                     "name": tool_call.function.name,
-                     #                     "arguments": tool_call.function.arguments if isinstance(
-                     #                         tool_call.function.arguments, str) else to_json_string(
-                     #                         tool_call.function.arguments)
-                     #                 }
-                     #             })
-
-             # --- SAVE and YIELD Final Assistant Message ---
-             message_data = {"role": "assistant", "content": content,
-                             "tool_calls": native_tool_calls_for_message or None}
-             assistant_message_object = self._add_message_with_agent_info(type="assistant", content=message_data,
-                                                                          is_llm_message=True, metadata={"thread_run_id": thread_run_id}
-                                                                          )
-             if assistant_message_object:
-                 yield assistant_message_object
-             else:
-                 logging.error(f"Failed to save non-streaming assistant message for thread {thread_id}")
-                 self.trace.event(name="failed_to_save_non_streaming_assistant_message_for_thread", level="ERROR",
-                                  status_message=(
-                                      f"Failed to save non-streaming assistant message for thread {thread_id}"))
-                 err_content = {"role": "system", "status_type": "error", "message": "Failed to save assistant message"}
-                 err_msg_obj = self.add_message(
-                     type="status", content=err_content,
-                     is_llm_message=False, metadata={"thread_run_id": thread_run_id}
-                 )
-                 if err_msg_obj: yield format_for_yield(err_msg_obj)
-
-             # --- Execute Tools and Yield Results ---
-             tool_calls_to_execute = [item['tool_call'] for item in all_tool_data]
-             if tool_calls_to_execute:
-                 logging.info(
-                     f"Executing {len(tool_calls_to_execute)} tools with strategy: {tool_execution_strategy}")
-                 self.trace.event(name="executing_tools_with_strategy", level="DEFAULT", status_message=(
-                     f"Executing {len(tool_calls_to_execute)} tools with strategy: {tool_execution_strategy}"))
-                 tool_results = await self._execute_tools(tool_calls_to_execute, tool_execution_strategy)
-
-                 for i, (returned_tool_call, result) in enumerate(tool_results):
-                     original_data = all_tool_data[i]
-                     tool_call_from_data = original_data['tool_call']
-                     parsing_details = original_data['parsing_details']
-                     current_assistant_id = assistant_message_object['message_id'] if assistant_message_object else None
-
-                     context = self._create_tool_context(
-                         tool_call_from_data, tool_index, current_assistant_id, parsing_details
-                     )
-                     context.result = result
-
-                     # Save and Yield start status
-                     started_msg_obj = self._yield_and_save_tool_started(context, thread_id, thread_run_id)
-                     if started_msg_obj: yield format_for_yield(started_msg_obj)
-
-                     # Save tool result
-                     saved_tool_result_object = self._add_tool_result(
-                         thread_id, tool_call_from_data, result, xml_adding_strategy,
-                         current_assistant_id, parsing_details
-                     )
-
-                     # Save and Yield completed/failed status
-                     completed_msg_obj = self._yield_and_save_tool_completed(
-                         context,
-                         saved_tool_result_object['message_id'] if saved_tool_result_object else None,
-                         thread_id, thread_run_id
-                     )
-                     if completed_msg_obj: yield format_for_yield(completed_msg_obj)
-
-                     # Yield the saved tool result object
-                     if saved_tool_result_object:
-                         tool_result_message_objects[tool_index] = saved_tool_result_object
-                         yield format_for_yield(saved_tool_result_object)
-                     else:
-                         logging.error(f"Failed to save tool result for index {tool_index}")
-                         self.trace.event(name="failed_to_save_tool_result_for_index", level="ERROR",
-                                          status_message=(f"Failed to save tool result for index {tool_index}"))
-
-                     tool_index += 1
-
-             # --- Save and Yield Final Status ---
-             if finish_reason:
-                 finish_content = {"status_type": "finish", "finish_reason": finish_reason}
-                 finish_msg_obj = self.add_message(
-                     type="status", content=finish_content,
-                     is_llm_message=False, metadata={"thread_run_id": thread_run_id}
-                 )
-                 if finish_msg_obj: yield format_for_yield(finish_msg_obj)
-
-             # --- Save and Yield assistant_response_end ---
-             if assistant_message_object:  # Only save if assistant message was saved
-                 try:
-                     # Save the full LiteLLM response object directly in content
-                     self.add_message(
-                         type="assistant_response_end",
-                         content=llm_response,
-                         is_llm_message=False,
-                         metadata={"thread_run_id": thread_run_id}
-                     )
-                     logging.info("Assistant response end saved for non-stream")
-                 except Exception as e:
-                     logging.error(f"Error saving assistant response end for non-stream: {str(e)}")
-                     self.trace.event(name="error_saving_assistant_response_end_for_non_stream", level="ERROR",
-                                      status_message=(f"Error saving assistant response end for non-stream: {str(e)}"))
-
-         except Exception as e:
-             logging.error(f"Error processing non-streaming response: {str(e)}", exc_info=True)
-             self.trace.event(name="error_processing_non_streaming_response", level="ERROR",
-                              status_message=(f"Error processing non-streaming response: {str(e)}"))
-             # Save and yield error status
-             err_content = {"role": "system", "status_type": "error", "message": str(e)}
-             err_msg_obj = self.add_message(
-                 type="status", content=err_content,
-                 is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
-             )
-             if err_msg_obj: yield format_for_yield(err_msg_obj)
-
-             # Re-raise the same exception (not a new one) to ensure proper error propagation
-             logging.critical(f"Re-raising error to stop further processing: {str(e)}")
-             self.trace.event(name="re_raising_error_to_stop_further_processing", level="CRITICAL",
-                              status_message=(f"Re-raising error to stop further processing: {str(e)}"))
-             raise  # Use bare 'raise' to preserve the original exception with its traceback
-
-         finally:
-             # Save and Yield the final thread_run_end status
-             end_content = {"status_type": "thread_run_end"}
-             end_msg_obj = self.add_message(
-                 type="status", content=end_content,
-                 is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
-             )
-             if end_msg_obj: yield format_for_yield(end_msg_obj)
-
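
The deleted processor is an async generator: the engine in the next file awaits a single LLM completion, then iterates process_response() and forwards each yielded message dict downstream. A minimal sketch of that driver loop, assuming a processor instance and a LiteLLM-style completion object already exist (drive() and its arguments are illustrative, not part of the package); decoding status payloads with json.loads mirrors how _run_task_auto below parses them:

import json

async def drive(processor, llm_response, prompt_messages):
    # Each chunk is a message dict; "status" chunks carry a JSON-encoded
    # payload such as {"status_type": "finish", "finish_reason": "stop"}.
    async for chunk in processor.process_response(llm_response, prompt_messages):
        if chunk.get("type") == "status":
            status = json.loads(chunk.get("content", "{}"))
            print("status:", status.get("status_type"))
        else:
            print("message:", chunk)
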
xgae/engine/xga_engine.py DELETED
@@ -1,278 +0,0 @@
-
- import logging
- import json
-
- from typing import List, Any, Dict, Optional, AsyncGenerator, cast, Union, Literal
- from uuid import uuid4
-
- from xgae.engine.responser.xga_responser_base import TaskResponseContext, TaskResponseProcessor, TaskRunContinuousState
- from xgae.engine.xga_base import XGAContextMsg, XGAToolBox, XGAResponseMsg
- from xgae.utils.llm_client import LLMClient, LLMConfig
- from xgae.utils.setup_env import langfuse
- from xgae.utils.utils import handle_error
-
- from xga_prompt_builder import XGAPromptBuilder
- from xga_mcp_tool_box import XGAMcpToolBox
-
- class XGATaskEngine:
-     def __init__(self,
-                  session_id: Optional[str] = None,
-                  task_id: Optional[str] = None,
-                  agent_id: Optional[str] = None,
-                  system_prompt: Optional[str] = None,
-                  llm_config: Optional[LLMConfig] = None,
-                  prompt_builder: Optional[XGAPromptBuilder] = None,
-                  tool_box: Optional[XGAToolBox] = None):
-         self.session_id = session_id if session_id else f"xga_sid_{uuid4()}"
-         self.task_id = task_id if task_id else f"xga_task_{uuid4()}"
-         self.agent_id = agent_id
-
-         self.llm_client = LLMClient(llm_config)
-         self.model_name = self.llm_client.model_name
-         self.is_stream = self.llm_client.is_stream
-
-         self.prompt_builder = prompt_builder or XGAPromptBuilder(system_prompt)
-         self.tool_box = tool_box or XGAMcpToolBox()
-
-         self.task_context_msgs: List[XGAContextMsg] = []
-         self.task_no = -1
-         self.task_run_id = f"{self.task_id}[{self.task_no}]"
-         self.trace_id = None
-
-     async def __async_init__(self, general_tools: List[str], custom_tools: List[str]) -> None:
-         await self.tool_box.load_mcp_tools_schema()
-         await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
-         general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
-         custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom_tool")
-
-         self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
-
-     @classmethod
-     async def create(cls,
-                      session_id: Optional[str] = None,
-                      task_id: Optional[str] = None,
-                      agent_id: Optional[str] = None,
-                      system_prompt: Optional[str] = None,
-                      general_tools: Optional[List[str]] = None,
-                      custom_tools: Optional[List[str]] = None,
-                      llm_config: Optional[LLMConfig] = None,
-                      prompt_builder: Optional[XGAPromptBuilder] = None,
-                      tool_box: Optional[XGAToolBox] = None) -> 'XGATaskEngine':
-         engine: XGATaskEngine = cls(session_id=session_id,
-                                     task_id=task_id,
-                                     agent_id=agent_id,
-                                     system_prompt=system_prompt,
-                                     llm_config=llm_config,
-                                     prompt_builder=prompt_builder,
-                                     tool_box=tool_box)
-
-         general_tools = general_tools or ["complete"]
-         custom_tools = custom_tools or []
-         await engine.__async_init__(general_tools, custom_tools)
-
-         logging.info("*" * 30 + f" XGATaskEngine Task '{engine.task_id}' Initialized " + "*" * 30)
-         logging.info(f"model_name={engine.model_name}, is_stream={engine.is_stream}, trace_id={engine.trace_id}")
-         logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
-
-         return engine
-
-
-     async def run_task(self,
-                        task_message: Dict[str, Any],
-                        max_auto_run: int = 25,
-                        trace_id: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
-         try:
-             self.trace_id = trace_id or self.trace_id or langfuse.create_trace_id()
-
-             self.task_no += 1
-             self.task_run_id = f"{self.task_id}[{self.task_no}]"
-
-             self.add_context_msg(type="user", content=task_message, is_llm_message=True)
-
-             if max_auto_run <= 1:
-                 continuous_state: TaskRunContinuousState = {
-                     "accumulated_content": "",
-                     "auto_continue_count": 0,
-                     "auto_continue": False
-                 }
-                 async for chunk in self._run_task_once(continuous_state):
-                     yield chunk
-             else:
-                 async for chunk in self._run_task_auto(max_auto_run):
-                     yield chunk
-         finally:
-             await self.tool_box.destroy_task_tool_box(self.task_id)
-
-     async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
-         llm_messages = [{"role": "system", "content": self.task_prompt}]
-         cxt_llm_contents = self._get_context_llm_contents()
-         llm_messages.extend(cxt_llm_contents)
-
-         partial_content = continuous_state.get('accumulated_content', '')
-         if partial_content:
-             temp_assistant_message = {
-                 "role": "assistant",
-                 "content": partial_content
-             }
-             llm_messages.append(temp_assistant_message)
-
-         llm_response = await self.llm_client.create_completion(llm_messages)
-         response_processor = self._create_response_processer()
-
-         async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
-             yield chunk
-
-
-     async def _run_task_auto(self, max_auto_run: int) -> AsyncGenerator:
-         continuous_state: TaskRunContinuousState = {
-             "accumulated_content": "",
-             "auto_continue_count": 0,
-             "auto_continue": True
-         }
-
-         def update_continuous_state(_auto_continue_count, _auto_continue):
-             continuous_state["auto_continue_count"] = _auto_continue_count
-             continuous_state["auto_continue"] = _auto_continue
-
-         auto_continue_count = 0
-         auto_continue = True
-         while auto_continue and auto_continue_count < max_auto_run:
-             auto_continue = False
-
-             try:
-                 async for chunk in self._run_task_once(continuous_state):
-                     try:
-                         if chunk.get("type") == "status":
-                             content = json.loads(chunk.get('content', '{}'))
-                             status_type = content.get('status_type', None)
-                             if status_type == "error":
-                                 logging.error(f"run_task_auto: task_response error: {chunk.get('message', 'Unknown error')}")
-                                 yield chunk
-                                 return
-                             elif status_type == 'finish':
-                                 finish_reason = content.get('finish_reason', None)
-                                 if finish_reason == 'stop':
-                                     auto_continue = True
-                                     auto_continue_count += 1
-                                     update_continuous_state(auto_continue_count, auto_continue)
-                                     logging.info(f"run_task_auto: Detected finish_reason='stop', auto-continuing ({auto_continue_count}/{max_auto_run})")
-                                     continue
-                                 elif finish_reason == 'xml_tool_limit_reached':
-                                     logging.info("run_task_auto: Detected finish_reason='xml_tool_limit_reached', stopping auto-continue")
-                                     auto_continue = False
-                                     update_continuous_state(auto_continue_count, auto_continue)
-                                 elif finish_reason == 'length':
-                                     auto_continue = True
-                                     auto_continue_count += 1
-                                     update_continuous_state(auto_continue_count, auto_continue)
-                                     logging.info(f"run_task_auto: Detected finish_reason='length', auto-continuing ({auto_continue_count}/{max_auto_run})")
-                                     continue
-                     except Exception as parse_error:
-                         logging.error(f"run_task_auto: Error in parse chunk: {str(parse_error)}")
-                         yield {
-                             "type": "status",
-                             "status": "error",
-                             "message": f"Error in parse chunk: {str(parse_error)}"
-                         }
-                         return
-
-                     # Otherwise just yield the chunk normally
-                     yield chunk
-
-                 # If not auto-continuing, we're done
-                 if not auto_continue:
-                     break
-             except Exception as run_error:
-                 logging.error(f"run_task_auto: Call task_run_once error: {str(run_error)}")
-                 yield {
-                     "type": "status",
-                     "status": "error",
-                     "message": f"Call task_run_once error: {str(run_error)}"
-                 }
-                 return
-
-     def add_context_msg(self, type: Literal["user", "status", "tool", "assistant", "assistant_response_end"],
-                         content: Union[Dict[str, Any], List[Any], str],
-                         is_llm_message: bool,
-                         metadata: Optional[Dict[str, Any]] = None) -> XGAContextMsg:
-         message = XGAContextMsg(
-             message_id=f"xga_msg_{uuid4()}",
-             type=type,
-             content=content,
-             is_llm_message=is_llm_message,
-             metadata=metadata,
-             session_id=self.session_id,
-             agent_id=self.agent_id,
-             task_id=self.task_id,
-             task_run_id=self.task_run_id,
-             trace_id=self.trace_id
-         )
-         self.task_context_msgs.append(message)
-
-         return message
-
-     def _get_context_llm_contents(self) -> List[Dict[str, Any]]:
-         llm_messages = []
-         for message in self.task_context_msgs:
-             if message["is_llm_message"]:
-                 llm_messages.append(message)
-
-         cxt_llm_contents = []
-         for llm_message in llm_messages:
-             content = llm_message["content"]
-             # @todo content List type
-             if isinstance(content, str):
-                 try:
-                     _content = json.loads(content)
-                     cxt_llm_contents.append(_content)
-                 except json.JSONDecodeError as e:
-                     logging.error(f"get_context_llm_contents: Failed to decode json, content=:{content}")
-                     handle_error(e)
-             else:
-                 cxt_llm_contents.append(content)
-
-         return cxt_llm_contents
-
-     def _create_response_processer(self) -> TaskResponseProcessor:
-         response_context = self._create_response_context()
-         is_stream = response_context.get("is_stream", False)
-         if is_stream:
-             from xgae.engine.responser.xga_stream_responser import StreamTaskResponser
-             return StreamTaskResponser(response_context)
-         else:
-             from xgae.engine.responser.xga_non_stream_responser import NonStreamTaskResponser
-             return NonStreamTaskResponser(response_context)
-
-     def _create_response_context(self) -> TaskResponseContext:
-         response_context: TaskResponseContext = {
-             "is_stream": self.is_stream,
-             "task_id": self.task_id,
-             "task_run_id": self.task_run_id,
-             "trace_id": self.trace_id,
-             "model_name": self.model_name,
-             "max_xml_tool_calls": 0,
-             "add_context_msg": self.add_context_msg,
-             "tool_box": self.tool_box,
-             "tool_execution_strategy": "parallel",
-             "xml_adding_strategy": "user_message",
-         }
-         return response_context
-
- if __name__ == "__main__":
-     import asyncio
-     from xgae.utils.utils import read_file
-     async def main():
-         tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-         system_prompt = read_file("templates/scp_test_prompt.txt")
-         engine = await XGATaskEngine.create(tool_box=tool_box,
-                                             general_tools=[],
-                                             custom_tools=["bomc_fault.*"],
-                                             llm_config=LLMConfig(stream=False),
-                                             system_prompt=system_prompt)
-         # engine = await XGATaskEngine.create()
-
-         async for chunk in engine.run_task(task_message={"role": "user", "content": "Locate the fault on 10.0.0.1"},
-                                            max_auto_run=8):
-             print(chunk)
-
-     asyncio.run(main())
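
Taken together, run_task() auto-continues a task while the model reports finish_reason 'stop' or 'length' (up to max_auto_run iterations) and stops on 'xml_tool_limit_reached' or any error status; max_auto_run <= 1 bypasses the auto-continue loop entirely. A minimal consumer sketch, assuming xgae==0.1.4 is installed and whatever LLM credentials LLMClient expects are configured; note that the bare sibling imports above (from xga_prompt_builder import ...) suggest the module was run with xgae/engine on sys.path, so the import below may need the same arrangement:

import asyncio
from xgae.engine.xga_engine import XGATaskEngine

async def main():
    # Defaults mirror the commented-out creation call above:
    # general_tools falls back to ["complete"], custom_tools to [].
    engine = await XGATaskEngine.create()
    async for chunk in engine.run_task(
            task_message={"role": "user", "content": "ping"},
            max_auto_run=1):  # <= 1 runs the task exactly once
        print(chunk)

asyncio.run(main())
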
xgae-0.1.4.dist-info/RECORD DELETED
@@ -1,16 +0,0 @@
- xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- xgae/engine/xga_base.py,sha256=yM8YdThOVHo_TFrLUiu33hZzU2H9knQLs3JfD6hOx24,1669
- xgae/engine/xga_engine.py,sha256=7XRRC6KKz3a2EvH7P9k8szbCCm2O6MxIL674GzUkwHI,12940
- xgae/engine/xga_mcp_tool_box.py,sha256=dvToorrw8FJq4NrzUz494czI6QhT3bvmJQlt5oFgVBA,9994
- xgae/engine/xga_prompt_builder.py,sha256=RuTvQCNufqxDwVvSOPXR0qxAc42cG7NuIaUy9amu66A,4351
- xgae/engine/responser/xga_non_stream_responser.py,sha256=HlSN025jIsl-JY_n6fEdqJQkqc1UqCrgFr6K23uXF3E,12704
- xgae/engine/responser/xga_responser_base.py,sha256=IBJPQELMxZSpnz8YlSCgvPNSHEEUp8_vglotVnHoSeY,36808
- xgae/engine/responser/xga_stream_responser.py,sha256=FESVliTzHFy8BkTudi_Ftcty6QFpJWx7kYRubSuLqsg,50370
- xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
- xgae/utils/llm_client.py,sha256=mgzn8heUyRm92HTLEYGdfsGEpFtD-xLFr39P98_JP0s,12402
- xgae/utils/setup_env.py,sha256=-Ehv7_E9udHc8AjP66Y78E4X7_G6gpuNJkioCh5fn4A,2902
- xgae/utils/utils.py,sha256=cCYmWjKFksZ8BRD1YYnaM_jTLVHAg1ibEdjsczEUO6k,1134
- xgae/utils/xml_tool_parser.py,sha256=7Ei7X8zSgVct0fFCSmxDtknCLtdrUIwL9hy_0qSNlvs,7546
- xgae-0.1.4.dist-info/METADATA,sha256=R_nN1j5mESZfEQIiLFBQufsOc_Hbd6190GBLgo7-k-o,309
- xgae-0.1.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- xgae-0.1.4.dist-info/RECORD,,