xgae 0.1.17__py3-none-any.whl → 0.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic. See the package's registry page for more details.

xgae/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
1
  if __name__ == "__main__":
2
- from xgae.cli_app import main
2
+ from xgae.engine_cli_app import main
3
3
 
4
4
  main()
@@ -21,9 +21,12 @@ class XGATaskResult(TypedDict, total=False):
21
21
  content: str
22
22
  attachments: Optional[List[str]]
23
23
 
24
+ XGAToolType = Literal["general", "custom", "agent"]
25
+
24
26
  @dataclass
25
27
  class XGAToolSchema:
26
28
  tool_name: str
29
+ tool_type: XGAToolType
27
30
  server_name: str
28
31
  description: str
29
32
  input_schema: Dict[str, Any]
@@ -46,7 +49,7 @@ class XGAToolBox(ABC):
46
49
  pass
47
50
 
48
51
  @abstractmethod
49
- def get_task_tool_schemas(self, task_id: str, type: Literal["general_tool", "custom_tool"]) -> List[XGAToolSchema]:
52
+ def get_task_tool_schemas(self, task_id: str, type: XGAToolType) -> List[XGAToolSchema]:
50
53
  pass
51
54
 
52
55
  @abstractmethod
@@ -7,10 +7,11 @@ from typing import List, Any, Dict, Optional, Literal, override
7
7
  from langchain_mcp_adapters.client import MultiServerMCPClient
8
8
  from langchain_mcp_adapters.tools import load_mcp_tools
9
9
 
10
- from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
10
+ from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult, XGAToolType
11
11
 
12
12
  class XGAMcpToolBox(XGAToolBox):
13
13
  GENERAL_MCP_SERVER_NAME = "xga_general"
14
+ AGENT_MCP_SERVER_PREFIX = "_@_"
14
15
 
15
16
  def __init__(self,
16
17
  custom_mcp_server_file: Optional[str] = None,
@@ -37,7 +38,7 @@ class XGAMcpToolBox(XGAToolBox):
37
38
  @override
38
39
  async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
39
40
  task_tool_schemas = {}
40
- general_tool_schemas = self.mcp_tool_schemas.get(XGAMcpToolBox.GENERAL_MCP_SERVER_NAME, {})
41
+ general_tool_schemas = self.mcp_tool_schemas.get(self.GENERAL_MCP_SERVER_NAME, {})
41
42
  if "*" in general_tools:
42
43
  task_tool_schemas = {tool_schema.tool_name: tool_schema for tool_schema in general_tool_schemas}
43
44
  else:
@@ -49,7 +50,7 @@ class XGAMcpToolBox(XGAToolBox):
49
50
  if len(custom_tools) == 1 and custom_tools[0] == "*":
50
51
  custom_tools = []
51
52
  for server_name in self.mcp_server_names:
52
- if server_name != XGAMcpToolBox.GENERAL_MCP_SERVER_NAME:
53
+ if server_name != self.GENERAL_MCP_SERVER_NAME:
53
54
  custom_tools.append(f"{server_name}.*")
54
55
 
55
56
  for server_tool_name in custom_tools:
@@ -76,7 +77,7 @@ class XGAMcpToolBox(XGAToolBox):
76
77
 
77
78
  @override
78
79
  async def destroy_task_tool_box(self, task_id: str):
79
- tool_schemas = self.get_task_tool_schemas(task_id, type="general_tool")
80
+ tool_schemas = self.get_task_tool_schemas(task_id, "general")
80
81
  if len(tool_schemas) > 0:
81
82
  await self.call_tool(task_id, "end_task", {'task_id': task_id})
82
83
  self.task_tool_schemas.pop(task_id, None)
@@ -88,14 +89,12 @@ class XGAMcpToolBox(XGAToolBox):
88
89
  return task_tool_names
89
90
 
90
91
  @override
91
- def get_task_tool_schemas(self, task_id: str, type: Literal["general_tool", "custom_tool"]) -> List[XGAToolSchema]:
92
+ def get_task_tool_schemas(self, task_id: str, tool_type: XGAToolType) -> List[XGAToolSchema]:
92
93
  task_tool_schemas = []
93
94
 
94
95
  all_task_tool_schemas = self.task_tool_schemas.get(task_id, {})
95
96
  for tool_schema in all_task_tool_schemas.values():
96
- if type == "general_tool" and tool_schema.server_name == self.GENERAL_MCP_SERVER_NAME:
97
- task_tool_schemas.append(tool_schema)
98
- elif type == "custom_tool" and tool_schema.server_name != self.GENERAL_MCP_SERVER_NAME:
97
+ if tool_schema.tool_type == tool_type:
99
98
  task_tool_schemas.append(tool_schema)
100
99
 
101
100
  return task_tool_schemas
@@ -114,16 +113,16 @@ class XGAMcpToolBox(XGAToolBox):
114
113
  async with self._mcp_client.session(server_name) as session:
115
114
  tools = await load_mcp_tools(session)
116
115
  mcp_tool = next((t for t in tools if t.name == tool_name), None)
117
- is_general_tool = False
116
+
118
117
  if mcp_tool:
119
118
  tool_args = args or {}
120
- if server_name == self.GENERAL_MCP_SERVER_NAME:
119
+ tool_type = self._get_tool_type(server_name)
120
+ if tool_type == "general" or tool_type == "agent":
121
121
  tool_args = dict({'task_id': task_id}, **tool_args)
122
- is_general_tool = True
123
122
 
124
123
  try:
125
124
  tool_result = await mcp_tool.arun(tool_args)
126
- if is_general_tool:
125
+ if tool_type == "general":
127
126
  tool_result = json.loads(tool_result)
128
127
  result = XGAToolResult(success=tool_result['success'], output=str(tool_result['output']))
129
128
  else:
@@ -144,11 +143,17 @@ class XGAMcpToolBox(XGAToolBox):
144
143
  if not self.is_loaded_tool_schemas:
145
144
  for server_name in self.mcp_server_names:
146
145
  self.mcp_tool_schemas[server_name] = []
147
- mcp_tools = await self._mcp_client.get_tools(server_name=server_name)
146
+ try:
147
+ mcp_tools = await self._mcp_client.get_tools(server_name=server_name)
148
+ except Exception as e:
149
+ logging.error(f"### McpToolBox load_mcp_tools_schema: Langchain mcp get_tools failed, "
150
+ f"need start mcp server '{server_name}' !")
151
+ continue
148
152
 
153
+ tool_type = self._get_tool_type(server_name)
149
154
  for tool in mcp_tools:
150
155
  input_schema = tool.args_schema
151
- if server_name == self.GENERAL_MCP_SERVER_NAME:
156
+ if tool_type == "general" or tool_type == "agent":
152
157
  input_schema['properties'].pop("task_id", None)
153
158
  if 'task_id' in input_schema['required']:
154
159
  input_schema['required'].remove('task_id')
@@ -158,6 +163,7 @@ class XGAMcpToolBox(XGAToolBox):
158
163
 
159
164
  metadata = tool.metadata or {}
160
165
  tool_schema = XGAToolSchema(tool_name=tool.name,
166
+ tool_type=tool_type,
161
167
  server_name=server_name,
162
168
  description=tool.description,
163
169
  input_schema=input_schema,
@@ -169,8 +175,8 @@ class XGAMcpToolBox(XGAToolBox):
169
175
  self.is_loaded_tool_schemas = False
170
176
  await self.load_mcp_tools_schema()
171
177
 
172
- @staticmethod
173
- def _load_mcp_servers_config(mcp_config_path: str) -> Dict[str, Any]:
178
+
179
+ def _load_mcp_servers_config(self, mcp_config_path: str) -> Dict[str, Any]:
174
180
  try:
175
181
  if os.path.exists(mcp_config_path):
176
182
  with open(mcp_config_path, 'r', encoding="utf-8") as f:
@@ -192,6 +198,13 @@ class XGAMcpToolBox(XGAToolBox):
192
198
  logging.error(f"McpToolBox load_mcp_servers_config: Failed to load MCP servers config: {e}")
193
199
  return {'mcpServers': {}}
194
200
 
201
+ def _get_tool_type(self, server_name: str) -> XGAToolType:
202
+ tool_type: XGAToolType = "custom"
203
+ if server_name == self.GENERAL_MCP_SERVER_NAME:
204
+ tool_type = "general"
205
+ elif server_name.startswith(self.AGENT_MCP_SERVER_PREFIX):
206
+ tool_type = "agent"
207
+ return tool_type
195
208
 
196
209
  if __name__ == "__main__":
197
210
  import asyncio
@@ -207,14 +220,14 @@ if __name__ == "__main__":
207
220
 
208
221
  task_id = "task1"
209
222
  await mcp_tool_box.load_mcp_tools_schema()
210
- await mcp_tool_box.creat_task_tool_box(task_id=task_id, general_tools=["*"], custom_tools=["bomc_fault.*"])
211
- tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "general_tool")
223
+ await mcp_tool_box.creat_task_tool_box(task_id=task_id, general_tools=["*"], custom_tools=["*"])
224
+ tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "general")
212
225
  print("general_tools_schemas" + "*"*50)
213
226
  for tool_schema in tool_schemas:
214
227
  print(asdict(tool_schema))
215
228
  print()
216
229
 
217
- tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "custom_tool")
230
+ tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "custom")
218
231
  print("custom_tools_schemas" + "*" * 50)
219
232
  for tool_schema in tool_schemas:
220
233
  print(asdict(tool_schema))
@@ -11,7 +11,11 @@ class XGAPromptBuilder():
11
11
  def __init__(self, system_prompt: Optional[str] = None):
12
12
  self.system_prompt = system_prompt
13
13
 
14
- def build_task_prompt(self, model_name: str, general_tool_schemas: List[XGAToolSchema], custom_tool_schemas: List[XGAToolSchema])-> str:
14
+ def build_task_prompt(self,
15
+ model_name: str,
16
+ general_tool_schemas: List[XGAToolSchema],
17
+ custom_tool_schemas: List[XGAToolSchema],
18
+ agent_tool_schemas: List[XGAToolSchema])-> str:
15
19
  if self.system_prompt is None:
16
20
  self.system_prompt = self._load_default_system_prompt(model_name)
17
21
 
@@ -23,6 +27,9 @@ class XGAPromptBuilder():
23
27
  tool_prompt = self.build_custom_tool_prompt(custom_tool_schemas)
24
28
  task_prompt = task_prompt + "\n" + tool_prompt
25
29
 
30
+ tool_prompt = self.build_agent_tool_prompt(agent_tool_schemas)
31
+ task_prompt = task_prompt + "\n" + tool_prompt
32
+
26
33
  return task_prompt
27
34
 
28
35
  def build_general_tool_prompt(self, tool_schemas:List[XGAToolSchema])-> str:
@@ -61,10 +68,18 @@ class XGAPromptBuilder():
61
68
 
62
69
 
63
70
  def build_custom_tool_prompt(self, tool_schemas:List[XGAToolSchema])-> str:
71
+ tool_prompt = self.build_mcp_tool_prompt("templates/custom_tool_prompt_template.txt", tool_schemas)
72
+ return tool_prompt
73
+
74
+ def build_agent_tool_prompt(self, tool_schemas:List[XGAToolSchema])-> str:
75
+ tool_prompt = self.build_mcp_tool_prompt("templates/agent_tool_prompt_template.txt", tool_schemas)
76
+ return tool_prompt
77
+
78
+ def build_mcp_tool_prompt(self, file_path: str, tool_schemas:List[XGAToolSchema])-> str:
64
79
  tool_prompt = ""
65
80
  tool_schemas = tool_schemas or []
66
81
  if len(tool_schemas) > 0:
67
- tool_prompt = read_file("templates/custom_tool_prompt_template.txt")
82
+ tool_prompt = read_file(file_path)
68
83
  tool_info = ""
69
84
  for tool_schema in tool_schemas:
70
85
  description = tool_schema.description if tool_schema.description else 'No description available'
@@ -64,7 +64,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
64
64
  tool_start_msg = self._add_tool_start_message(tool_context)
65
65
  yield tool_start_msg
66
66
 
67
- tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy, assistant_msg_id, parsing_details)
67
+ tool_message = self._add_tool_messsage(tool_context, self.xml_adding_strategy)
68
68
 
69
69
  tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
70
70
  yield tool_completed_msg
@@ -376,19 +376,31 @@ class TaskResponseProcessor(ABC):
376
376
  logging.info(f"TaskProcessor execute_tools_in_parallel: Execution completed for {len(results)} tools, total {tool_num} tools)")
377
377
  return processed_results
378
378
 
379
+ def _create_tool_context(self,
380
+ tool_call: Dict[str, Any],
381
+ tool_index: int,
382
+ assistant_message_id: Optional[str] = None,
383
+ parsing_details: Optional[Dict[str, Any]] = None,
384
+ result: Optional[XGAToolResult] = None,
385
+ ) -> ToolExecutionContext:
386
+ """Create a tool execution context with display name and parsing details populated."""
387
+ return ToolExecutionContext(
388
+ tool_call = tool_call,
389
+ tool_index = tool_index,
390
+ function_name = tool_call['function_name'],
391
+ xml_tag_name = tool_call['xml_tag_name'],
392
+ assistant_message_id = assistant_message_id,
393
+ parsing_details = parsing_details,
394
+ result = result
395
+ )
379
396
 
380
- def _add_tool_messsage(self,
381
- tool_call: Dict[str, Any],
382
- result: XGAToolResult,
383
- strategy: XmlAddingStrategy = "assistant_message",
384
- assistant_message_id: Optional[str] = None,
385
- parsing_details: Optional[Dict[str, Any]] = None
386
- ) -> Optional[Dict[str, Any]]: # Return the full message object
397
+
398
+ def _add_tool_messsage(self,context:ToolExecutionContext, strategy: XmlAddingStrategy) -> Optional[Dict[str, Any]]: # Return the full message object
387
399
  # Create two versions of the structured result
388
400
  # Rich version for the frontend
389
- result_for_frontend = self._create_structured_tool_result(tool_call, result, parsing_details, for_llm=False)
401
+ result_for_frontend = self._create_structured_tool_result(context.tool_call, context.result, context.parsing_details, for_llm=False)
390
402
  # Concise version for the LLM
391
- result_for_llm = self._create_structured_tool_result(tool_call, result, parsing_details, for_llm=True)
403
+ result_for_llm = self._create_structured_tool_result(context.tool_call, context.result, context.parsing_details, for_llm=True)
392
404
 
393
405
  # Add the message with the appropriate role to the conversation history
394
406
  # This allows the LLM to see the tool result in subsequent interactions
@@ -399,11 +411,11 @@ class TaskResponseProcessor(ABC):
399
411
  }
400
412
 
401
413
  metadata = {}
402
- if assistant_message_id:
403
- metadata['assistant_message_id'] = assistant_message_id
414
+ if context.assistant_message_id:
415
+ metadata['assistant_message_id'] = context.assistant_message_id
404
416
 
405
- if parsing_details:
406
- metadata['parsing_details'] = parsing_details
417
+ if context.parsing_details:
418
+ metadata['parsing_details'] = context.parsing_details
407
419
 
408
420
  metadata['frontend_content'] = result_for_frontend
409
421
 
@@ -455,25 +467,6 @@ class TaskResponseProcessor(ABC):
455
467
  return structured_result
456
468
 
457
469
 
458
- def _create_tool_context(self,
459
- tool_call: Dict[str, Any],
460
- tool_index: int,
461
- assistant_message_id: Optional[str] = None,
462
- parsing_details: Optional[Dict[str, Any]] = None,
463
- result: Optional[XGAToolResult] = None,
464
- ) -> ToolExecutionContext:
465
- """Create a tool execution context with display name and parsing details populated."""
466
- return ToolExecutionContext(
467
- tool_call = tool_call,
468
- tool_index = tool_index,
469
- function_name = tool_call['function_name'],
470
- xml_tag_name = tool_call['xml_tag_name'],
471
- assistant_message_id = assistant_message_id,
472
- parsing_details = parsing_details,
473
- result = result
474
- )
475
-
476
-
477
470
  def _add_tool_start_message(self, context: ToolExecutionContext) -> Optional[Dict[str, Any]]:
478
471
  """Formats, saves, and returns a tool started status message."""
479
472
  content = {
@@ -43,8 +43,9 @@ class StreamTaskResponser(TaskResponseProcessor):
43
43
  chunk_content = llm_chunk_msg.content
44
44
  accumulated_content += chunk_content
45
45
 
46
- xml_tool_call_count = len(self._extract_xml_chunks(accumulated_content))
47
- if self.max_xml_tool_calls <= 0 or xml_tool_call_count <= self.max_xml_tool_calls:
46
+ xml_tool_chunks = self._extract_xml_chunks(accumulated_content)
47
+ xml_tool_chunk_len = len(xml_tool_chunks)
48
+ if self.max_xml_tool_calls <= 0 or xml_tool_chunk_len <= self.max_xml_tool_calls:
48
49
  if use_assistant_chunk_msg:
49
50
  message_data = {"role": "assistant", "content": chunk_content}
50
51
  metadata = {"sequence": msg_sequence}
@@ -57,19 +58,13 @@ class StreamTaskResponser(TaskResponseProcessor):
57
58
  else:
58
59
  finish_reason = "xml_tool_limit_reached"
59
60
  logging.warning(f"StreamResp: Over XML Tool Limit, finish_reason='xml_tool_limit_reached', "
60
- f"xml_tool_call_count={xml_tool_call_count}")
61
+ f"xml_tool_chunk_len={xml_tool_chunk_len}")
61
62
  break
62
63
 
64
+ parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)
63
65
  if finish_reason == "xml_tool_limit_reached":
64
- xml_chunks = self._extract_xml_chunks(accumulated_content)
65
- if len(xml_chunks) > self.max_xml_tool_calls:
66
- limited_chunks = xml_chunks[:self.max_xml_tool_calls]
67
- if limited_chunks:
68
- last_chunk = limited_chunks[-1]
69
- last_chunk_pos = accumulated_content.find(last_chunk) + len(last_chunk)
70
- accumulated_content = accumulated_content[:last_chunk_pos]
66
+ parsed_xml_data = parsed_xml_data[:self.max_xml_tool_calls]
71
67
 
72
- parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)
73
68
  should_auto_continue = (can_auto_continue and finish_reason == 'length')
74
69
 
75
70
  self.root_span.event(name=f"stream_processor_start[{self.task_no}]({auto_continue_count})", level="DEFAULT",
@@ -77,13 +72,12 @@ class StreamTaskResponser(TaskResponseProcessor):
77
72
  f"parsed_xml_data_len={len(parsed_xml_data)}, accumulated_content_len={len(accumulated_content)}, "
78
73
  f"should_auto_continue={should_auto_continue}")
79
74
 
80
- assistant_msg = None
75
+ assistant_msg_id = None
81
76
  if accumulated_content and not should_auto_continue:
82
77
  message_data = {"role": "assistant", "content": accumulated_content}
83
78
  assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
84
79
  yield assistant_msg
85
-
86
- assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
80
+ assistant_msg_id = assistant_msg['message_id']
87
81
 
88
82
  tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
89
83
  if len(tool_calls_to_execute) > 0 and not should_auto_continue:
@@ -94,12 +88,12 @@ class StreamTaskResponser(TaskResponseProcessor):
94
88
  tool_call = parsed_xml_item['tool_call']
95
89
  parsing_details = parsed_xml_item['parsing_details']
96
90
 
97
- tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id,parsing_details, tool_result)
91
+ tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details, tool_result)
98
92
 
99
93
  tool_start_msg = self._add_tool_start_message(tool_context)
100
94
  yield tool_start_msg
101
95
 
102
- tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy,assistant_msg_id, parsing_details)
96
+ tool_message = self._add_tool_messsage(tool_context, self.xml_adding_strategy)
103
97
 
104
98
  tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
105
99
  yield tool_completed_msg
@@ -17,8 +17,9 @@ from xgae.engine.responser.responser_base import TaskResponserContext, TaskRespo
17
17
 
18
18
  class XGATaskEngine:
19
19
  def __init__(self,
20
- session_id: Optional[str] = None,
21
20
  task_id: Optional[str] = None,
21
+ session_id: Optional[str] = None,
22
+ user_id: Optional[str] = None,
22
23
  agent_id: Optional[str] = None,
23
24
  general_tools: Optional[List[str]] = None,
24
25
  custom_tools: Optional[List[str]] = None,
@@ -29,8 +30,9 @@ class XGATaskEngine:
29
30
  prompt_builder: Optional[XGAPromptBuilder] = None,
30
31
  tool_box: Optional[XGAToolBox] = None):
31
32
  self.task_id = task_id if task_id else f"xga_task_{uuid4()}"
32
- self.agent_id = agent_id
33
33
  self.session_id = session_id
34
+ self.user_id = user_id
35
+ self.agent_id = agent_id
34
36
 
35
37
  self.llm_client = LLMClient(llm_config)
36
38
  self.model_name = self.llm_client.model_name
@@ -56,16 +58,16 @@ class XGATaskEngine:
56
58
  self.task_response_msgs: List[XGAResponseMessage] = []
57
59
 
58
60
  async def run_task_with_final_answer(self,
59
- task_message: Dict[str, Any],
61
+ task_input: Dict[str, Any],
60
62
  trace_id: Optional[str] = None) -> XGATaskResult:
61
- final_result:XGATaskResult = None
63
+ final_result: XGATaskResult = None
62
64
  try:
63
65
  await self._init_task()
64
66
 
65
- self.task_langfuse.start_root_span("run_task_with_final_answer", task_message, trace_id)
67
+ self.task_langfuse.start_root_span("run_task_with_final_answer", task_input, trace_id)
66
68
 
67
69
  chunks = []
68
- async for chunk in self.run_task(task_message=task_message, trace_id=trace_id):
70
+ async for chunk in self.run_task(task_input, trace_id):
69
71
  chunks.append(chunk)
70
72
 
71
73
  if len(chunks) > 0:
@@ -79,14 +81,14 @@ class XGATaskEngine:
79
81
 
80
82
 
81
83
  async def run_task(self,
82
- task_message: Dict[str, Any],
84
+ task_input: Dict[str, Any],
83
85
  trace_id: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
84
86
  try:
85
87
  await self._init_task()
86
88
 
87
- self.task_langfuse.start_root_span("run_task", task_message, trace_id)
89
+ self.task_langfuse.start_root_span("run_task", task_input, trace_id)
88
90
 
89
- self.add_response_message(type="user", content=task_message, is_llm_message=True)
91
+ self.add_response_message(type="user", content=task_input, is_llm_message=True)
90
92
 
91
93
  async for chunk in self._run_task_auto():
92
94
  yield chunk
@@ -114,10 +116,14 @@ class XGATaskEngine:
114
116
  await self.tool_box.load_mcp_tools_schema()
115
117
 
116
118
  await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
117
- general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
118
- custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom_tool")
119
+ general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general")
120
+ custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom")
121
+ agent_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "agent")
119
122
 
120
- self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
123
+ self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name,
124
+ general_tool_schemas,
125
+ custom_tool_schemas,
126
+ agent_tool_schemas)
121
127
 
122
128
  logging.info("*" * 10 + f" XGATaskEngine Task'{self.task_id}' Initialized " + "*" * 10)
123
129
  logging.info(f"model_name={self.model_name}, is_stream={self.is_stream}")
@@ -149,30 +155,30 @@ class XGATaskEngine:
149
155
  status_content = chunk['content']
150
156
  status_type = status_content['status_type']
151
157
  if status_type == "error":
152
- logging.error(f"TaskEngine run_task_auto: task_response error: {chunk.get('message')}")
158
+ logging.error(f"XGATaskEngine run_task_auto: task_response error: {chunk.get('message')}")
153
159
  auto_continue = False
154
160
  break
155
161
  elif status_type == "finish":
156
162
  finish_reason = status_content['finish_reason']
157
163
  if finish_reason == "completed":
158
- logging.info(f"TaskEngine run_task_auto: Detected finish_reason='completed', TASK_COMPLETE Success !")
164
+ logging.info(f"XGATaskEngine run_task_auto: Detected finish_reason='completed', TASK_COMPLETE Success !")
159
165
  auto_continue = False
160
166
  break
161
167
  elif finish_reason == "xml_tool_limit_reached":
162
- logging.warning(f"TaskEngine run_task_auto: Detected finish_reason='xml_tool_limit_reached', stop auto-continue")
168
+ logging.warning(f"XGATaskEngine run_task_auto: Detected finish_reason='xml_tool_limit_reached', stop auto-continue")
163
169
  auto_continue = False
164
170
  break
165
171
  elif finish_reason == "non_tool_call":
166
- logging.warning(f"TaskEngine run_task_auto: Detected finish_reason='non_tool_call', stop auto-continue")
172
+ logging.warning(f"XGATaskEngine run_task_auto: Detected finish_reason='non_tool_call', stop auto-continue")
167
173
  auto_continue = False
168
174
  break
169
175
  elif finish_reason in ["stop", "length"]: # 'length' occur on some LLM
170
176
  auto_continue_count += 1
171
177
  auto_continue = True if auto_continue_count < self.max_auto_run else False
172
178
  update_continuous_state(auto_continue_count, auto_continue)
173
- logging.info(f"TaskEngine run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{self.max_auto_run})")
179
+ logging.info(f"XGATaskEngine run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{self.max_auto_run})")
174
180
  except Exception as parse_error:
175
- trace = log_trace(parse_error,f"TaskEngine run_task_auto: Parse chunk error, chunk: {chunk}")
181
+ trace = log_trace(parse_error,f"XGATaskEngine run_task_auto: Parse chunk error, chunk: {chunk}")
176
182
  self.task_langfuse.root_span.event(name="engine_parse_chunk_error", level="ERROR",
177
183
  status_message=f"Task Engine parse chunk error: {parse_error}",
178
184
  metadata={"content": chunk, "trace": trace})
@@ -181,7 +187,7 @@ class XGATaskEngine:
181
187
  error_msg = self.add_response_message(type="status", content=status_content, is_llm_message=False)
182
188
  yield error_msg
183
189
  except Exception as run_error:
184
- trace = log_trace(run_error, "TaskEngine run_task_auto: Call task_run_once")
190
+ trace = log_trace(run_error, "XGATaskEngine run_task_auto: Call task_run_once")
185
191
  self.task_langfuse.root_span.event(name="engine_task_run_once_error", level="ERROR",
186
192
  status_message=f"Call task_run_once error: {run_error}",
187
193
  metadata={"trace": trace})
@@ -267,7 +273,7 @@ class XGATaskEngine:
267
273
  logging.warning(f"❌ FINAL_RESULT: LLM Result is EMPTY, finish_reason={finish_reason}")
268
274
  final_result = XGATaskResult(type="error", content="LLM has no answer")
269
275
  except Exception as e:
270
- trace = log_trace(e, f"TaskEngine parse_final_result: Parse message chunk error, chunk: {chunk}")
276
+ trace = log_trace(e, f"XGATaskEngine parse_final_result: Parse message chunk error, chunk: {chunk}")
271
277
  self.task_langfuse.root_span.event(name="engine_parse_final_result_error", level="ERROR",
272
278
  status_message=f"Task Engine parse final result error: {e}",
273
279
  metadata={"content": chunk, "trace": trace})
@@ -281,11 +287,12 @@ class XGATaskEngine:
281
287
  is_llm_message: bool,
282
288
  metadata: Optional[Dict[str, Any]]=None)-> XGAResponseMessage:
283
289
  metadata = metadata or {}
284
- metadata["task_id"] = self.task_id
285
- metadata["task_run_id"] = self.task_run_id
286
- metadata["trace_id"] = self.task_langfuse.trace_id
287
- metadata["session_id"] = self.session_id
288
- metadata["agent_id"] = self.agent_id
290
+ metadata['task_id'] = self.task_id
291
+ metadata['task_run_id'] = self.task_run_id
292
+ metadata['trace_id'] = self.task_langfuse.trace_id
293
+ metadata['session_id'] = self.session_id
294
+ metadata['user_id'] = self.user_id
295
+ metadata['agent_id'] = self.agent_id
289
296
 
290
297
  message = XGAResponseMessage(
291
298
  message_id = f"xga_msg_{uuid4()}",
@@ -355,7 +362,14 @@ class XGATaskEngine:
355
362
 
356
363
 
357
364
  def _create_task_langfuse(self)-> XGATaskLangFuse:
358
- return XGATaskLangFuse(self.session_id, self.task_id, self.task_run_id, self.task_no, self.agent_id)
365
+ return XGATaskLangFuse(
366
+ task_id = self.task_id,
367
+ task_run_id = self.task_run_id,
368
+ task_no = self.task_no,
369
+ session_id = self.session_id,
370
+ agent_id = self.agent_id,
371
+ user_id = self.user_id
372
+ )
359
373
 
360
374
 
361
375
  def _logging_reponse_chunk(self, chunk, auto_count: int)-> None:
@@ -380,11 +394,11 @@ class XGATaskEngine:
380
394
  pretty_content = json.dumps(status_content, ensure_ascii=False, indent=2)
381
395
 
382
396
  if chunk_type == "assistant_chunk":
383
- logging.debug(f"TASK_RESP_CHUNK[{auto_count}]<{chunk_type}{prefix}> content: {pretty_content}")
397
+ logging.debug(f"TASK_RESP_CHUNK[{self.task_no}]({auto_count})<{chunk_type}{prefix}> content: {pretty_content}")
384
398
  else:
385
- logging.info(f"TASK_RESP_CHUNK[{auto_count}]<{chunk_type}{prefix}> content: {pretty_content}")
399
+ logging.info(f"TASK_RESP_CHUNK[{self.task_no}]({auto_count})<{chunk_type}{prefix}> content: {pretty_content}")
386
400
  except Exception as e:
387
- logging.error(f"TaskEngine logging_reponse_chunk: Decorate chunk={chunk}, error: {e}")
401
+ logging.error(f"XGATaskEngine logging_reponse_chunk: Decorate chunk={chunk}, error: {e}")
388
402
 
389
403
 
390
404
 
@@ -406,7 +420,7 @@ if __name__ == "__main__":
406
420
  agent_id="agent_1"
407
421
  )
408
422
  user_input = "locate 10.0.0.1 fault and solution"
409
- final_result = await engine.run_task_with_final_answer(task_message={'role': "user", 'content': user_input})
423
+ final_result = await engine.run_task_with_final_answer(task_input={'role': "user", 'content': user_input})
410
424
  print(f"FINAL RESULT:{final_result}")
411
425
 
412
426
 
@@ -1,4 +1,4 @@
1
-
1
+ import logging
2
2
  from typing import Any, Dict, Optional
3
3
  from langfuse import Langfuse
4
4
 
@@ -10,10 +10,11 @@ class XGATaskLangFuse:
10
10
  langfuse: Langfuse = None
11
11
 
12
12
  def __init__(self,
13
- session_id: str,
14
13
  task_id:str,
15
14
  task_run_id: str,
16
15
  task_no: int,
16
+ session_id: str,
17
+ user_id: str,
17
18
  agent_id: str) -> None:
18
19
  if XGATaskLangFuse.langfuse is None:
19
20
  XGATaskLangFuse.langfuse = setup_langfuse()
@@ -22,6 +23,7 @@ class XGATaskLangFuse:
22
23
  self.task_id = task_id
23
24
  self.task_run_id = task_run_id
24
25
  self.task_no = task_no
26
+ self.user_id = user_id
25
27
  self.agent_id = agent_id
26
28
 
27
29
  self.trace_id = None
@@ -31,7 +33,7 @@ class XGATaskLangFuse:
31
33
 
32
34
  def start_root_span(self,
33
35
  root_span_name: str,
34
- task_message: Dict[str, Any],
36
+ task_input: Dict[str, Any],
35
37
  trace_id: Optional[str] = None):
36
38
  if self.root_span is None:
37
39
  trace = None
@@ -39,13 +41,23 @@ class XGATaskLangFuse:
39
41
  self.trace_id = trace_id
40
42
  trace = XGATaskLangFuse.langfuse.trace(id=trace_id)
41
43
  else:
42
- trace = XGATaskLangFuse.langfuse.trace(name="xga_task_engine")
44
+ trace = XGATaskLangFuse.langfuse.trace(name="xga_task_engine", session_id=self.session_id)
43
45
  self.trace_id = trace.id
44
46
 
45
- metadata = {'task_id': self.task_id, 'session_id': self.session_id, 'agent_id': self.agent_id}
46
- self.root_span = trace.span(id=self.task_run_id, name=root_span_name, input=task_message,metadata=metadata)
47
+ metadata = {
48
+ 'task_id' : self.task_id,
49
+ 'session_id' : self.session_id,
50
+ 'user_id' : self.user_id,
51
+ 'agent_id' : self.agent_id
52
+ }
53
+
54
+ self.root_span = trace.span(id=self.task_run_id,
55
+ name=f"{root_span_name}[{self.task_no}]",
56
+ input=task_input,
57
+ metadata=metadata)
47
58
  self.root_span_name = root_span_name
48
59
 
60
+ logging.info(f"{root_span_name} TASK_INPUT: {task_input}")
49
61
 
50
62
  def end_root_span(self, root_span_name:str, output: Optional[XGATaskResult]=None):
51
63
  if self.root_span and self.root_span_name == root_span_name:
@@ -55,7 +67,7 @@ class XGATaskLangFuse:
55
67
 
56
68
 
57
69
  def create_llm_langfuse_meta(self, llm_count:int)-> LangfuseMetadata:
58
- generation_name = f"xga_task_engine_llm_completion[{self.task_no}]({llm_count})"
70
+ generation_name = f"xga_engine_llm_completion[{self.task_no}]({llm_count})"
59
71
  generation_id = f"{self.task_run_id}({llm_count})"
60
72
  return LangfuseMetadata(
61
73
  generation_name = generation_name,
@@ -14,7 +14,7 @@ langfuse = setup_langfuse()
14
14
  def get_user_message(question)-> str:
15
15
  while True:
16
16
  user_message = input(f"\n💬 {question}: ")
17
- if user_message.lower() == 'exit':
17
+ if user_message.lower() == 'exit' or user_message.lower() == 'quit':
18
18
  print("\n====== Extreme General Agent Engine CLI EXIT ======")
19
19
  sys.exit()
20
20
 
@@ -44,7 +44,7 @@ async def cli() -> None:
44
44
  general_tools = ["*"]
45
45
 
46
46
  while True:
47
- user_message = get_user_message("Enter your message (or 'exit' to quit)")
47
+ user_message = get_user_message("Enter your task input message (or 'exit' to quit)")
48
48
 
49
49
  print("\n🔄 Running XGA Engine ...\n")
50
50
  engine = XGATaskEngine(tool_box=tool_box,
@@ -54,27 +54,24 @@ async def cli() -> None:
54
54
 
55
55
  # Two task run in same langfuse trace
56
56
  trace_id = langfuse.trace(name="xgae_cli").trace_id
57
-
58
- final_result = await engine.run_task_with_final_answer(
59
- task_message={'role': "user", 'content': user_message},
60
- trace_id=trace_id
61
- )
62
-
63
- if final_result["type"] == "ask":
64
- await asyncio.sleep(1)
65
- print(f"\n📌 ASK INFO: {final_result['content']}")
66
- user_message = get_user_message("Enter ASK information (or 'exit' to quit)")
57
+ auto_continue = True
58
+ while auto_continue:
59
+ auto_continue = False
67
60
  final_result = await engine.run_task_with_final_answer(
68
- task_message={'role': "user", 'content': user_message},
61
+ task_input={'role': "user", 'content': user_message},
69
62
  trace_id=trace_id
70
63
  )
71
64
 
72
- await asyncio.sleep(1)
73
- result_prefix = "✅" if final_result["type"] == "answer" else "❌"
74
- if final_result["type"] == "ask":
75
- print("\n *** IMPORTANT: XGA CLI only support showing ONE TURN ASK !")
76
- result_prefix = "⚠️"
77
- print(f"\n {result_prefix} FINAL RESULT: {final_result['content']}")
65
+ if final_result["type"] == "ask":
66
+ await asyncio.sleep(1)
67
+ print(f"\n📌 ASK INFO: {final_result['content']}")
68
+ user_message = get_user_message("Enter ASK information (or 'exit' to quit)")
69
+ auto_continue = True
70
+ continue
71
+
72
+ await asyncio.sleep(1)
73
+ result_prefix = "✅" if final_result["type"] == "answer" else "❌"
74
+ print(f"\n {result_prefix} FINAL RESULT: {final_result['content']}")
78
75
 
79
76
 
80
77
  def main():
@@ -10,7 +10,7 @@ mcp = FastMCP(name="XGAE Message Tools")
10
10
  @mcp.tool(
11
11
  description="""A special tool to indicate you have completed all tasks and are about to enter complete state. Use ONLY when: 1) All tasks in todo.md are marked complete [x], 2) The user's original request has been fully addressed, 3) There are no pending actions or follow-ups required, 4) You've delivered all final outputs and results to the user. IMPORTANT: This is the ONLY way to properly terminate execution. Never use this tool unless ALL tasks are complete and verified. Always ensure you've provided all necessary outputs and references before using this tool. Include relevant attachments when the completion relates to specific files or resources."""
12
12
  )
13
- async def complete(task_id: str,
13
+ def complete(task_id: str,
14
14
  text: Annotated[Optional[str], Field(default=None,
15
15
  description="Completion summary. Include: 1) Task summary 2) Key deliverables 3) Next steps 4) Impact achieved")],
16
16
  attachments: Annotated[Optional[str], Field(default=None,
@@ -23,7 +23,7 @@ async def complete(task_id: str,
23
23
  @mcp.tool(
24
24
  description="""Ask user a question and wait for response. Use for: 1) Requesting clarification on ambiguous requirements, 2) Seeking confirmation before proceeding with high-impact changes, 3) Gathering additional information needed to complete a task, 4) Offering options and requesting user preference, 5) Validating assumptions when critical to task success, 6) When encountering unclear or ambiguous results during task execution, 7) When tool results don't match expectations, 8) For natural conversation and follow-up questions, 9) When research reveals multiple entities with the same name, 10) When user requirements are unclear or could be interpreted differently. IMPORTANT: Use this tool when user input is essential to proceed. Always provide clear context and options when applicable. Use natural, conversational language that feels like talking with a helpful friend. Include relevant attachments when the question relates to specific files or resources. CRITICAL: When you discover ambiguity (like multiple people with the same name), immediately stop and ask for clarification rather than making assumptions."""
25
25
  )
26
- async def ask(task_id: str,
26
+ def ask(task_id: str,
27
27
  text: Annotated[str, Field(
28
28
  description="Question text to present to user. Include: 1) Clear question/request 2) Context why input is needed 3) Available options 4) Impact of choices 5) Relevant constraints")],
29
29
  attachments: Annotated[Optional[str], Field(default=None,
@@ -36,7 +36,7 @@ async def ask(task_id: str,
36
36
  @mcp.tool(
37
37
  description="end task, destroy sandbox"
38
38
  )
39
- async def end_task(task_id: str) :
39
+ def end_task(task_id: str) :
40
40
  print(f"<XGAETools-end_task> task_id: {task_id}")
41
41
  return XGAToolResult(success=True, output="")
42
42
 
@@ -1,10 +1,11 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: xgae
3
- Version: 0.1.17
3
+ Version: 0.1.19
4
4
  Summary: Extreme General Agent Engine
5
5
  Requires-Python: >=3.13
6
6
  Requires-Dist: colorlog==6.9.0
7
7
  Requires-Dist: langchain-mcp-adapters==0.1.9
8
+ Requires-Dist: langchain==0.3.27
8
9
  Requires-Dist: langfuse==2.60.9
9
10
  Requires-Dist: langgraph==0.6.5
10
11
  Requires-Dist: litellm==1.74.15
@@ -0,0 +1,21 @@
1
+ xgae/__init__.py,sha256=oBX_YzTliM-343BAlR-sD7BUZmsCJ7PY2oYrGBhsdLM,79
2
+ xgae/engine_cli_app.py,sha256=FdmIpq8KDsgyZNfwCDgNX7FEZFeRFyGOt_H1oZF8aKs,2890
3
+ xgae/engine/engine_base.py,sha256=RR1em2wHiM2jP-peHt77SKdHWjnYOjdIIzN93zT61cA,1715
4
+ xgae/engine/mcp_tool_box.py,sha256=G4hKIMguwg1cO4Us2NMfdloYim8kuikVyVTIPucJr7o,10903
5
+ xgae/engine/prompt_builder.py,sha256=6I5rjgvNJ27QJ8DDuBTplutoPZdGs9LYFv3TSgT7zmc,5045
6
+ xgae/engine/task_engine.py,sha256=7B7nbERTh9HDgDH4lXS_s4KTsVU41AgR7rnN4jKLozQ,21588
7
+ xgae/engine/task_langfuse.py,sha256=J9suOhlIGSJor-mZsggPZf3XQld9dTkfpHAS2fVirvM,2859
8
+ xgae/engine/responser/non_stream_responser.py,sha256=zEJjqCgZVe2B8gkHYRFU7tmBV834f7w2a4Ws25P1N-c,5289
9
+ xgae/engine/responser/responser_base.py,sha256=jhl1Bdz1Fs3KofGEymThNXlQuCORFTTkTAR_U47krds,24403
10
+ xgae/engine/responser/stream_responser.py,sha256=cv4UGcxj8OksEogW7DUGTCvSJabu-DF6GceFyUwaXI4,7627
11
+ xgae/tools/without_general_tools_app.py,sha256=KqsdhxD3hvTpiygaGUVHysRFjvv_1A8zOwMKN1J0J0U,3821
12
+ xgae/utils/__init__.py,sha256=ElaGS-zdeZeu6is41u3Ny7lkvhg7BDSK-jMNg9j6K5A,499
13
+ xgae/utils/json_helpers.py,sha256=WD4G5U9Dh8N6J9O0L5wGyqj-NHi09kcXHGdLD_26nlc,3607
14
+ xgae/utils/llm_client.py,sha256=mWRtvtSMk_8NuzFReT9x52ayHlCNVZMZAltD6TQ-xZ8,14404
15
+ xgae/utils/misc.py,sha256=aMWOvJ9VW52q-L9Lkjl1hvXqLwpJAmyxA-Z8jzqFG0U,907
16
+ xgae/utils/setup_env.py,sha256=MqNG0c2QQBDFU1kI8frxr9kB5d08Mmi3QZ1OoorgIa0,2662
17
+ xgae/utils/xml_tool_parser.py,sha256=Mb0d8kBrfyAEvUwW1Nqir-3BgxZRr0ZX3WymQouuFSo,4859
18
+ xgae-0.1.19.dist-info/METADATA,sha256=MHUqZkPIWB9Tn_sLyFkFBw4XZKPAQqC_z0QudI1s5hk,343
19
+ xgae-0.1.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
20
+ xgae-0.1.19.dist-info/entry_points.txt,sha256=wmvgtMQbtzTbDPETS-tbQJD7jVlcs4hp0w6wOB0ooCc,229
21
+ xgae-0.1.19.dist-info/RECORD,,
@@ -1,4 +1,5 @@
1
1
  [console_scripts]
2
+ example-a2a-tools = examples.tools.simu_a2a_tools_app:main
2
3
  example-fault-tools = examples.tools.custom_fault_tools_app:main
3
- xgae = xgae.cli_app:main
4
+ xgae = xgae.engine_cli_app:main
4
5
  xgae-tools = xgae.tools.without_general_tools_app:main
@@ -1,21 +0,0 @@
1
- xgae/__init__.py,sha256=OEUd9y9AoGBd3xYerdTTpz9xl4NWkmXeq1a2eil7Qro,72
2
- xgae/cli_app.py,sha256=ieTaS0b532P_8g9Mz2xda8TOZwYD2hKGnNZiarADAM0,3000
3
- xgae/engine/engine_base.py,sha256=-QZqLRbQdwRUfbY4l3i7dFfMB-BL267a-wGZR9bMPLc,1662
4
- xgae/engine/mcp_tool_box.py,sha256=iAWUWP_goHAEeYYMUvTKiMkR2VkEOoRtNmjLV9HaIUg,10415
5
- xgae/engine/prompt_builder.py,sha256=dqv0xcB-UWQhqISbMCYCTM1ANtthY6xUe7sJ9vPRqQ4,4364
6
- xgae/engine/task_engine.py,sha256=oEyDORfDqHn7MzodeLrbhITd8TIIZrDIouF09j0twf0,20940
7
- xgae/engine/task_langfuse.py,sha256=n2bajsHq2Zt3jetel8cSlN2lo42mZgTmbR4Zbx9JvsM,2416
8
- xgae/engine/responser/non_stream_responser.py,sha256=HkmeFBIoxORhnFVh6XT0i6ixfF5vNzvN7B_BP6FzPVM,5334
9
- xgae/engine/responser/responser_base.py,sha256=eQ4E1p_PoQISrIOLmjapGRH_RRX-7LOY1P2SrDjvyTM,24624
10
- xgae/engine/responser/stream_responser.py,sha256=P6IkPniGli8XNq_BVciHeMCJiE0k3lNokTvX1GqRRbc,8046
11
- xgae/tools/without_general_tools_app.py,sha256=H3JrwA0u7BIKW69yYBPLDpPesklY5quQIMaSVyvJ4s8,3839
12
- xgae/utils/__init__.py,sha256=ElaGS-zdeZeu6is41u3Ny7lkvhg7BDSK-jMNg9j6K5A,499
13
- xgae/utils/json_helpers.py,sha256=WD4G5U9Dh8N6J9O0L5wGyqj-NHi09kcXHGdLD_26nlc,3607
14
- xgae/utils/llm_client.py,sha256=mWRtvtSMk_8NuzFReT9x52ayHlCNVZMZAltD6TQ-xZ8,14404
15
- xgae/utils/misc.py,sha256=aMWOvJ9VW52q-L9Lkjl1hvXqLwpJAmyxA-Z8jzqFG0U,907
16
- xgae/utils/setup_env.py,sha256=MqNG0c2QQBDFU1kI8frxr9kB5d08Mmi3QZ1OoorgIa0,2662
17
- xgae/utils/xml_tool_parser.py,sha256=Mb0d8kBrfyAEvUwW1Nqir-3BgxZRr0ZX3WymQouuFSo,4859
18
- xgae-0.1.17.dist-info/METADATA,sha256=mdr1LUj6EQejsp3BWcl8_RKvtg5kLwpS4jlW-wi2_1k,310
19
- xgae-0.1.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
20
- xgae-0.1.17.dist-info/entry_points.txt,sha256=SWN01JNAncV0oApEvFzpH0wsXfnFlB1adCH4IrAJxGc,163
21
- xgae-0.1.17.dist-info/RECORD,,
File without changes