xgae 0.1.3.tar.gz → 0.1.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic.

Files changed (39)
  1. {xgae-0.1.3 → xgae-0.1.4}/.idea/workspace.xml +6 -3
  2. {xgae-0.1.3 → xgae-0.1.4}/PKG-INFO +1 -1
  3. {xgae-0.1.3 → xgae-0.1.4}/pyproject.toml +1 -1
  4. xgae-0.1.4/src/xgae/engine/responser/xga_non_stream_responser.py +213 -0
  5. xgae-0.1.4/src/xgae/engine/responser/xga_responser_base.py +751 -0
  6. xgae-0.1.4/src/xgae/engine/responser/xga_stream_responser.py +787 -0
  7. {xgae-0.1.3 → xgae-0.1.4}/src/xgae/engine/xga_base.py +21 -9
  8. xgae-0.1.4/src/xgae/engine/xga_engine.py +278 -0
  9. {xgae-0.1.3 → xgae-0.1.4}/src/xgae/engine/xga_mcp_tool_box.py +6 -0
  10. xgae-0.1.4/src/xgae/utils/json_helpers.py +174 -0
  11. {xgae-0.1.3 → xgae-0.1.4}/src/xgae/utils/llm_client.py +17 -6
  12. {xgae-0.1.3 → xgae-0.1.4}/src/xgae/utils/setup_env.py +1 -4
  13. xgae-0.1.4/src/xgae/utils/xml_tool_parser.py +236 -0
  14. xgae-0.1.3/src/xgae/engine/responser/xga_non_stream_responser.py +0 -0
  15. xgae-0.1.3/src/xgae/engine/responser/xga_responser_utils.py +0 -0
  16. xgae-0.1.3/src/xgae/engine/responser/xga_stream_reponser.py +0 -0
  17. xgae-0.1.3/src/xgae/engine/xga_engine.py +0 -99
  18. {xgae-0.1.3 → xgae-0.1.4}/.env +0 -0
  19. {xgae-0.1.3 → xgae-0.1.4}/.idea/.gitignore +0 -0
  20. {xgae-0.1.3 → xgae-0.1.4}/.idea/inspectionProfiles/Project_Default.xml +0 -0
  21. {xgae-0.1.3 → xgae-0.1.4}/.idea/inspectionProfiles/profiles_settings.xml +0 -0
  22. {xgae-0.1.3 → xgae-0.1.4}/.idea/misc.xml +0 -0
  23. {xgae-0.1.3 → xgae-0.1.4}/.idea/modules.xml +0 -0
  24. {xgae-0.1.3 → xgae-0.1.4}/.idea/vcs.xml +0 -0
  25. {xgae-0.1.3 → xgae-0.1.4}/.idea/xgae.iml +0 -0
  26. {xgae-0.1.3 → xgae-0.1.4}/.python-version +0 -0
  27. {xgae-0.1.3 → xgae-0.1.4}/README.md +0 -0
  28. {xgae-0.1.3 → xgae-0.1.4}/mcpservers/custom_servers.json +0 -0
  29. {xgae-0.1.3 → xgae-0.1.4}/mcpservers/xga_server.json +0 -0
  30. {xgae-0.1.3 → xgae-0.1.4}/src/xgae/__init__.py +0 -0
  31. {xgae-0.1.3 → xgae-0.1.4}/src/xgae/engine/xga_prompt_builder.py +0 -0
  32. {xgae-0.1.3 → xgae-0.1.4}/src/xgae/utils/utils.py +0 -0
  33. {xgae-0.1.3 → xgae-0.1.4}/templates/custom_tool_prompt_template.txt +0 -0
  34. {xgae-0.1.3 → xgae-0.1.4}/templates/gemini_system_prompt_template.txt +0 -0
  35. {xgae-0.1.3 → xgae-0.1.4}/templates/general_tool_prompt_template.txt +0 -0
  36. {xgae-0.1.3 → xgae-0.1.4}/templates/scp_test_prompt.txt +0 -0
  37. {xgae-0.1.3 → xgae-0.1.4}/templates/system_prompt_response_sample.txt +0 -0
  38. {xgae-0.1.3 → xgae-0.1.4}/templates/system_prompt_template.txt +0 -0
  39. {xgae-0.1.3 → xgae-0.1.4}/uv.lock +0 -0
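Taken together, items 4–6, 10, and 13–16 show the responser package being rebuilt in 0.1.4: the old xga_responser_utils.py and the misspelled xga_stream_reponser.py are gone, replaced by xga_responser_base.py, xga_stream_responser.py, a rewritten xga_non_stream_responser.py, and new json_helpers.py and xml_tool_parser.py utilities. As rough orientation only, a sketch of the imports this layout implies (module and symbol names are taken from the file list and the diff below; this is not a statement of the package's supported public API):

    # Hypothetical import sketch inferred from the 0.1.4 file layout shown above;
    # verify against the package itself before relying on these paths.
    from xgae.engine.responser.xga_responser_base import TaskResponseProcessor, TaskResponseContext
    from xgae.engine.responser.xga_non_stream_responser import NonStreamTaskResponser
    from xgae.utils.json_helpers import safe_json_parse, to_json_string, format_for_yield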
{xgae-0.1.3 → xgae-0.1.4}/.idea/workspace.xml

@@ -148,7 +148,10 @@
  <workItem from="1755525531052" duration="13030000" />
  <workItem from="1755585869510" duration="5796000" />
  <workItem from="1755593112104" duration="786000" />
- <workItem from="1755611972189" duration="6282000" />
+ <workItem from="1755611972189" duration="13340000" />
+ <workItem from="1755668525673" duration="14877000" />
+ <workItem from="1755700523844" duration="24000" />
+ <workItem from="1755737435202" duration="24716000" />
  </task>
  <servers />
  </component>
@@ -169,9 +172,9 @@
  <component name="com.intellij.coverage.CoverageDataManagerImpl">
  <SUITE FILE_PATH="coverage/xgae$xga_engine.coverage" NAME="xga_engine Coverage Results" MODIFIED="1755580277172" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$xga_prompt_builder.coverage" NAME="xga_prompt_builder Coverage Results" MODIFIED="1755587456555" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
- <SUITE FILE_PATH="coverage/xgae$run_xga_engine.coverage" NAME="run_xga_engine Coverage Results" MODIFIED="1755657339432" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+ <SUITE FILE_PATH="coverage/xgae$run_xga_engine.coverage" NAME="run_xga_engine Coverage Results" MODIFIED="1755768463510" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$utils.coverage" NAME="utils Coverage Results" MODIFIED="1755226923439" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
- <SUITE FILE_PATH="coverage/xgae$setup_env.coverage" NAME="setup_env Coverage Results" MODIFIED="1755565531587" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+ <SUITE FILE_PATH="coverage/xgae$setup_env.coverage" NAME="setup_env Coverage Results" MODIFIED="1755657717310" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$xga_mcp_tool_box.coverage" NAME="xga_mcp_tool_box Coverage Results" MODIFIED="1755583099719" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$llm_client.coverage" NAME="llm_client Coverage Results" MODIFIED="1755565705235" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  </component>
{xgae-0.1.3 → xgae-0.1.4}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: xgae
- Version: 0.1.3
+ Version: 0.1.4
  Summary: Extreme General Agent Engine
  Requires-Python: >=3.13
  Requires-Dist: colorlog>=6.9.0
{xgae-0.1.3 → xgae-0.1.4}/pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "xgae"
- version = "0.1.3"
+ version = "0.1.4"
  description = "Extreme General Agent Engine"
  readme = "README.md"
  requires-python = ">=3.13"
xgae-0.1.4/src/xgae/engine/responser/xga_non_stream_responser.py (new file)

@@ -0,0 +1,213 @@
+import uuid
+import logging
+
+from typing import List, Dict, Any, AsyncGenerator, override, Optional
+
+from xgae.engine.responser.xga_responser_base import TaskResponseProcessor, TaskResponseContext, TaskRunContinuousState
+
+from xgae.utils.json_helpers import (
+    safe_json_parse,
+    to_json_string, format_for_yield
+)
+
+class NonStreamTaskResponser(TaskResponseProcessor):
+    def __init__(self, response_context: TaskResponseContext):
+        super().__init__(response_context)
+
+    @override
+    async def process_response(self,
+                               llm_response: Any,
+                               prompt_messages: List[Dict[str, Any]],
+                               continuous_state: Optional[TaskRunContinuousState] = None) -> AsyncGenerator[Dict[str, Any], None]:
+        content = ""
+        all_tool_data = []  # Stores {'tool_call': ..., 'parsing_details': ...}
+        tool_index = 0
+        assistant_message_object = None
+        tool_result_message_objects = {}
+        finish_reason = None
+
+        native_tool_calls_for_message = []
+        llm_model = self.response_context.get("model_name")
+        thread_id = self.response_context.get("task_id")
+        thread_run_id = self.response_context.get("task_run_id")
+        tool_execution_strategy = self.response_context.get("tool_execution_strategy", "parallel")
+        xml_adding_strategy = self.response_context.get("xml_adding_strategy", "user_message")
+        max_xml_tool_calls = self.response_context.get("max_xml_tool_calls", 0)
+        try:
+            # Save and Yield thread_run_start status message
+            start_content = {"status_type": "thread_run_start", "thread_run_id": thread_run_id}
+            start_msg_obj = self.add_message(
+                type="status", content=start_content,
+                is_llm_message=False, metadata={"thread_run_id": thread_run_id}
+            )
+            if start_msg_obj: yield format_for_yield(start_msg_obj)
+
+            # Extract finish_reason, content, tool calls
+            if hasattr(llm_response, 'choices') and llm_response.choices:
+                if hasattr(llm_response.choices[0], 'finish_reason'):
+                    finish_reason = llm_response.choices[0].finish_reason
+                    logging.info(f"Non-streaming finish_reason: {finish_reason}")
+                    self.trace.event(name="non_streaming_finish_reason", level="DEFAULT",
+                                     status_message=(f"Non-streaming finish_reason: {finish_reason}"))
+                response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0],
+                                                                              'message') else None
+                if response_message:
+                    if hasattr(response_message, 'content') and response_message.content:
+                        content = response_message.content
+
+                        parsed_xml_data = self._parse_xml_tool_calls(content)
+                        if max_xml_tool_calls > 0 and len(parsed_xml_data) > max_xml_tool_calls:
+                            # Truncate content and tool data if limit exceeded
+                            # ... (Truncation logic similar to streaming) ...
+                            if parsed_xml_data:
+                                xml_chunks = self._extract_xml_chunks(content)[:max_xml_tool_calls]
+                                if xml_chunks:
+                                    last_chunk = xml_chunks[-1]
+                                    last_chunk_pos = content.find(last_chunk)
+                                    if last_chunk_pos >= 0: content = content[:last_chunk_pos + len(last_chunk)]
+                                parsed_xml_data = parsed_xml_data[:max_xml_tool_calls]
+                            finish_reason = "xml_tool_limit_reached"
+                        all_tool_data.extend(parsed_xml_data)
+
+                    # if config.native_tool_calling and hasattr(response_message,
+                    #                                           'tool_calls') and response_message.tool_calls:
+                    #     for tool_call in response_message.tool_calls:
+                    #         if hasattr(tool_call, 'function'):
+                    #             exec_tool_call = {
+                    #                 "function_name": tool_call.function.name,
+                    #                 "arguments": safe_json_parse(tool_call.function.arguments) if isinstance(
+                    #                     tool_call.function.arguments, str) else tool_call.function.arguments,
+                    #                 "id": tool_call.id if hasattr(tool_call, 'id') else str(uuid.uuid4())
+                    #             }
+                    #             all_tool_data.append({"tool_call": exec_tool_call, "parsing_details": None})
+                    #             native_tool_calls_for_message.append({
+                    #                 "id": exec_tool_call["id"], "type": "function",
+                    #                 "function": {
+                    #                     "name": tool_call.function.name,
+                    #                     "arguments": tool_call.function.arguments if isinstance(
+                    #                         tool_call.function.arguments, str) else to_json_string(
+                    #                         tool_call.function.arguments)
+                    #                 }
+                    #             })
+
+            # --- SAVE and YIELD Final Assistant Message ---
+            message_data = {"role": "assistant", "content": content,
+                            "tool_calls": native_tool_calls_for_message or None}
+            assistant_message_object = self._add_message_with_agent_info(type="assistant", content=message_data,
+                                                                         is_llm_message=True, metadata={"thread_run_id": thread_run_id}
+                                                                         )
+            if assistant_message_object:
+                yield assistant_message_object
+            else:
+                logging.error(f"Failed to save non-streaming assistant message for thread {thread_id}")
+                self.trace.event(name="failed_to_save_non_streaming_assistant_message_for_thread", level="ERROR",
+                                 status_message=(
+                                     f"Failed to save non-streaming assistant message for thread {thread_id}"))
+                err_content = {"role": "system", "status_type": "error", "message": "Failed to save assistant message"}
+                err_msg_obj = self.add_message(
+                    type="status", content=err_content,
+                    is_llm_message=False, metadata={"thread_run_id": thread_run_id}
+                )
+                if err_msg_obj: yield format_for_yield(err_msg_obj)
+
+            # --- Execute Tools and Yield Results ---
+            tool_calls_to_execute = [item['tool_call'] for item in all_tool_data]
+            if tool_calls_to_execute:
+                logging.info(
+                    f"Executing {len(tool_calls_to_execute)} tools with strategy: {tool_execution_strategy}")
+                self.trace.event(name="executing_tools_with_strategy", level="DEFAULT", status_message=(
+                    f"Executing {len(tool_calls_to_execute)} tools with strategy: {tool_execution_strategy}"))
+                tool_results = await self._execute_tools(tool_calls_to_execute, tool_execution_strategy)
+
+                for i, (returned_tool_call, result) in enumerate(tool_results):
+                    original_data = all_tool_data[i]
+                    tool_call_from_data = original_data['tool_call']
+                    parsing_details = original_data['parsing_details']
+                    current_assistant_id = assistant_message_object['message_id'] if assistant_message_object else None
+
+                    context = self._create_tool_context(
+                        tool_call_from_data, tool_index, current_assistant_id, parsing_details
+                    )
+                    context.result = result
+
+                    # Save and Yield start status
+                    started_msg_obj = self._yield_and_save_tool_started(context, thread_id, thread_run_id)
+                    if started_msg_obj: yield format_for_yield(started_msg_obj)
+
+                    # Save tool result
+                    saved_tool_result_object = self._add_tool_result(
+                        thread_id, tool_call_from_data, result, xml_adding_strategy,
+                        current_assistant_id, parsing_details
+                    )
+
+                    # Save and Yield completed/failed status
+                    completed_msg_obj = self._yield_and_save_tool_completed(
+                        context,
+                        saved_tool_result_object['message_id'] if saved_tool_result_object else None,
+                        thread_id, thread_run_id
+                    )
+                    if completed_msg_obj: yield format_for_yield(completed_msg_obj)
+
+                    # Yield the saved tool result object
+                    if saved_tool_result_object:
+                        tool_result_message_objects[tool_index] = saved_tool_result_object
+                        yield format_for_yield(saved_tool_result_object)
+                    else:
+                        logging.error(f"Failed to save tool result for index {tool_index}")
+                        self.trace.event(name="failed_to_save_tool_result_for_index", level="ERROR",
+                                         status_message=(f"Failed to save tool result for index {tool_index}"))
+
+                    tool_index += 1
+
+            # --- Save and Yield Final Status ---
+            if finish_reason:
+                finish_content = {"status_type": "finish", "finish_reason": finish_reason}
+                finish_msg_obj = self.add_message(
+                    type="status", content=finish_content,
+                    is_llm_message=False, metadata={"thread_run_id": thread_run_id}
+                )
+                if finish_msg_obj: yield format_for_yield(finish_msg_obj)
+
+            # --- Save and Yield assistant_response_end ---
+            if assistant_message_object:  # Only save if assistant message was saved
+                try:
+                    # Save the full LiteLLM response object directly in content
+                    self.add_message(
+                        type="assistant_response_end",
+                        content=llm_response,
+                        is_llm_message=False,
+                        metadata={"thread_run_id": thread_run_id}
+                    )
+                    logging.info("Assistant response end saved for non-stream")
+                except Exception as e:
+                    logging.error(f"Error saving assistant response end for non-stream: {str(e)}")
+                    self.trace.event(name="error_saving_assistant_response_end_for_non_stream", level="ERROR",
+                                     status_message=(f"Error saving assistant response end for non-stream: {str(e)}"))
+
+        except Exception as e:
+            logging.error(f"Error processing non-streaming response: {str(e)}", exc_info=True)
+            self.trace.event(name="error_processing_non_streaming_response", level="ERROR",
+                             status_message=(f"Error processing non-streaming response: {str(e)}"))
+            # Save and yield error status
+            err_content = {"role": "system", "status_type": "error", "message": str(e)}
+            err_msg_obj = self.add_message(
+                type="status", content=err_content,
+                is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
+            )
+            if err_msg_obj: yield format_for_yield(err_msg_obj)
+
+            # Re-raise the same exception (not a new one) to ensure proper error propagation
+            logging.critical(f"Re-raising error to stop further processing: {str(e)}")
+            self.trace.event(name="re_raising_error_to_stop_further_processing", level="CRITICAL",
+                             status_message=(f"Re-raising error to stop further processing: {str(e)}"))
+            raise  # Use bare 'raise' to preserve the original exception with its traceback
+
+        finally:
+            # Save and Yield the final thread_run_end status
+            end_content = {"status_type": "thread_run_end"}
+            end_msg_obj = self.add_message(
+                type="status", content=end_content,
+                is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
+            )
+            if end_msg_obj: yield format_for_yield(end_msg_obj)
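For orientation: process_response above is an async generator that yields status, assistant, and tool-result message objects as it works through a single non-streaming LLM response. A minimal consumption sketch, assuming you already have a TaskResponseContext instance and a non-streaming llm_response object (how those are constructed is not shown in this diff):

    # Hypothetical usage sketch; response_context, llm_response, and prompt_messages
    # are assumed inputs whose construction this diff does not show.
    import asyncio
    from xgae.engine.responser.xga_non_stream_responser import NonStreamTaskResponser

    async def run(response_context, llm_response, prompt_messages):
        responser = NonStreamTaskResponser(response_context)
        # Iterate the yielded message objects (thread_run_start, assistant, tool results, thread_run_end).
        async for message in responser.process_response(llm_response, prompt_messages):
            print(message)

    # asyncio.run(run(response_context, llm_response, prompt_messages))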