massgen 0.1.0a3__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- massgen/__init__.py +1 -1
- massgen/agent_config.py +17 -0
- massgen/api_params_handler/_api_params_handler_base.py +1 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +15 -2
- massgen/api_params_handler/_claude_api_params_handler.py +8 -1
- massgen/api_params_handler/_gemini_api_params_handler.py +73 -0
- massgen/api_params_handler/_response_api_params_handler.py +8 -1
- massgen/backend/base.py +83 -0
- massgen/backend/{base_with_mcp.py → base_with_custom_tool_and_mcp.py} +286 -15
- massgen/backend/capabilities.py +6 -6
- massgen/backend/chat_completions.py +200 -103
- massgen/backend/claude.py +115 -18
- massgen/backend/claude_code.py +378 -14
- massgen/backend/docs/CLAUDE_API_RESEARCH.md +3 -3
- massgen/backend/gemini.py +1333 -1629
- massgen/backend/gemini_mcp_manager.py +545 -0
- massgen/backend/gemini_trackers.py +344 -0
- massgen/backend/gemini_utils.py +43 -0
- massgen/backend/grok.py +39 -6
- massgen/backend/response.py +147 -81
- massgen/cli.py +605 -110
- massgen/config_builder.py +376 -27
- massgen/configs/README.md +123 -80
- massgen/configs/basic/multi/three_agents_default.yaml +3 -3
- massgen/configs/basic/single/single_agent.yaml +1 -1
- massgen/configs/providers/openai/gpt5_nano.yaml +3 -3
- massgen/configs/tools/custom_tools/claude_code_custom_tool_example.yaml +32 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_example_no_path.yaml +28 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +40 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_wrong_mcp_example.yaml +38 -0
- massgen/configs/tools/custom_tools/claude_code_wrong_custom_tool_with_mcp_example.yaml +38 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/claude_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gemini_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/github_issue_market_analysis.yaml +94 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gpt5_nano_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_example.yaml +25 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_example_no_path.yaml +23 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_wrong_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/gpt_oss_wrong_custom_tool_with_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/grok3_mini_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_example.yaml +25 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_example_no_path.yaml +23 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +36 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_wrong_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/qwen_api_wrong_custom_tool_with_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/qwen_local_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +1 -1
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +7 -29
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +5 -6
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +4 -4
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +4 -4
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +2 -2
- massgen/configs/voting/gemini_gpt_voting_sensitivity.yaml +67 -0
- massgen/formatter/_chat_completions_formatter.py +104 -0
- massgen/formatter/_claude_formatter.py +120 -0
- massgen/formatter/_gemini_formatter.py +448 -0
- massgen/formatter/_response_formatter.py +88 -0
- massgen/frontend/coordination_ui.py +4 -2
- massgen/logger_config.py +35 -3
- massgen/message_templates.py +56 -6
- massgen/orchestrator.py +512 -16
- massgen/stream_chunk/base.py +3 -0
- massgen/tests/custom_tools_example.py +392 -0
- massgen/tests/mcp_test_server.py +17 -7
- massgen/tests/test_config_builder.py +423 -0
- massgen/tests/test_custom_tools.py +401 -0
- massgen/tests/test_intelligent_planning_mode.py +643 -0
- massgen/tests/test_tools.py +127 -0
- massgen/token_manager/token_manager.py +13 -4
- massgen/tool/README.md +935 -0
- massgen/tool/__init__.py +39 -0
- massgen/tool/_async_helpers.py +70 -0
- massgen/tool/_basic/__init__.py +8 -0
- massgen/tool/_basic/_two_num_tool.py +24 -0
- massgen/tool/_code_executors/__init__.py +10 -0
- massgen/tool/_code_executors/_python_executor.py +74 -0
- massgen/tool/_code_executors/_shell_executor.py +61 -0
- massgen/tool/_exceptions.py +39 -0
- massgen/tool/_file_handlers/__init__.py +10 -0
- massgen/tool/_file_handlers/_file_operations.py +218 -0
- massgen/tool/_manager.py +634 -0
- massgen/tool/_registered_tool.py +88 -0
- massgen/tool/_result.py +66 -0
- massgen/tool/_self_evolution/_github_issue_analyzer.py +369 -0
- massgen/tool/docs/builtin_tools.md +681 -0
- massgen/tool/docs/exceptions.md +794 -0
- massgen/tool/docs/execution_results.md +691 -0
- massgen/tool/docs/manager.md +887 -0
- massgen/tool/docs/workflow_toolkits.md +529 -0
- massgen/tool/workflow_toolkits/__init__.py +57 -0
- massgen/tool/workflow_toolkits/base.py +55 -0
- massgen/tool/workflow_toolkits/new_answer.py +126 -0
- massgen/tool/workflow_toolkits/vote.py +167 -0
- {massgen-0.1.0a3.dist-info → massgen-0.1.2.dist-info}/METADATA +87 -129
- {massgen-0.1.0a3.dist-info → massgen-0.1.2.dist-info}/RECORD +120 -44
- {massgen-0.1.0a3.dist-info → massgen-0.1.2.dist-info}/WHEEL +0 -0
- {massgen-0.1.0a3.dist-info → massgen-0.1.2.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.0a3.dist-info → massgen-0.1.2.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.0a3.dist-info → massgen-0.1.2.dist-info}/top_level.txt +0 -0
massgen/backend/response.py
CHANGED
@@ -23,16 +23,18 @@ from ..formatter import ResponseFormatter
 from ..logger_config import log_backend_agent_message, log_stream_chunk, logger
 from ..stream_chunk import ChunkType, TextStreamChunk
 from .base import FilesystemSupport, StreamChunk
-from .
+from .base_with_custom_tool_and_mcp import CustomToolAndMCPBackend, UploadFileError


-class ResponseBackend(
+class ResponseBackend(CustomToolAndMCPBackend):
     """Backend using the standard Response API format with multimodal support."""

     def __init__(self, api_key: Optional[str] = None, **kwargs):
         super().__init__(api_key, **kwargs)
         self.api_key = api_key or os.getenv("OPENAI_API_KEY")
         self.formatter = ResponseFormatter()
+
+        # Initialize API params handler after custom_tool_manager
         self.api_params_handler = ResponseAPIParamsHandler(self)

         # Queue for pending image saves
@@ -88,7 +90,7 @@ class ResponseBackend(MCPBackend):
         except Exception:
             pass

-    async def
+    async def _stream_without_custom_and_mcp_tools(
         self,
         messages: List[Dict[str, Any]],
         tools: List[Dict[str, Any]],
@@ -123,6 +125,8 @@ class ResponseBackend(MCPBackend):
                 name = tool.get("function", {}).get("name") if "function" in tool else tool.get("name")
                 if name and name in self._mcp_function_names:
                     continue
+                if name and name in self._custom_tool_names:
+                    continue
             elif tool.get("type") == "mcp":
                 continue
             non_mcp_tools.append(tool)
@@ -133,7 +137,7 @@ class ResponseBackend(MCPBackend):
         async for chunk in self._process_stream(stream, all_params, agent_id):
             yield chunk

-    async def
+    async def _stream_with_custom_and_mcp_tools(
         self,
         current_messages: List[Dict[str, Any]],
         tools: List[Dict[str, Any]],
@@ -218,16 +222,127 @@ class ResponseBackend(MCPBackend):

             # Execute any captured function calls
             if captured_function_calls and response_completed:
-                #
-
+                # Categorize function calls
+                mcp_calls = []
+                custom_calls = []
+                provider_calls = []
+
+                for call in captured_function_calls:
+                    if call["name"] in self._mcp_functions:
+                        mcp_calls.append(call)
+                    elif call["name"] in self._custom_tool_names:
+                        custom_calls.append(call)
+                    else:
+                        provider_calls.append(call)

-
-
+                # If there are provider calls (non-MCP, non-custom), let API handle them
+                if provider_calls:
+                    logger.info(f"Provider function calls detected: {[call['name'] for call in provider_calls]}. Ending local processing.")
                     yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
                     return

+                # Initialize for execution
+                functions_executed = False
+                updated_messages = current_messages.copy()
+                processed_call_ids = set()  # Initialize processed_call_ids here
+
+                # Execute custom tools first
+                for call in custom_calls:
+                    try:
+                        # Yield custom tool call status
+                        yield TextStreamChunk(
+                            type=ChunkType.CUSTOM_TOOL_STATUS,
+                            status="custom_tool_called",
+                            content=f"🔧 [Custom Tool] Calling {call['name']}...",
+                            source=f"custom_{call['name']}",
+                        )
+
+                        # Yield custom tool arguments (like MCP tools)
+                        yield TextStreamChunk(
+                            type=ChunkType.CUSTOM_TOOL_STATUS,
+                            status="function_call",
+                            content=f"Arguments for Calling {call['name']}: {call['arguments']}",
+                            source=f"custom_{call['name']}",
+                        )
+
+                        # Execute custom tool
+                        result = await self._execute_custom_tool(call)
+
+                        # Add function call and result to messages
+                        function_call_msg = {
+                            "type": "function_call",
+                            "call_id": call["call_id"],
+                            "name": call["name"],
+                            "arguments": call["arguments"],
+                        }
+                        updated_messages.append(function_call_msg)
+
+                        function_output_msg = {
+                            "type": "function_call_output",
+                            "call_id": call["call_id"],
+                            "output": str(result),
+                        }
+                        updated_messages.append(function_output_msg)
+
+                        # Yield custom tool results (like MCP tools)
+                        yield TextStreamChunk(
+                            type=ChunkType.CUSTOM_TOOL_STATUS,
+                            status="function_call_output",
+                            content=f"Results for Calling {call['name']}: {str(result)}",
+                            source=f"custom_{call['name']}",
+                        )
+
+                        # Yield custom tool response status
+                        yield TextStreamChunk(
+                            type=ChunkType.CUSTOM_TOOL_STATUS,
+                            status="custom_tool_response",
+                            content=f"✅ [Custom Tool] {call['name']} completed",
+                            source=f"custom_{call['name']}",
+                        )
+
+                        processed_call_ids.add(call["call_id"])
+                        functions_executed = True
+                        logger.info(f"Executed custom tool: {call['name']}")
+
+                    except Exception as e:
+                        logger.error(f"Error executing custom tool {call['name']}: {e}")
+                        error_msg = f"Error executing {call['name']}: {str(e)}"
+
+                        # Yield error with arguments shown
+                        yield TextStreamChunk(
+                            type=ChunkType.CUSTOM_TOOL_STATUS,
+                            status="function_call",
+                            content=f"Arguments for Calling {call['name']}: {call['arguments']}",
+                            source=f"custom_{call['name']}",
+                        )
+
+                        yield TextStreamChunk(
+                            type=ChunkType.CUSTOM_TOOL_STATUS,
+                            status="custom_tool_error",
+                            content=f"❌ [Custom Tool Error] {error_msg}",
+                            source=f"custom_{call['name']}",
+                        )
+
+                        # Add error result to messages
+                        function_call_msg = {
+                            "type": "function_call",
+                            "call_id": call["call_id"],
+                            "name": call["name"],
+                            "arguments": call["arguments"],
+                        }
+                        updated_messages.append(function_call_msg)
+
+                        error_output_msg = {
+                            "type": "function_call_output",
+                            "call_id": call["call_id"],
+                            "output": error_msg,
+                        }
+                        updated_messages.append(error_output_msg)
+                        processed_call_ids.add(call["call_id"])
+                        functions_executed = True
+
                 # Check circuit breaker status before executing MCP functions
-                if not await super()._check_circuit_breaker_before_execution():
+                if mcp_calls and not await super()._check_circuit_breaker_before_execution():
                     logger.warning("All MCP servers blocked by circuit breaker")
                     yield TextStreamChunk(
                         type=ChunkType.MCP_STATUS,
@@ -238,26 +353,30 @@ class ResponseBackend(MCPBackend):
                     yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
                     return

-                # Execute
+                # Execute MCP function calls
                 mcp_functions_executed = False
-                updated_messages = current_messages.copy()

-                # Check if planning mode is enabled - block MCP tool execution during planning
+                # Check if planning mode is enabled - selectively block MCP tool execution during planning
                 if self.is_planning_mode_enabled():
-
-
-
-
-
-
-
-
-
-
+                    blocked_tools = self.get_planning_mode_blocked_tools()
+
+                    if not blocked_tools:
+                        # Empty set means block ALL MCP tools (backward compatible)
+                        logger.info("[Response] Planning mode enabled - blocking ALL MCP tool execution")
+                        yield StreamChunk(
+                            type="mcp_status",
+                            status="planning_mode_blocked",
+                            content="🚫 [MCP] Planning mode active - all MCP tools blocked during coordination",
+                            source="planning_mode",
+                        )
+                        # Skip all MCP tool execution but still continue with workflow
+                        yield StreamChunk(type="done")
+                        return
+                    else:
+                        # Selective blocking - log but continue to check each tool individually
+                        logger.info(f"[Response] Planning mode enabled - selective blocking of {len(blocked_tools)} tools")

                 # Ensure every captured function call gets a result to prevent hanging
-                processed_call_ids = set()
-
                 for call in captured_function_calls:
                     function_name = call["name"]
                     if function_name in self._mcp_functions:
@@ -367,6 +486,7 @@ class ResponseBackend(MCPBackend):
                             )

                             mcp_functions_executed = True
+                            functions_executed = True

                 # Ensure all captured function calls have results to prevent hanging
                 for call in captured_function_calls:
@@ -391,15 +511,14 @@ class ResponseBackend(MCPBackend):
                         mcp_functions_executed = True

                 # Trim history after function executions to bound memory usage
-                if mcp_functions_executed:
+                if functions_executed or mcp_functions_executed:
                     updated_messages = super()._trim_message_history(updated_messages)

                 # Recursive call with updated messages
-                async for chunk in self.
+                async for chunk in self._stream_with_custom_and_mcp_tools(updated_messages, tools, client, **kwargs):
                     yield chunk
             else:
-                # No
-
+                # No functions were executed, we're done
                 yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
                 return

@@ -628,59 +747,6 @@ class ResponseBackend(MCPBackend):
         self._vector_store_ids.clear()
         self._uploaded_file_ids.clear()

-    # def _save_image_sync(
-    #     self,
-    #     image_data: str,
-    #     prompt: str = None,
-    #     image_format: str = "png",
-    # ) -> Optional[str]:
-    #     """
-    #     Save generated image directly to filesystem (synchronous version).
-
-    #     Args:
-    #         image_data: Base64 encoded image data
-    #         prompt: Generation prompt (used for naming)
-    #         image_format: Image format (default png)
-
-    #     Returns:
-    #         Saved file path, or None if failed
-    #     """
-    #     try:
-    #         # Use agent's filesystem workspace if available, otherwise use current working directory
-    #         if self.filesystem_manager:
-    #             workspace_path = self.filesystem_manager.get_current_workspace()
-    #         else:
-    #             workspace_path = Path.cwd()
-
-    #         # Create generated_images subdirectory path
-    #         images_dir = workspace_path
-
-    #         # Create directory if it doesn't exist
-    #         images_dir.mkdir(parents=True, exist_ok=True)
-
-    #         # Generate filename
-    #         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    #         if prompt:
-    #             # Clean prompt for filename
-    #             clean_prompt = "".join(c for c in prompt[:30] if c.isalnum() or c in (" ", "-", "_")).strip()
-    #             clean_prompt = clean_prompt.replace(" ", "_")
-    #             filename = f"{timestamp}_{clean_prompt}.{image_format}"
-    #         else:
-    #             filename = f"{timestamp}_generated.{image_format}"
-
-    #         file_path = images_dir / filename
-
-    #         # Decode base64 and write to file
-    #         image_bytes = base64.b64decode(image_data)
-    #         file_path.write_bytes(image_bytes)
-
-    #         logger.info(f"Image saved to: {file_path}")
-    #         return str(file_path)
-
-    #     except Exception as e:
-    #         logger.error(f"Error saving image: {e}")
-    #         return None
-
     def _convert_mcp_tools_to_openai_format(self) -> List[Dict[str, Any]]:
         """Convert MCP tools (stdio + streamable-http) to OpenAI function declarations."""
         if not self._mcp_functions: