massgen 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of massgen has been flagged as possibly problematic by the registry.
- massgen/__init__.py +1 -1
- massgen/backend/base_with_custom_tool_and_mcp.py +453 -23
- massgen/backend/capabilities.py +39 -0
- massgen/backend/chat_completions.py +111 -197
- massgen/backend/claude.py +210 -181
- massgen/backend/gemini.py +1015 -1559
- massgen/backend/grok.py +3 -2
- massgen/backend/response.py +160 -220
- massgen/cli.py +73 -6
- massgen/config_builder.py +20 -54
- massgen/config_validator.py +931 -0
- massgen/configs/README.md +51 -8
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +1 -1
- massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +1 -1
- massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +1 -1
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_langgraph_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_openai_assistant_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/agentscope_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/langgraph_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/interop/openai_assistant_lesson_planner_example.yaml +50 -0
- massgen/configs/tools/custom_tools/interop/smolagent_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/two_models_with_tools_example.yaml +44 -0
- massgen/formatter/_gemini_formatter.py +61 -15
- massgen/tests/test_ag2_lesson_planner.py +223 -0
- massgen/tests/test_config_validator.py +1156 -0
- massgen/tests/test_langgraph_lesson_planner.py +223 -0
- massgen/tool/__init__.py +2 -9
- massgen/tool/_decorators.py +52 -0
- massgen/tool/_extraframework_agents/ag2_lesson_planner_tool.py +251 -0
- massgen/tool/_extraframework_agents/agentscope_lesson_planner_tool.py +303 -0
- massgen/tool/_extraframework_agents/langgraph_lesson_planner_tool.py +275 -0
- massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py +247 -0
- massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py +180 -0
- massgen/tool/_manager.py +102 -16
- massgen/tool/_registered_tool.py +3 -0
- massgen/tool/_result.py +3 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/METADATA +104 -76
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/RECORD +50 -39
- massgen/backend/gemini_mcp_manager.py +0 -545
- massgen/backend/gemini_trackers.py +0 -344
- massgen/configs/tools/custom_tools/multimodal_tools/playwright_with_img_understanding.yaml +0 -98
- massgen/configs/tools/custom_tools/multimodal_tools/understand_video_example.yaml +0 -54
- massgen/tools/__init__.py +0 -8
- massgen/tools/_planning_mcp_server.py +0 -520
- massgen/tools/planning_dataclasses.py +0 -434
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/WHEEL +0 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.5.dist-info → massgen-0.1.6.dist-info}/top_level.txt +0 -0
massgen/backend/grok.py
CHANGED
@@ -20,11 +20,12 @@ from __future__ import annotations

 import logging
 import os
-from typing import Any, Dict, List, Optional
+from typing import Any, AsyncGenerator, Dict, List, Optional

 from openai import AsyncOpenAI

 from ..logger_config import log_stream_chunk
+from .base import StreamChunk
 from .chat_completions import ChatCompletionsBackend

 logger = logging.getLogger(__name__)
@@ -70,7 +71,7 @@ class GrokBackend(ChatCompletionsBackend):
         tools: List[Dict[str, Any]],
         client,
         **kwargs,
-    ):
+    ) -> AsyncGenerator[StreamChunk, None]:
         """Override to add Grok-specific search parameters before API call."""
         # Build API params using parent method
         all_params = {**self.config, **kwargs}
massgen/backend/response.py
CHANGED
@@ -23,7 +23,12 @@ from ..formatter import ResponseFormatter
 from ..logger_config import log_backend_agent_message, log_stream_chunk, logger
 from ..stream_chunk import ChunkType, TextStreamChunk
 from .base import FilesystemSupport, StreamChunk
-from .base_with_custom_tool_and_mcp import CustomToolAndMCPBackend
+from .base_with_custom_tool_and_mcp import (
+    CustomToolAndMCPBackend,
+    CustomToolChunk,
+    ToolExecutionConfig,
+    UploadFileError,
+)


 class ResponseBackend(CustomToolAndMCPBackend):
@@ -138,6 +143,97 @@ class ResponseBackend(CustomToolAndMCPBackend):
         async for chunk in self._process_stream(stream, all_params, agent_id):
             yield chunk

+    def _append_tool_result_message(
+        self,
+        updated_messages: List[Dict[str, Any]],
+        call: Dict[str, Any],
+        result: Any,
+        tool_type: str,
+    ) -> None:
+        """Append tool result to messages in Response API format.
+
+        Args:
+            updated_messages: Message list to append to
+            call: Tool call dictionary with call_id, name, arguments
+            result: Tool execution result
+            tool_type: "custom" or "mcp"
+
+        Note:
+            Response API requires both function_call and function_call_output messages.
+        """
+        # Add function call message
+        function_call_msg = {
+            "type": "function_call",
+            "call_id": call.get("call_id", ""),
+            "name": call.get("name", ""),
+            "arguments": call.get("arguments", "{}"),
+        }
+        updated_messages.append(function_call_msg)
+
+        # Add function output message
+        function_output_msg = {
+            "type": "function_call_output",
+            "call_id": call.get("call_id", ""),
+            "output": str(result),
+        }
+        updated_messages.append(function_output_msg)
+
+    def _append_tool_error_message(
+        self,
+        updated_messages: List[Dict[str, Any]],
+        call: Dict[str, Any],
+        error_msg: str,
+        tool_type: str,
+    ) -> None:
+        """Append tool error to messages in Response API format.
+
+        Args:
+            updated_messages: Message list to append to
+            call: Tool call dictionary with call_id, name, arguments
+            error_msg: Error message string
+            tool_type: "custom" or "mcp"
+
+        Note:
+            Response API requires both function_call and function_call_output messages.
+        """
+        # Add function call message
+        function_call_msg = {
+            "type": "function_call",
+            "call_id": call.get("call_id", ""),
+            "name": call.get("name", ""),
+            "arguments": call.get("arguments", "{}"),
+        }
+        updated_messages.append(function_call_msg)
+
+        # Add error output message
+        error_output_msg = {
+            "type": "function_call_output",
+            "call_id": call.get("call_id", ""),
+            "output": error_msg,
+        }
+        updated_messages.append(error_output_msg)
+
+    async def _execute_custom_tool(self, call: Dict[str, Any]) -> AsyncGenerator[CustomToolChunk, None]:
+        """Execute custom tool with streaming support - async generator for base class.
+
+        This method is called by _execute_tool_with_logging and yields CustomToolChunk
+        objects for intermediate streaming output. The base class detects the async
+        generator and streams intermediate results to users in real-time.
+
+        Args:
+            call: Tool call dictionary with name and arguments
+
+        Yields:
+            CustomToolChunk objects with streaming data
+
+        Note:
+            - Intermediate chunks (completed=False) are streamed to users in real-time
+            - Final chunk (completed=True) contains the accumulated result for message history
+            - The base class automatically handles extracting and displaying intermediate chunks
+        """
+        async for chunk in self.stream_custom_tool_execution(call):
+            yield chunk
+
     async def _stream_with_custom_and_mcp_tools(
         self,
         current_messages: List[Dict[str, Any]],
@@ -224,18 +320,8 @@ class ResponseBackend(CustomToolAndMCPBackend):

         # Execute any captured function calls
         if captured_function_calls and response_completed:
-            # Categorize function calls
-            mcp_calls = []
-            custom_calls = []
-            provider_calls = []
-
-            for call in captured_function_calls:
-                if call["name"] in self._mcp_functions:
-                    mcp_calls.append(call)
-                elif call["name"] in self._custom_tool_names:
-                    custom_calls.append(call)
-                else:
-                    provider_calls.append(call)
+            # Categorize function calls using helper method
+            mcp_calls, custom_calls, provider_calls = self._categorize_tool_calls(captured_function_calls)

             # If there are provider calls (non-MCP, non-custom), let API handle them
             if provider_calls:
@@ -248,100 +334,54 @@ class ResponseBackend(CustomToolAndMCPBackend):
             updated_messages = current_messages.copy()
             processed_call_ids = set()  # Initialize processed_call_ids here

-                        type=ChunkType.CUSTOM_TOOL_STATUS,
-                        status="function_call",
-                        content=f"Arguments for Calling {call['name']}: {call['arguments']}",
-                        source=f"custom_{call['name']}",
-                    )
-
-                    # Execute custom tool
-                    result = await self._execute_custom_tool(call)
-
-                    # Add function call and result to messages
-                    function_call_msg = {
-                        "type": "function_call",
-                        "call_id": call["call_id"],
-                        "name": call["name"],
-                        "arguments": call["arguments"],
-                    }
-                    updated_messages.append(function_call_msg)
-
-                    function_output_msg = {
-                        "type": "function_call_output",
-                        "call_id": call["call_id"],
-                        "output": str(result),
-                    }
-                    updated_messages.append(function_output_msg)
-
-                    # Yield custom tool results (like MCP tools)
-                    yield TextStreamChunk(
-                        type=ChunkType.CUSTOM_TOOL_STATUS,
-                        status="function_call_output",
-                        content=f"Results for Calling {call['name']}: {str(result)}",
-                        source=f"custom_{call['name']}",
-                    )
-
-                    # Yield custom tool response status
-                    yield TextStreamChunk(
-                        type=ChunkType.CUSTOM_TOOL_STATUS,
-                        status="custom_tool_response",
-                        content=f"✅ [Custom Tool] {call['name']} completed",
-                        source=f"custom_{call['name']}",
-                    )
-
-                    processed_call_ids.add(call["call_id"])
-                    functions_executed = True
-                    logger.info(f"Executed custom tool: {call['name']}")
+            # Configuration for custom tool execution
+            CUSTOM_TOOL_CONFIG = ToolExecutionConfig(
+                tool_type="custom",
+                chunk_type="custom_tool_status",
+                emoji_prefix="🔧 [Custom Tool]",
+                success_emoji="✅ [Custom Tool]",
+                error_emoji="❌ [Custom Tool Error]",
+                source_prefix="custom_",
+                status_called="custom_tool_called",
+                status_response="custom_tool_response",
+                status_error="custom_tool_error",
+                execution_callback=self._execute_custom_tool,
+            )

+            # Configuration for MCP tool execution
+            MCP_TOOL_CONFIG = ToolExecutionConfig(
+                tool_type="mcp",
+                chunk_type="mcp_status",
+                emoji_prefix="🔧 [MCP Tool]",
+                success_emoji="✅ [MCP Tool]",
+                error_emoji="❌ [MCP Tool Error]",
+                source_prefix="mcp_",
+                status_called="mcp_tool_called",
+                status_response="mcp_tool_response",
+                status_error="mcp_tool_error",
+                execution_callback=self._execute_mcp_function_with_retry,
+            )

-                        content=f"Arguments for Calling {call['name']}: {call['arguments']}",
-                        source=f"custom_{call['name']}",
-                    )
+            chunk_type_map = {
+                "custom_tool_status": ChunkType.CUSTOM_TOOL_STATUS,
+                "mcp_status": ChunkType.MCP_STATUS,
+            }

+            # Execute custom tools using unified method
+            for call in custom_calls:
+                async for chunk in self._execute_tool_with_logging(
+                    call,
+                    CUSTOM_TOOL_CONFIG,
+                    updated_messages,
+                    processed_call_ids,
+                ):
                     yield TextStreamChunk(
-                        type=
-                        status="
-                        content=
-                        source=
+                        type=chunk_type_map.get(chunk.type, chunk.type),
+                        status=getattr(chunk, "status", None),
+                        content=getattr(chunk, "content", None),
+                        source=getattr(chunk, "source", None),
                     )
-
-                    # Add error result to messages
-                    function_call_msg = {
-                        "type": "function_call",
-                        "call_id": call["call_id"],
-                        "name": call["name"],
-                        "arguments": call["arguments"],
-                    }
-                    updated_messages.append(function_call_msg)
-
-                    error_output_msg = {
-                        "type": "function_call_output",
-                        "call_id": call["call_id"],
-                        "output": error_msg,
-                    }
-                    updated_messages.append(error_output_msg)
-                    processed_call_ids.add(call["call_id"])
-                    functions_executed = True
+                functions_executed = True

             # Check circuit breaker status before executing MCP functions
             if mcp_calls and not await super()._check_circuit_breaker_before_execution():
@@ -352,11 +392,8 @@ class ResponseBackend(CustomToolAndMCPBackend):
                     content="⚠️ [MCP] All servers blocked by circuit breaker",
                     source="circuit_breaker",
                 )
-
-            # Execute MCP function calls
-            mcp_functions_executed = False
+                # Skip MCP tool execution but continue with custom tool results
+                mcp_calls = []

             # Check if planning mode is enabled - selectively block MCP tool execution during planning
             if self.is_planning_mode_enabled():
@@ -372,123 +409,26 @@ class ResponseBackend(CustomToolAndMCPBackend):
                         source="planning_mode",
                     )
                     # Skip all MCP tool execution but still continue with workflow
-                    return
+                    mcp_calls = []
                 else:
                     # Selective blocking - log but continue to check each tool individually
                     logger.info(f"[Response] Planning mode enabled - selective blocking of {len(blocked_tools)} tools")

-            for call in
+            # Execute MCP tools using unified method
+            for call in mcp_calls:
+                async for chunk in self._execute_tool_with_logging(
+                    call,
+                    MCP_TOOL_CONFIG,
+                    updated_messages,
+                    processed_call_ids,
+                ):
                     yield TextStreamChunk(
-                        type=
-                        status="
-                        content=
-                        source=
+                        type=chunk_type_map.get(chunk.type, chunk.type),
+                        status=getattr(chunk, "status", None),
+                        content=getattr(chunk, "content", None),
+                        source=getattr(chunk, "source", None),
                     )
-
-                try:
-                    # Execute MCP function with retry and exponential backoff
-                    result, result_obj = await super()._execute_mcp_function_with_retry(
-                        function_name,
-                        call["arguments"],
-                    )
-
-                    # Check if function failed after all retries
-                    if isinstance(result, str) and result.startswith("Error:"):
-                        # Log failure but still create tool response
-                        logger.warning(f"MCP function {function_name} failed after retries: {result}")
-
-                        # Add error result to messages
-                        function_call_msg = {
-                            "type": "function_call",
-                            "call_id": call["call_id"],
-                            "name": function_name,
-                            "arguments": call["arguments"],
-                        }
-                        updated_messages.append(function_call_msg)
-
-                        error_output_msg = {
-                            "type": "function_call_output",
-                            "call_id": call["call_id"],
-                            "output": result,
-                        }
-                        updated_messages.append(error_output_msg)
-
-                        processed_call_ids.add(call["call_id"])
-                        mcp_functions_executed = True
-                        continue
-
-                except Exception as e:
-                    # Only catch unexpected non-MCP system errors
-                    logger.error(f"Unexpected error in MCP function execution: {e}")
-                    error_msg = f"Error executing {function_name}: {str(e)}"
-
-                    # Add error result to messages
-                    function_call_msg = {
-                        "type": "function_call",
-                        "call_id": call["call_id"],
-                        "name": function_name,
-                        "arguments": call["arguments"],
-                    }
-                    updated_messages.append(function_call_msg)
-
-                    error_output_msg = {
-                        "type": "function_call_output",
-                        "call_id": call["call_id"],
-                        "output": error_msg,
-                    }
-                    updated_messages.append(error_output_msg)
-
-                    processed_call_ids.add(call["call_id"])
-                    mcp_functions_executed = True
-                    continue
-
-                # Add function call to messages and yield status chunk
-                function_call_msg = {
-                    "type": "function_call",
-                    "call_id": call["call_id"],
-                    "name": function_name,
-                    "arguments": call["arguments"],
-                }
-                updated_messages.append(function_call_msg)
-                yield TextStreamChunk(
-                    type=ChunkType.MCP_STATUS,
-                    status="function_call",
-                    content=f"Arguments for Calling {function_name}: {call['arguments']}",
-                    source=f"mcp_{function_name}",
-                )
-
-                # Add function output to messages and yield status chunk
-                function_output_msg = {
-                    "type": "function_call_output",
-                    "call_id": call["call_id"],
-                    "output": str(result),
-                }
-                updated_messages.append(function_output_msg)
-                yield TextStreamChunk(
-                    type=ChunkType.MCP_STATUS,
-                    status="function_call_output",
-                    content=f"Results for Calling {function_name}: {str(result_obj.content[0].text)}",
-                    source=f"mcp_{function_name}",
-                )
-
-                logger.info(f"Executed MCP function {function_name} (stdio/streamable-http)")
-                processed_call_ids.add(call["call_id"])
-
-                # Yield MCP tool response status
-                yield TextStreamChunk(
-                    type=ChunkType.MCP_STATUS,
-                    status="mcp_tool_response",
-                    content=f"✅ [MCP Tool] {function_name} completed",
-                    source=f"mcp_{function_name}",
-                )
-
-                mcp_functions_executed = True
-                functions_executed = True
+                functions_executed = True

             # Ensure all captured function calls have results to prevent hanging
             for call in captured_function_calls:
@@ -510,10 +450,10 @@ class ResponseBackend(CustomToolAndMCPBackend):
                         "output": f"Error: Tool call {call['call_id']} for function {call['name']} was not processed. This may indicate a validation or execution error.",
                     }
                     updated_messages.append(error_output_msg)
+                    functions_executed = True

             # Trim history after function executions to bound memory usage
-            if functions_executed
+            if functions_executed:
                 updated_messages = super()._trim_message_history(updated_messages)

             # Recursive call with updated messages
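The response.py rewrite replaces two long, nearly identical inline blocks (one for custom tools, one for MCP tools) with declarative `ToolExecutionConfig` objects handed to a shared `_execute_tool_with_logging` helper, plus `_append_tool_result_message` / `_append_tool_error_message` helpers for the Response API's paired `function_call` / `function_call_output` messages. A rough sketch of the config-driven pattern; the field names and the `execute_tool_with_logging` helper below are simplified stand-ins, not massgen's exact signatures:

```python
import asyncio
from dataclasses import dataclass
from typing import Any, AsyncGenerator, Awaitable, Callable, Dict


@dataclass
class ToolExecutionConfig:
    # Per-family settings ("custom" vs "mcp"); the real class carries more fields
    # (emoji prefixes, status names, chunk types).
    tool_type: str
    status_called: str
    status_response: str
    execution_callback: Callable[[Dict[str, Any]], Awaitable[Any]]


async def execute_tool_with_logging(
    call: Dict[str, Any],
    cfg: ToolExecutionConfig,
) -> AsyncGenerator[Dict[str, Any], None]:
    """One loop body serves every tool family; the differences live in the config."""
    yield {"status": cfg.status_called, "content": f"Calling {call['name']}"}
    result = await cfg.execution_callback(call)
    yield {"status": cfg.status_response, "content": f"{call['name']} -> {result}"}


async def demo() -> None:
    async def echo_tool(call: Dict[str, Any]) -> str:
        return f"echo:{call['arguments']}"

    custom_cfg = ToolExecutionConfig(
        tool_type="custom",
        status_called="custom_tool_called",
        status_response="custom_tool_response",
        execution_callback=echo_tool,
    )
    async for chunk in execute_tool_with_logging({"name": "echo", "arguments": "{}"}, custom_cfg):
        print(chunk)


asyncio.run(demo())
```

The design choice mirrored here is that the loop body stays identical for every tool family, so adding a third family means adding one config object rather than another copy of the execution block.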
massgen/cli.py
CHANGED
@@ -2512,6 +2512,27 @@ async def main(args):
         if args.debug:
             logger.debug(f"Resolved config path: {resolved_path}")
             logger.debug(f"Config content: {json.dumps(config, indent=2)}")
+
+        # Automatic config validation (unless --skip-validation flag is set)
+        if not args.skip_validation:
+            from .config_validator import ConfigValidator
+
+            validator = ConfigValidator()
+            validation_result = validator.validate_config(config)
+
+            # Show errors if any
+            if validation_result.has_errors():
+                print(validation_result.format_errors(), file=sys.stderr)
+                print(f"\n{BRIGHT_RED}❌ Config validation failed. Fix errors above or use --skip-validation to bypass.{RESET}\n")
+                sys.exit(1)
+
+            # Show warnings (non-blocking unless --strict-validation)
+            if validation_result.has_warnings():
+                print(validation_result.format_warnings())
+                if args.strict_validation:
+                    print(f"\n{BRIGHT_RED}❌ Config validation failed in strict mode (warnings treated as errors).{RESET}\n")
+                    sys.exit(1)
+                print()  # Extra newline for readability
     else:
         model = args.model
         if args.backend:
@@ -2825,6 +2846,33 @@ Environment Variables:
         action="store_true",
         help="Include example configurations in schema display",
     )
+    parser.add_argument(
+        "--validate",
+        type=str,
+        metavar="CONFIG_FILE",
+        help="Validate a configuration file without running it",
+    )
+    parser.add_argument(
+        "--strict",
+        action="store_true",
+        help="Treat warnings as errors during validation (use with --validate)",
+    )
+    parser.add_argument(
+        "--json",
+        dest="json_output",
+        action="store_true",
+        help="Output validation results in JSON format (use with --validate)",
+    )
+    parser.add_argument(
+        "--skip-validation",
+        action="store_true",
+        help="Skip automatic config validation when loading config files",
+    )
+    parser.add_argument(
+        "--strict-validation",
+        action="store_true",
+        help="Treat config warnings as errors and abort execution",
+    )

     # Timeout options
     timeout_group = parser.add_argument_group("timeout settings", "Override timeout settings from config")
@@ -2836,14 +2884,26 @@ Environment Variables:

     args = parser.parse_args()

+    # Handle special commands first (before logging setup to avoid creating log dirs)
+    if args.validate:
+        from .config_validator import ConfigValidator
+
+        validator = ConfigValidator()
+        result = validator.validate_config_file(args.validate)
+
+        # Output results
+        if args.json_output:
+            # JSON output for machine parsing
+            print(json.dumps(result.to_dict(), indent=2))
+        else:
+            # Human-readable output
+            print(result.format_all())
+
+        # Exit with appropriate code
+        if not result.is_valid() or (args.strict and result.has_warnings()):
+            sys.exit(1)
+        sys.exit(0)

-    # Handle special commands first
     if args.list_examples:
         show_available_examples()
         return
@@ -2858,6 +2918,13 @@ Environment Variables:
         show_schema(backend=args.schema_backend, show_examples=args.with_examples)
         return

+    # Setup logging for all other commands (actual execution, setup, init, etc.)
+    setup_logging(debug=args.debug)
+
+    if args.debug:
+        logger.info("Debug mode enabled")
+        logger.debug(f"Command line arguments: {vars(args)}")
+
     # Launch interactive API key setup if requested
     if args.setup:
         builder = ConfigBuilder()